4cd27451048d8bbc6ef0f8d834f619d31823b0f3
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 3, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include "as.h"
29 #include <limits.h>
30 #include <stdarg.h>
31 #define NO_RELOC 0
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
35 #include "libiberty.h"
36 #include "opcode/arm.h"
37
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
42
43 #include "dwarf2dbg.h"
44
45 #ifdef OBJ_ELF
46 /* Must be at least the size of the largest unwind opcode (currently two). */
47 #define ARM_OPCODE_CHUNK_SIZE 8
48
49 /* This structure holds the unwinding state. */
50
51 static struct
52 {
   /* Marks the start of the function being unwound; presumably set by the
      .fnstart directive handler — TODO confirm against the directive code
      (not visible in this chunk).  */
53 symbolS * proc_start;
   /* Symbol for this function's unwind table entry, if one exists.  */
54 symbolS * table_entry;
   /* Personality routine symbol, when one was named explicitly;
      otherwise personality_index selects a predefined routine.  */
55 symbolS * personality_routine;
56 int personality_index;
57 /* The segment containing the function. */
58 segT saved_seg;
59 subsegT saved_subseg;
60 /* Opcodes generated from this function. */
61 unsigned char * opcodes;
   /* Number of opcode bytes used / allocated in OPCODES.  */
62 int opcode_count;
63 int opcode_alloc;
64 /* The number of bytes pushed to the stack. */
65 offsetT frame_size;
66 /* We don't add stack adjustment opcodes immediately so that we can merge
67 multiple adjustments. We can also omit the final adjustment
68 when using a frame pointer. */
69 offsetT pending_offset;
70 /* These two fields are set by both unwind_movsp and unwind_setfp. They
71 hold the reg+offset to use when restoring sp from a frame pointer. */
72 offsetT fp_offset;
73 int fp_reg;
74 /* Nonzero if an unwind_setfp directive has been seen. */
75 unsigned fp_used:1;
76 /* Nonzero if the last opcode restores sp from fp_reg. */
77 unsigned sp_restored:1;
78 } unwind;
79
80 #endif /* OBJ_ELF */
81
82 /* Results from operand parsing worker functions. */
83
84 typedef enum
85 {
86 PARSE_OPERAND_SUCCESS,
87 PARSE_OPERAND_FAIL,
88 PARSE_OPERAND_FAIL_NO_BACKTRACK
89 } parse_operand_result;
90
91 enum arm_float_abi
92 {
93 ARM_FLOAT_ABI_HARD,
94 ARM_FLOAT_ABI_SOFTFP,
95 ARM_FLOAT_ABI_SOFT
96 };
97
98 /* Types of processor to assemble for. */
99 #ifndef CPU_DEFAULT
100 /* The code that was here used to select a default CPU depending on compiler
101 pre-defines which were only present when doing native builds, thus
102 changing gas' default behaviour depending upon the build host.
103
104 If you have a target that requires a default CPU option then the you
105 should define CPU_DEFAULT here. */
106 #endif
107
108 #ifndef FPU_DEFAULT
109 # ifdef TE_LINUX
110 # define FPU_DEFAULT FPU_ARCH_FPA
111 # elif defined (TE_NetBSD)
112 # ifdef OBJ_ELF
113 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
114 # else
115 /* Legacy a.out format. */
116 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
117 # endif
118 # elif defined (TE_VXWORKS)
119 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
120 # else
121 /* For backwards compatibility, default to FPA. */
122 # define FPU_DEFAULT FPU_ARCH_FPA
123 # endif
124 #endif /* ifndef FPU_DEFAULT */
125
126 #define streq(a, b) (strcmp (a, b) == 0)
127
128 static arm_feature_set cpu_variant;
129 static arm_feature_set arm_arch_used;
130 static arm_feature_set thumb_arch_used;
131
132 /* Flags stored in private area of BFD structure. */
133 static int uses_apcs_26 = FALSE;
134 static int atpcs = FALSE;
135 static int support_interwork = FALSE;
136 static int uses_apcs_float = FALSE;
137 static int pic_code = FALSE;
138 static int fix_v4bx = FALSE;
139 /* Warn on using deprecated features. */
140 static int warn_on_deprecated = TRUE;
141
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
155
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
166
167 #ifdef CPU_DEFAULT
168 static const arm_feature_set cpu_default = CPU_DEFAULT;
169 #endif
170
171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
179 static const arm_feature_set arm_ext_v4t_5 =
180 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE (ARM_EXT_V6M, 0);
189 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
190 static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
191 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
192 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
193 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
194 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
195 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
196 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
197 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
198 static const arm_feature_set arm_ext_v8 = ARM_FEATURE (ARM_EXT_V8, 0);
199 static const arm_feature_set arm_ext_m =
200 ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, 0);
201 static const arm_feature_set arm_ext_mp = ARM_FEATURE (ARM_EXT_MP, 0);
202 static const arm_feature_set arm_ext_sec = ARM_FEATURE (ARM_EXT_SEC, 0);
203 static const arm_feature_set arm_ext_os = ARM_FEATURE (ARM_EXT_OS, 0);
204 static const arm_feature_set arm_ext_adiv = ARM_FEATURE (ARM_EXT_ADIV, 0);
205 static const arm_feature_set arm_ext_virt = ARM_FEATURE (ARM_EXT_VIRT, 0);
206
207 static const arm_feature_set arm_arch_any = ARM_ANY;
208 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
209 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
210 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
211 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
212
213 static const arm_feature_set arm_cext_iwmmxt2 =
214 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
215 static const arm_feature_set arm_cext_iwmmxt =
216 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
217 static const arm_feature_set arm_cext_xscale =
218 ARM_FEATURE (0, ARM_CEXT_XSCALE);
219 static const arm_feature_set arm_cext_maverick =
220 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
221 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
222 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
223 static const arm_feature_set fpu_vfp_ext_v1xd =
224 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
225 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
226 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
227 static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
228 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
229 static const arm_feature_set fpu_vfp_ext_d32 =
230 ARM_FEATURE (0, FPU_VFP_EXT_D32);
231 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
232 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
233 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
234 static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
235 static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
236 static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
237 static const arm_feature_set fpu_vfp_ext_armv8 =
238 ARM_FEATURE (0, FPU_VFP_EXT_ARMV8);
239 static const arm_feature_set fpu_neon_ext_armv8 =
240 ARM_FEATURE (0, FPU_NEON_EXT_ARMV8);
241 static const arm_feature_set fpu_crypto_ext_armv8 =
242 ARM_FEATURE (0, FPU_CRYPTO_EXT_ARMV8);
243
244 static int mfloat_abi_opt = -1;
245 /* Record user cpu selection for object attributes. */
246 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
247 /* Must be long enough to hold any of the names in arm_cpus. */
248 static char selected_cpu_name[16];
249
250 /* Return if no cpu was selected on command-line. */
251 static bfd_boolean
252 no_cpu_selected (void)
253 {
254 return selected_cpu.core == arm_arch_none.core
255 && selected_cpu.coproc == arm_arch_none.coproc;
256 }
257
258 #ifdef OBJ_ELF
259 # ifdef EABI_DEFAULT
260 static int meabi_flags = EABI_DEFAULT;
261 # else
262 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
263 # endif
264
265 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
266
267 bfd_boolean
268 arm_is_eabi (void)
269 {
270 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
271 }
272 #endif
273
274 #ifdef OBJ_ELF
275 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
276 symbolS * GOT_symbol;
277 #endif
278
279 /* 0: assemble for ARM,
280 1: assemble for Thumb,
281 2: assemble for Thumb even though target CPU does not support thumb
282 instructions. */
283 static int thumb_mode = 0;
284 /* A value distinct from the possible values for thumb_mode that we
285 can use to record whether thumb_mode has been copied into the
286 tc_frag_data field of a frag. */
287 #define MODE_RECORDED (1 << 4)
288
289 /* Specifies the intrinsic IT insn behavior mode. */
290 enum implicit_it_mode
291 {
292 IMPLICIT_IT_MODE_NEVER = 0x00,
293 IMPLICIT_IT_MODE_ARM = 0x01,
294 IMPLICIT_IT_MODE_THUMB = 0x02,
295 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
296 };
297 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
298
299 /* If unified_syntax is true, we are processing the new unified
300 ARM/Thumb syntax. Important differences from the old ARM mode:
301
302 - Immediate operands do not require a # prefix.
303 - Conditional affixes always appear at the end of the
304 instruction. (For backward compatibility, those instructions
305 that formerly had them in the middle, continue to accept them
306 there.)
307 - The IT instruction may appear, and if it does is validated
308 against subsequent conditional affixes. It does not generate
309 machine code.
310
311 Important differences from the old Thumb mode:
312
313 - Immediate operands do not require a # prefix.
314 - Most of the V6T2 instructions are only available in unified mode.
315 - The .N and .W suffixes are recognized and honored (it is an error
316 if they cannot be honored).
317 - All instructions set the flags if and only if they have an 's' affix.
318 - Conditional affixes may be used. They are validated against
319 preceding IT instructions. Unlike ARM mode, you cannot use a
320 conditional affix except in the scope of an IT instruction. */
321
322 static bfd_boolean unified_syntax = FALSE;
323
324 /* An immediate operand can start with #, and ld*, st*, pld operands
325 can contain [ and ]. We need to tell APP not to elide whitespace
326 before a [, which can appear as the first operand for pld. */
327 const char arm_symbol_chars[] = "#[]";
328
329 enum neon_el_type
330 {
331 NT_invtype,
332 NT_untyped,
333 NT_integer,
334 NT_float,
335 NT_poly,
336 NT_signed,
337 NT_unsigned
338 };
339
340 struct neon_type_el
341 {
342 enum neon_el_type type;
343 unsigned size;
344 };
345
346 #define NEON_MAX_TYPE_ELS 4
347
348 struct neon_type
349 {
350 struct neon_type_el el[NEON_MAX_TYPE_ELS];
351 unsigned elems;
352 };
353
354 enum it_instruction_type
355 {
356 OUTSIDE_IT_INSN,
357 INSIDE_IT_INSN,
358 INSIDE_IT_LAST_INSN,
359 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
360 if inside, should be the last one. */
361 NEUTRAL_IT_INSN, /* This could be either inside or outside,
362 i.e. BKPT and NOP. */
363 IT_INSN /* The IT insn has been parsed. */
364 };
365
366 /* The maximum number of operands we need. */
367 #define ARM_IT_MAX_OPERANDS 6
368
369 struct arm_it
370 {
371 const char * error;
372 unsigned long instruction;
373 int size;
374 int size_req;
375 int cond;
376 /* "uncond_value" is set to the value in place of the conditional field in
377 unconditional versions of the instruction, or -1 if nothing is
378 appropriate. */
379 int uncond_value;
380 struct neon_type vectype;
381 /* This does not indicate an actual NEON instruction, only that
382 the mnemonic accepts neon-style type suffixes. */
383 int is_neon;
384 /* Set to the opcode if the instruction needs relaxation.
385 Zero if the instruction is not relaxed. */
386 unsigned long relax;
387 struct
388 {
389 bfd_reloc_code_real_type type;
390 expressionS exp;
391 int pc_rel;
392 } reloc;
393
394 enum it_instruction_type it_insn_type;
395
396 struct
397 {
398 unsigned reg;
399 signed int imm;
400 struct neon_type_el vectype;
401 unsigned present : 1; /* Operand present. */
402 unsigned isreg : 1; /* Operand was a register. */
403 unsigned immisreg : 1; /* .imm field is a second register. */
404 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
405 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
406 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
407 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
408 instructions. This allows us to disambiguate ARM <-> vector insns. */
409 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
410 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
411 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
412 unsigned issingle : 1; /* Operand is VFP single-precision register. */
413 unsigned hasreloc : 1; /* Operand has relocation suffix. */
414 unsigned writeback : 1; /* Operand has trailing ! */
415 unsigned preind : 1; /* Preindexed address. */
416 unsigned postind : 1; /* Postindexed address. */
417 unsigned negative : 1; /* Index register was negated. */
418 unsigned shifted : 1; /* Shift applied to operation. */
419 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
420 } operands[ARM_IT_MAX_OPERANDS];
421 };
422
423 static struct arm_it inst;
424
425 #define NUM_FLOAT_VALS 8
426
427 const char * fp_const[] =
428 {
429 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
430 };
431
432 /* Number of littlenums required to hold an extended precision number. */
433 #define MAX_LITTLENUMS 6
434
435 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
436
437 #define FAIL (-1)
438 #define SUCCESS (0)
439
440 #define SUFF_S 1
441 #define SUFF_D 2
442 #define SUFF_E 3
443 #define SUFF_P 4
444
445 #define CP_T_X 0x00008000
446 #define CP_T_Y 0x00400000
447
448 #define CONDS_BIT 0x00100000
449 #define LOAD_BIT 0x00100000
450
451 #define DOUBLE_LOAD_FLAG 0x00000001
452
453 struct asm_cond
454 {
455 const char * template_name;
456 unsigned long value;
457 };
458
459 #define COND_ALWAYS 0xE
460
461 struct asm_psr
462 {
463 const char * template_name;
464 unsigned long field;
465 };
466
467 struct asm_barrier_opt
468 {
469 const char * template_name;
470 unsigned long value;
471 const arm_feature_set arch;
472 };
473
474 /* The bit that distinguishes CPSR and SPSR. */
475 #define SPSR_BIT (1 << 22)
476
477 /* The individual PSR flag bits. */
478 #define PSR_c (1 << 16)
479 #define PSR_x (1 << 17)
480 #define PSR_s (1 << 18)
481 #define PSR_f (1 << 19)
482
483 struct reloc_entry
484 {
485 char * name;
486 bfd_reloc_code_real_type reloc;
487 };
488
489 enum vfp_reg_pos
490 {
491 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
492 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
493 };
494
495 enum vfp_ldstm_type
496 {
497 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
498 };
499
500 /* Bits for DEFINED field in neon_typed_alias. */
501 #define NTA_HASTYPE 1
502 #define NTA_HASINDEX 2
503
504 struct neon_typed_alias
505 {
506 unsigned char defined;
507 unsigned char index;
508 struct neon_type_el eltype;
509 };
510
511 /* ARM register categories. This includes coprocessor numbers and various
512 architecture extensions' registers. */
513 enum arm_reg_type
514 {
515 REG_TYPE_RN,
516 REG_TYPE_CP,
517 REG_TYPE_CN,
518 REG_TYPE_FN,
519 REG_TYPE_VFS,
520 REG_TYPE_VFD,
521 REG_TYPE_NQ,
522 REG_TYPE_VFSD,
523 REG_TYPE_NDQ,
524 REG_TYPE_NSDQ,
525 REG_TYPE_VFC,
526 REG_TYPE_MVF,
527 REG_TYPE_MVD,
528 REG_TYPE_MVFX,
529 REG_TYPE_MVDX,
530 REG_TYPE_MVAX,
531 REG_TYPE_DSPSC,
532 REG_TYPE_MMXWR,
533 REG_TYPE_MMXWC,
534 REG_TYPE_MMXWCG,
535 REG_TYPE_XSCALE,
536 REG_TYPE_RNB
537 };
538
539 /* Structure for a hash table entry for a register.
540 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
541 information which states whether a vector type or index is specified (for a
542 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
543 struct reg_entry
544 {
545 const char * name;
546 unsigned int number;
547 unsigned char type;
548 unsigned char builtin;
549 struct neon_typed_alias * neon;
550 };
551
552 /* Diagnostics used when we don't get a register of the expected type. */
553 const char * const reg_expected_msgs[] =
554 {
555 N_("ARM register expected"),
556 N_("bad or missing co-processor number"),
557 N_("co-processor register expected"),
558 N_("FPA register expected"),
559 N_("VFP single precision register expected"),
560 N_("VFP/Neon double precision register expected"),
561 N_("Neon quad precision register expected"),
562 N_("VFP single or double precision register expected"),
563 N_("Neon double or quad precision register expected"),
564 N_("VFP single, double or Neon quad precision register expected"),
565 N_("VFP system register expected"),
566 N_("Maverick MVF register expected"),
567 N_("Maverick MVD register expected"),
568 N_("Maverick MVFX register expected"),
569 N_("Maverick MVDX register expected"),
570 N_("Maverick MVAX register expected"),
571 N_("Maverick DSPSC register expected"),
572 N_("iWMMXt data register expected"),
573 N_("iWMMXt control register expected"),
574 N_("iWMMXt scalar register expected"),
575 N_("XScale accumulator register expected"),
576 };
577
578 /* Some well known registers that we refer to directly elsewhere. */
579 #define REG_R12 12
580 #define REG_SP 13
581 #define REG_LR 14
582 #define REG_PC 15
583
584 /* ARM instructions take 4bytes in the object file, Thumb instructions
585 take 2: */
586 #define INSN_SIZE 4
587
588 struct asm_opcode
589 {
590 /* Basic string to match. */
591 const char * template_name;
592
593 /* Parameters to instruction. */
594 unsigned int operands[8];
595
596 /* Conditional tag - see opcode_lookup. */
597 unsigned int tag : 4;
598
599 /* Basic instruction code. */
600 unsigned int avalue : 28;
601
602 /* Thumb-format instruction code. */
603 unsigned int tvalue;
604
605 /* Which architecture variant provides this instruction. */
606 const arm_feature_set * avariant;
607 const arm_feature_set * tvariant;
608
609 /* Function to call to encode instruction in ARM format. */
610 void (* aencode) (void);
611
612 /* Function to call to encode instruction in Thumb format. */
613 void (* tencode) (void);
614 };
615
616 /* Defines for various bits that we will want to toggle. */
617 #define INST_IMMEDIATE 0x02000000
618 #define OFFSET_REG 0x02000000
619 #define HWOFFSET_IMM 0x00400000
620 #define SHIFT_BY_REG 0x00000010
621 #define PRE_INDEX 0x01000000
622 #define INDEX_UP 0x00800000
623 #define WRITE_BACK 0x00200000
624 #define LDM_TYPE_2_OR_3 0x00400000
625 #define CPSI_MMOD 0x00020000
626
627 #define LITERAL_MASK 0xf000f000
628 #define OPCODE_MASK 0xfe1fffff
629 #define V4_STR_BIT 0x00000020
630
631 #define T2_SUBS_PC_LR 0xf3de8f00
632
633 #define DATA_OP_SHIFT 21
634
635 #define T2_OPCODE_MASK 0xfe1fffff
636 #define T2_DATA_OP_SHIFT 21
637
638 #define A_COND_MASK 0xf0000000
639 #define A_PUSH_POP_OP_MASK 0x0fff0000
640
641 /* Opcodes for pushing/poping registers to/from the stack. */
642 #define A1_OPCODE_PUSH 0x092d0000
643 #define A2_OPCODE_PUSH 0x052d0004
644 #define A2_OPCODE_POP 0x049d0004
645
646 /* Codes to distinguish the arithmetic instructions. */
647 #define OPCODE_AND 0
648 #define OPCODE_EOR 1
649 #define OPCODE_SUB 2
650 #define OPCODE_RSB 3
651 #define OPCODE_ADD 4
652 #define OPCODE_ADC 5
653 #define OPCODE_SBC 6
654 #define OPCODE_RSC 7
655 #define OPCODE_TST 8
656 #define OPCODE_TEQ 9
657 #define OPCODE_CMP 10
658 #define OPCODE_CMN 11
659 #define OPCODE_ORR 12
660 #define OPCODE_MOV 13
661 #define OPCODE_BIC 14
662 #define OPCODE_MVN 15
663
664 #define T2_OPCODE_AND 0
665 #define T2_OPCODE_BIC 1
666 #define T2_OPCODE_ORR 2
667 #define T2_OPCODE_ORN 3
668 #define T2_OPCODE_EOR 4
669 #define T2_OPCODE_ADD 8
670 #define T2_OPCODE_ADC 10
671 #define T2_OPCODE_SBC 11
672 #define T2_OPCODE_SUB 13
673 #define T2_OPCODE_RSB 14
674
675 #define T_OPCODE_MUL 0x4340
676 #define T_OPCODE_TST 0x4200
677 #define T_OPCODE_CMN 0x42c0
678 #define T_OPCODE_NEG 0x4240
679 #define T_OPCODE_MVN 0x43c0
680
681 #define T_OPCODE_ADD_R3 0x1800
682 #define T_OPCODE_SUB_R3 0x1a00
683 #define T_OPCODE_ADD_HI 0x4400
684 #define T_OPCODE_ADD_ST 0xb000
685 #define T_OPCODE_SUB_ST 0xb080
686 #define T_OPCODE_ADD_SP 0xa800
687 #define T_OPCODE_ADD_PC 0xa000
688 #define T_OPCODE_ADD_I8 0x3000
689 #define T_OPCODE_SUB_I8 0x3800
690 #define T_OPCODE_ADD_I3 0x1c00
691 #define T_OPCODE_SUB_I3 0x1e00
692
693 #define T_OPCODE_ASR_R 0x4100
694 #define T_OPCODE_LSL_R 0x4080
695 #define T_OPCODE_LSR_R 0x40c0
696 #define T_OPCODE_ROR_R 0x41c0
697 #define T_OPCODE_ASR_I 0x1000
698 #define T_OPCODE_LSL_I 0x0000
699 #define T_OPCODE_LSR_I 0x0800
700
701 #define T_OPCODE_MOV_I8 0x2000
702 #define T_OPCODE_CMP_I8 0x2800
703 #define T_OPCODE_CMP_LR 0x4280
704 #define T_OPCODE_MOV_HR 0x4600
705 #define T_OPCODE_CMP_HR 0x4500
706
707 #define T_OPCODE_LDR_PC 0x4800
708 #define T_OPCODE_LDR_SP 0x9800
709 #define T_OPCODE_STR_SP 0x9000
710 #define T_OPCODE_LDR_IW 0x6800
711 #define T_OPCODE_STR_IW 0x6000
712 #define T_OPCODE_LDR_IH 0x8800
713 #define T_OPCODE_STR_IH 0x8000
714 #define T_OPCODE_LDR_IB 0x7800
715 #define T_OPCODE_STR_IB 0x7000
716 #define T_OPCODE_LDR_RW 0x5800
717 #define T_OPCODE_STR_RW 0x5000
718 #define T_OPCODE_LDR_RH 0x5a00
719 #define T_OPCODE_STR_RH 0x5200
720 #define T_OPCODE_LDR_RB 0x5c00
721 #define T_OPCODE_STR_RB 0x5400
722
723 #define T_OPCODE_PUSH 0xb400
724 #define T_OPCODE_POP 0xbc00
725
726 #define T_OPCODE_BRANCH 0xe000
727
728 #define THUMB_SIZE 2 /* Size of thumb instruction. */
729 #define THUMB_PP_PC_LR 0x0100
730 #define THUMB_LOAD_BIT 0x0800
731 #define THUMB2_LOAD_BIT 0x00100000
732
/* Diagnostic strings assigned to inst.error when operand or context
   checks fail.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* FIX: dropped a stray trailing semicolon from this definition.  The
   semicolon made the macro expand to "...";; which breaks uses such as
   "if (cond) inst.error = BAD_ADDR_MODE; else ..." (the extra ';'
   orphans the else).  */
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
753
754 static struct hash_control * arm_ops_hsh;
755 static struct hash_control * arm_cond_hsh;
756 static struct hash_control * arm_shift_hsh;
757 static struct hash_control * arm_psr_hsh;
758 static struct hash_control * arm_v7m_psr_hsh;
759 static struct hash_control * arm_reg_hsh;
760 static struct hash_control * arm_reloc_hsh;
761 static struct hash_control * arm_barrier_opt_hsh;
762
763 /* Stuff needed to resolve the label ambiguity
764 As:
765 ...
766 label: <insn>
767 may differ from:
768 ...
769 label:
770 <insn> */
771
772 symbolS * last_label_seen;
773 static int label_is_thumb_function_name = FALSE;
774
775 /* Literal pool structure. Held on a per-section
776 and per-sub-section basis. */
777
778 #define MAX_LITERAL_POOL_SIZE 1024
779 typedef struct literal_pool
780 {
781 expressionS literals [MAX_LITERAL_POOL_SIZE];
782 unsigned int next_free_entry;
783 unsigned int id;
784 symbolS * symbol;
785 segT section;
786 subsegT sub_section;
787 #ifdef OBJ_ELF
788 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
789 #endif
790 struct literal_pool * next;
791 } literal_pool;
792
793 /* Pointer to a linked list of literal pools. */
794 literal_pool * list_of_pools = NULL;
795
796 #ifdef OBJ_ELF
797 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
798 #else
799 static struct current_it now_it;
800 #endif
801
802 static inline int
803 now_it_compatible (int cond)
804 {
805 return (cond & ~1) == (now_it.cc & ~1);
806 }
807
808 static inline int
809 conditional_insn (void)
810 {
811 return inst.cond != COND_ALWAYS;
812 }
813
814 static int in_it_block (void);
815
816 static int handle_it_state (void);
817
818 static void force_automatic_it_block_close (void);
819
820 static void it_fsm_post_encode (void);
821
822 #define set_it_insn_type(type) \
823 do \
824 { \
825 inst.it_insn_type = type; \
826 if (handle_it_state () == FAIL) \
827 return; \
828 } \
829 while (0)
830
831 #define set_it_insn_type_nonvoid(type, failret) \
832 do \
833 { \
834 inst.it_insn_type = type; \
835 if (handle_it_state () == FAIL) \
836 return failret; \
837 } \
838 while(0)
839
840 #define set_it_insn_type_last() \
841 do \
842 { \
843 if (inst.cond == COND_ALWAYS) \
844 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
845 else \
846 set_it_insn_type (INSIDE_IT_LAST_INSN); \
847 } \
848 while (0)
849
850 /* Pure syntax. */
851
852 /* This array holds the chars that always start a comment. If the
853 pre-processor is disabled, these aren't very useful. */
854 const char comment_chars[] = "@";
855
856 /* This array holds the chars that only start a comment at the beginning of
857 a line. If the line seems to have the form '# 123 filename'
858 .line and .file directives will appear in the pre-processed output. */
859 /* Note that input_file.c hand checks for '#' at the beginning of the
860 first line of the input file. This is because the compiler outputs
861 #NO_APP at the beginning of its output. */
862 /* Also note that comments like this one will always work. */
863 const char line_comment_chars[] = "#";
864
865 const char line_separator_chars[] = ";";
866
867 /* Chars that can be used to separate mant
868 from exp in floating point numbers. */
869 const char EXP_CHARS[] = "eE";
870
871 /* Chars that mean this number is a floating point constant. */
872 /* As in 0f12.456 */
873 /* or 0d1.2345e12 */
874
875 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
876
877 /* Prefix characters that indicate the start of an immediate
878 value. */
879 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
880
881 /* Separator character handling. */
882
883 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
884
885 static inline int
886 skip_past_char (char ** str, char c)
887 {
888 if (**str == c)
889 {
890 (*str)++;
891 return SUCCESS;
892 }
893 else
894 return FAIL;
895 }
896
897 #define skip_past_comma(str) skip_past_char (str, ',')
898
899 /* Arithmetic expressions (possibly involving symbols). */
900
901 /* Return TRUE if anything in the expression is a bignum. */
902
903 static int
904 walk_no_bignums (symbolS * sp)
905 {
906 if (symbol_get_value_expression (sp)->X_op == O_big)
907 return 1;
908
909 if (symbol_get_value_expression (sp)->X_add_symbol)
910 {
911 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
912 || (symbol_get_value_expression (sp)->X_op_symbol
913 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
914 }
915
916 return 0;
917 }
918
919 static int in_my_get_expression = 0;
920
921 /* Third argument to my_get_expression. */
922 #define GE_NO_PREFIX 0
923 #define GE_IMM_PREFIX 1
924 #define GE_OPT_PREFIX 2
925 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
926 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
927 #define GE_OPT_PREFIX_BIG 3
928
/* Parse an expression at *STR into EP, honouring the immediate-prefix
   policy PREFIX_MODE (one of the GE_* values above).  On success
   returns 0 and advances *STR past the consumed text; on failure
   returns non-zero with inst.error set.  Temporarily redirects
   input_line_pointer so that the generic `expression' machinery can be
   used on an arbitrary buffer.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional. */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
                  : GE_OPT_PREFIX;

  /* Enforce (or skip) the '#'/'$' prefix according to PREFIX_MODE.  */
  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Point the generic expression parser at our buffer; the flag lets
     md_operand () know bad expressions should be flagged, not fatal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand(). */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* Keep any earlier, more specific diagnostic.  */
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called. */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
1014
1015 /* Turn a string in input_line_pointer into a floating point constant
1016 of type TYPE, and store the appropriate bytes in *LITP. The number
1017 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1018 returned, or NULL on OK.
1019
1020 Note that fp constants aren't represent in the normal way on the ARM.
1021 In big endian mode, things are as expected. However, in little endian
1022 mode fp constants are big-endian word-wise, and little-endian byte-wise
1023 within the words. For example, (double) 1.1 in big endian mode is
1024 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1025 the byte sequence 99 99 f1 3f 9a 99 99 99.
1026
1027 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1028
1029 char *
1030 md_atof (int type, char * litP, int * sizeP)
1031 {
1032 int prec;
1033 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1034 char *t;
1035 int i;
1036
1037 switch (type)
1038 {
1039 case 'f':
1040 case 'F':
1041 case 's':
1042 case 'S':
1043 prec = 2;
1044 break;
1045
1046 case 'd':
1047 case 'D':
1048 case 'r':
1049 case 'R':
1050 prec = 4;
1051 break;
1052
1053 case 'x':
1054 case 'X':
1055 prec = 5;
1056 break;
1057
1058 case 'p':
1059 case 'P':
1060 prec = 5;
1061 break;
1062
1063 default:
1064 *sizeP = 0;
1065 return _("Unrecognized or unsupported floating point constant");
1066 }
1067
1068 t = atof_ieee (input_line_pointer, type, words);
1069 if (t)
1070 input_line_pointer = t;
1071 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1072
1073 if (target_big_endian)
1074 {
1075 for (i = 0; i < prec; i++)
1076 {
1077 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1078 litP += sizeof (LITTLENUM_TYPE);
1079 }
1080 }
1081 else
1082 {
1083 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1084 for (i = prec - 1; i >= 0; i--)
1085 {
1086 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1087 litP += sizeof (LITTLENUM_TYPE);
1088 }
1089 else
1090 /* For a 4 byte float the order of elements in `words' is 1 0.
1091 For an 8 byte float the order is 1 0 3 2. */
1092 for (i = 0; i < prec; i += 2)
1093 {
1094 md_number_to_chars (litP, (valueT) words[i + 1],
1095 sizeof (LITTLENUM_TYPE));
1096 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1097 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1098 litP += 2 * sizeof (LITTLENUM_TYPE);
1099 }
1100 }
1101
1102 return NULL;
1103 }
1104
1105 /* We handle all bad expressions here, so that we can report the faulty
1106 instruction in the error message. */
1107 void
1108 md_operand (expressionS * exp)
1109 {
1110 if (in_my_get_expression)
1111 exp->X_op = O_illegal;
1112 }
1113
1114 /* Immediate values. */
1115
1116 /* Generic immediate-value read function for use in directives.
1117 Accepts anything that 'expression' can fold to a constant.
1118 *val receives the number. */
1119 #ifdef OBJ_ELF
1120 static int
1121 immediate_for_directive (int *val)
1122 {
1123 expressionS exp;
1124 exp.X_op = O_illegal;
1125
1126 if (is_immediate_prefix (*input_line_pointer))
1127 {
1128 input_line_pointer++;
1129 expression (&exp);
1130 }
1131
1132 if (exp.X_op != O_constant)
1133 {
1134 as_bad (_("expected #constant"));
1135 ignore_rest_of_line ();
1136 return FAIL;
1137 }
1138 *val = exp.X_add_number;
1139 return SUCCESS;
1140 }
1141 #endif
1142
1143 /* Register parsing. */
1144
1145 /* Generic register parser. CCP points to what should be the
1146 beginning of a register name. If it is indeed a valid register
1147 name, advance CCP over it and return the reg_entry structure;
1148 otherwise return NULL. Does not issue diagnostics. */
1149
1150 static struct reg_entry *
1151 arm_reg_parse_multi (char **ccp)
1152 {
1153 char *start = *ccp;
1154 char *p;
1155 struct reg_entry *reg;
1156
1157 #ifdef REGISTER_PREFIX
1158 if (*start != REGISTER_PREFIX)
1159 return NULL;
1160 start++;
1161 #endif
1162 #ifdef OPTIONAL_REGISTER_PREFIX
1163 if (*start == OPTIONAL_REGISTER_PREFIX)
1164 start++;
1165 #endif
1166
1167 p = start;
1168 if (!ISALPHA (*p) || !is_name_beginner (*p))
1169 return NULL;
1170
1171 do
1172 p++;
1173 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1174
1175 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1176
1177 if (!reg)
1178 return NULL;
1179
1180 *ccp = p;
1181 return reg;
1182 }
1183
/* Accept the alternative spellings permitted for a few register
   classes: generic coprocessor names for Maverick registers, a bare
   number for coprocessors, and WCG names where WC is wanted.  START
   is the unconsumed input; REG is the entry found there (may be
   NULL).  Returns the register number or FAIL.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
                    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes. */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these. */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here. */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  NOTE(review): there is no break here, so a
	 failed bare-number parse drops into the WC case below —
	 confirm a WCG register is meant to satisfy a REG_TYPE_CP
	 request.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG. ??? I'm not sure this is true for all
	 instructions that take WC registers. */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1221
1222 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1223 return value is the register number or FAIL. */
1224
1225 static int
1226 arm_reg_parse (char **ccp, enum arm_reg_type type)
1227 {
1228 char *start = *ccp;
1229 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1230 int ret;
1231
1232 /* Do not allow a scalar (reg+index) to parse as a register. */
1233 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1234 return FAIL;
1235
1236 if (reg && reg->type == type)
1237 return reg->number;
1238
1239 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1240 return ret;
1241
1242 *ccp = start;
1243 return FAIL;
1244 }
1245
1246 /* Parse a Neon type specifier. *STR should point at the leading '.'
1247 character. Does no verification at this stage that the type fits the opcode
1248 properly. E.g.,
1249
1250 .i32.i32.s16
1251 .s32.f32
1252 .u16
1253
1254 Can all be legally parsed by this function.
1255
1256 Fills in neon_type struct pointer with parsed information, and updates STR
1257 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1258 type, FAIL if not. */
1259
1260 static int
1261 parse_neon_type (struct neon_type *type, char **str)
1262 {
1263 char *ptr = *str;
1264
1265 if (type)
1266 type->elems = 0;
1267
1268 while (type->elems < NEON_MAX_TYPE_ELS)
1269 {
1270 enum neon_el_type thistype = NT_untyped;
1271 unsigned thissize = -1u;
1272
1273 if (*ptr != '.')
1274 break;
1275
1276 ptr++;
1277
1278 /* Just a size without an explicit type. */
1279 if (ISDIGIT (*ptr))
1280 goto parsesize;
1281
1282 switch (TOLOWER (*ptr))
1283 {
1284 case 'i': thistype = NT_integer; break;
1285 case 'f': thistype = NT_float; break;
1286 case 'p': thistype = NT_poly; break;
1287 case 's': thistype = NT_signed; break;
1288 case 'u': thistype = NT_unsigned; break;
1289 case 'd':
1290 thistype = NT_float;
1291 thissize = 64;
1292 ptr++;
1293 goto done;
1294 default:
1295 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1296 return FAIL;
1297 }
1298
1299 ptr++;
1300
1301 /* .f is an abbreviation for .f32. */
1302 if (thistype == NT_float && !ISDIGIT (*ptr))
1303 thissize = 32;
1304 else
1305 {
1306 parsesize:
1307 thissize = strtoul (ptr, &ptr, 10);
1308
1309 if (thissize != 8 && thissize != 16 && thissize != 32
1310 && thissize != 64)
1311 {
1312 as_bad (_("bad size %d in type specifier"), thissize);
1313 return FAIL;
1314 }
1315 }
1316
1317 done:
1318 if (type)
1319 {
1320 type->el[type->elems].type = thistype;
1321 type->el[type->elems].size = thissize;
1322 type->elems++;
1323 }
1324 }
1325
1326 /* Empty/missing type is not a successful parse. */
1327 if (type->elems == 0)
1328 return FAIL;
1329
1330 *str = ptr;
1331
1332 return SUCCESS;
1333 }
1334
1335 /* Errors may be set multiple times during parsing or bit encoding
1336 (particularly in the Neon bits), but usually the earliest error which is set
1337 will be the most meaningful. Avoid overwriting it with later (cascading)
1338 errors by calling this function. */
1339
1340 static void
1341 first_error (const char *err)
1342 {
1343 if (!inst.error)
1344 inst.error = err;
1345 }
1346
1347 /* Parse a single type, e.g. ".s32", leading period included. */
1348 static int
1349 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1350 {
1351 char *str = *ccp;
1352 struct neon_type optype;
1353
1354 if (*str == '.')
1355 {
1356 if (parse_neon_type (&optype, &str) == SUCCESS)
1357 {
1358 if (optype.elems == 1)
1359 *vectype = optype.el[0];
1360 else
1361 {
1362 first_error (_("only one type should be specified for operand"));
1363 return FAIL;
1364 }
1365 }
1366 else
1367 {
1368 first_error (_("vector type expected"));
1369 return FAIL;
1370 }
1371 }
1372 else
1373 return FAIL;
1374
1375 *ccp = str;
1376
1377 return SUCCESS;
1378 }
1379
1380 /* Special meanings for indices (which have a range of 0-7), which will fit into
1381 a 4-bit integer. */
1382
#define NEON_ALL_LANES 15		/* Empty "[]" index was given.  */
#define NEON_INTERLEAVE_LANES 14	/* No index: structure list.  */
1385
1386 /* Parse either a register or a scalar, with an optional type. Return the
1387 register number, and optionally fill in the actual type of the register
1388 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1389 type/index information in *TYPEINFO. */
1390
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
                           enum arm_reg_type *rtype,
                           struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with a wholly undefined typed alias.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions. */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      /* Only advance the caller's pointer on success.  */
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted. */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Inherit any type/index attached to the register by a .dn/.qn
     alias directive.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix may be given once, and may not clash
     with a type already supplied by an alias.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional scalar syntax: "[index]", or "[]" for all lanes.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1494
1495 /* Like arm_reg_parse, but allow allow the following extra features:
1496 - If RTYPE is non-zero, return the (possibly restricted) type of the
1497 register (e.g. Neon double or quad reg when either has been requested).
1498 - If this is a Neon vector type with additional type information, fill
1499 in the struct pointed to by VECTYPE (if non-NULL).
1500 This function will fault on encountering a scalar. */
1501
1502 static int
1503 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1504 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1505 {
1506 struct neon_typed_alias atype;
1507 char *str = *ccp;
1508 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1509
1510 if (reg == FAIL)
1511 return FAIL;
1512
1513 /* Do not allow regname(... to parse as a register. */
1514 if (*str == '(')
1515 return FAIL;
1516
1517 /* Do not allow a scalar (reg+index) to parse as a register. */
1518 if ((atype.defined & NTA_HASINDEX) != 0)
1519 {
1520 first_error (_("register operand expected, but got scalar"));
1521 return FAIL;
1522 }
1523
1524 if (vectype)
1525 *vectype = atype.eltype;
1526
1527 *ccp = str;
1528
1529 return reg;
1530 }
1531
/* Unpack a parse_scalar result (reg * 16 + index).  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1534
1535 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1536 have enough information to be able to do a good job bounds-checking. So, we
1537 just do easy checks here, and do further checks later. */
1538
1539 static int
1540 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1541 {
1542 int reg;
1543 char *str = *ccp;
1544 struct neon_typed_alias atype;
1545
1546 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1547
1548 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1549 return FAIL;
1550
1551 if (atype.index == NEON_ALL_LANES)
1552 {
1553 first_error (_("scalar must have an index"));
1554 return FAIL;
1555 }
1556 else if (atype.index >= 64 / elsize)
1557 {
1558 first_error (_("scalar index out of range"));
1559 return FAIL;
1560 }
1561
1562 if (type)
1563 *type = atype.eltype;
1564
1565 *ccp = str;
1566
1567 return reg * 16 + atype.index;
1568 }
1569
1570 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1571
1572 static long
1573 parse_reg_list (char ** strp)
1574 {
1575 char * str = * strp;
1576 long range = 0;
1577 int another_range;
1578
1579 /* We come back here if we get ranges concatenated by '+' or '|'. */
1580 do
1581 {
1582 another_range = 0;
1583
1584 if (*str == '{')
1585 {
1586 int in_range = 0;
1587 int cur_reg = -1;
1588
1589 str++;
1590 do
1591 {
1592 int reg;
1593
1594 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1595 {
1596 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1597 return FAIL;
1598 }
1599
1600 if (in_range)
1601 {
1602 int i;
1603
1604 if (reg <= cur_reg)
1605 {
1606 first_error (_("bad range in register list"));
1607 return FAIL;
1608 }
1609
1610 for (i = cur_reg + 1; i < reg; i++)
1611 {
1612 if (range & (1 << i))
1613 as_tsktsk
1614 (_("Warning: duplicated register (r%d) in register list"),
1615 i);
1616 else
1617 range |= 1 << i;
1618 }
1619 in_range = 0;
1620 }
1621
1622 if (range & (1 << reg))
1623 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1624 reg);
1625 else if (reg <= cur_reg)
1626 as_tsktsk (_("Warning: register range not in ascending order"));
1627
1628 range |= 1 << reg;
1629 cur_reg = reg;
1630 }
1631 while (skip_past_comma (&str) != FAIL
1632 || (in_range = 1, *str++ == '-'));
1633 str--;
1634
1635 if (*str++ != '}')
1636 {
1637 first_error (_("missing `}'"));
1638 return FAIL;
1639 }
1640 }
1641 else
1642 {
1643 expressionS exp;
1644
1645 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1646 return FAIL;
1647
1648 if (exp.X_op == O_constant)
1649 {
1650 if (exp.X_add_number
1651 != (exp.X_add_number & 0x0000ffff))
1652 {
1653 inst.error = _("invalid register mask");
1654 return FAIL;
1655 }
1656
1657 if ((range & exp.X_add_number) != 0)
1658 {
1659 int regno = range & exp.X_add_number;
1660
1661 regno &= -regno;
1662 regno = (1 << regno) - 1;
1663 as_tsktsk
1664 (_("Warning: duplicated register (r%d) in register list"),
1665 regno);
1666 }
1667
1668 range |= exp.X_add_number;
1669 }
1670 else
1671 {
1672 if (inst.reloc.type != 0)
1673 {
1674 inst.error = _("expression too complex");
1675 return FAIL;
1676 }
1677
1678 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1679 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1680 inst.reloc.pc_rel = 0;
1681 }
1682 }
1683
1684 if (*str == '|' || *str == '+')
1685 {
1686 str++;
1687 another_range = 1;
1688 }
1689 }
1690 while (another_range);
1691
1692 *strp = str;
1693 return range;
1694 }
1695
1696 /* Types of registers in a list. */
1697
enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision (S) registers.  */
  REGLIST_VFP_D,	/* Double-precision (D) registers.  */
  REGLIST_NEON_D	/* D registers with Neon syntax extensions.  */
};
1704
1705 /* Parse a VFP register list. If the string is invalid return FAIL.
1706 Otherwise return the number of registers, and set PBASE to the first
1707 register. Parses registers of type ETYPE.
1708 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1709 - Q registers can be used to specify pairs of D registers
1710 - { } can be omitted from around a singleton register list
1711 FIXME: This is not implemented, as it would require backtracking in
1712 some cases, e.g.:
1713 vtbl.8 d3,d4,d5
1714 This could be done (the meaning isn't really ambiguous), but doesn't
1715 fit in well with the current parsing framework.
1716 - 32 D registers may be used (also true for VFPv3).
1717 FIXME: Types are ignored in these register lists, which is probably a
1718 bug. */
1719
1720 static int
1721 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1722 {
1723 char *str = *ccp;
1724 int base_reg;
1725 int new_base;
1726 enum arm_reg_type regtype = (enum arm_reg_type) 0;
1727 int max_regs = 0;
1728 int count = 0;
1729 int warned = 0;
1730 unsigned long mask = 0;
1731 int i;
1732
1733 if (*str != '{')
1734 {
1735 inst.error = _("expecting {");
1736 return FAIL;
1737 }
1738
1739 str++;
1740
1741 switch (etype)
1742 {
1743 case REGLIST_VFP_S:
1744 regtype = REG_TYPE_VFS;
1745 max_regs = 32;
1746 break;
1747
1748 case REGLIST_VFP_D:
1749 regtype = REG_TYPE_VFD;
1750 break;
1751
1752 case REGLIST_NEON_D:
1753 regtype = REG_TYPE_NDQ;
1754 break;
1755 }
1756
1757 if (etype != REGLIST_VFP_S)
1758 {
1759 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1760 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1761 {
1762 max_regs = 32;
1763 if (thumb_mode)
1764 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1765 fpu_vfp_ext_d32);
1766 else
1767 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1768 fpu_vfp_ext_d32);
1769 }
1770 else
1771 max_regs = 16;
1772 }
1773
1774 base_reg = max_regs;
1775
1776 do
1777 {
1778 int setmask = 1, addregs = 1;
1779
1780 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1781
1782 if (new_base == FAIL)
1783 {
1784 first_error (_(reg_expected_msgs[regtype]));
1785 return FAIL;
1786 }
1787
1788 if (new_base >= max_regs)
1789 {
1790 first_error (_("register out of range in list"));
1791 return FAIL;
1792 }
1793
1794 /* Note: a value of 2 * n is returned for the register Q<n>. */
1795 if (regtype == REG_TYPE_NQ)
1796 {
1797 setmask = 3;
1798 addregs = 2;
1799 }
1800
1801 if (new_base < base_reg)
1802 base_reg = new_base;
1803
1804 if (mask & (setmask << new_base))
1805 {
1806 first_error (_("invalid register list"));
1807 return FAIL;
1808 }
1809
1810 if ((mask >> new_base) != 0 && ! warned)
1811 {
1812 as_tsktsk (_("register list not in ascending order"));
1813 warned = 1;
1814 }
1815
1816 mask |= setmask << new_base;
1817 count += addregs;
1818
1819 if (*str == '-') /* We have the start of a range expression */
1820 {
1821 int high_range;
1822
1823 str++;
1824
1825 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1826 == FAIL)
1827 {
1828 inst.error = gettext (reg_expected_msgs[regtype]);
1829 return FAIL;
1830 }
1831
1832 if (high_range >= max_regs)
1833 {
1834 first_error (_("register out of range in list"));
1835 return FAIL;
1836 }
1837
1838 if (regtype == REG_TYPE_NQ)
1839 high_range = high_range + 1;
1840
1841 if (high_range <= new_base)
1842 {
1843 inst.error = _("register range not in ascending order");
1844 return FAIL;
1845 }
1846
1847 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1848 {
1849 if (mask & (setmask << new_base))
1850 {
1851 inst.error = _("invalid register list");
1852 return FAIL;
1853 }
1854
1855 mask |= setmask << new_base;
1856 count += addregs;
1857 }
1858 }
1859 }
1860 while (skip_past_comma (&str) != FAIL);
1861
1862 str++;
1863
1864 /* Sanity check -- should have raised a parse error above. */
1865 if (count == 0 || count > max_regs)
1866 abort ();
1867
1868 *pbase = base_reg;
1869
1870 /* Final test -- the registers must be consecutive. */
1871 mask >>= base_reg;
1872 for (i = 0; i < count; i++)
1873 {
1874 if ((mask & (1u << i)) == 0)
1875 {
1876 inst.error = _("non-contiguous register range");
1877 return FAIL;
1878 }
1879 }
1880
1881 *ccp = str;
1882
1883 return count;
1884 }
1885
1886 /* True if two alias types are the same. */
1887
1888 static bfd_boolean
1889 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1890 {
1891 if (!a && !b)
1892 return TRUE;
1893
1894 if (!a || !b)
1895 return FALSE;
1896
1897 if (a->defined != b->defined)
1898 return FALSE;
1899
1900 if ((a->defined & NTA_HASTYPE) != 0
1901 && (a->eltype.type != b->eltype.type
1902 || a->eltype.size != b->eltype.size))
1903 return FALSE;
1904
1905 if ((a->defined & NTA_HASINDEX) != 0
1906 && (a->index != b->index))
1907 return FALSE;
1908
1909 return TRUE;
1910 }
1911
1912 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1913 The base register is put in *PBASE.
1914 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1915 the return value.
1916 The register stride (minus one) is put in bit 4 of the return value.
1917 Bits [6:5] encode the list length (minus one).
1918 The type of the list elements is put in *ELTYPE, if non-NULL. */
1919
1920 #define NEON_LANE(X) ((X) & 0xf)
1921 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1922 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1923
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
                           struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;	/* Register stride; -1 until determined.  */
  int count = 0;	/* Number of D registers accumulated.  */
  int lane = -1;	/* Lane index, or a NEON_*_LANES constant.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;

  /* The braces may be omitted around a singleton list.  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: fixes the base, the reference type, and
	     (for Q registers) the stride.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: determines the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Later registers must continue the established stride.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      /* Every element must carry the same type/index information.  */
      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes. */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register covered by the range.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax. */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All indexed elements must name the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures. */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check. */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Encode lane in [3:0], stride-1 in bit 4, count-1 in [6:5].  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2076
2077 /* Parse an explicit relocation suffix on an expression. This is
2078 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2079 arm_reloc_hsh contains no entries, so this function can only
2080 succeed if there is no () after the word. Returns -1 on error,
2081 BFD_RELOC_UNUSED if there wasn't any suffix. */
2082
2083 static int
2084 parse_reloc (char **str)
2085 {
2086 struct reloc_entry *r;
2087 char *p, *q;
2088
2089 if (**str != '(')
2090 return BFD_RELOC_UNUSED;
2091
2092 p = *str + 1;
2093 q = p;
2094
2095 while (*q && *q != ')' && *q != ',')
2096 q++;
2097 if (*q != ')')
2098 return -1;
2099
2100 if ((r = (struct reloc_entry *)
2101 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2102 return -1;
2103
2104 *str = q + 1;
2105 return r->reloc;
2106 }
2107
2108 /* Directives: register aliases. */
2109
2110 static struct reg_entry *
2111 insert_reg_alias (char *str, unsigned number, int type)
2112 {
2113 struct reg_entry *new_reg;
2114 const char *name;
2115
2116 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2117 {
2118 if (new_reg->builtin)
2119 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2120
2121 /* Only warn about a redefinition if it's not defined as the
2122 same register. */
2123 else if (new_reg->number != number || new_reg->type != type)
2124 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2125
2126 return NULL;
2127 }
2128
2129 name = xstrdup (str);
2130 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2131
2132 new_reg->name = name;
2133 new_reg->number = number;
2134 new_reg->type = type;
2135 new_reg->builtin = FALSE;
2136 new_reg->neon = NULL;
2137
2138 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2139 abort ();
2140
2141 return new_reg;
2142 }
2143
2144 static void
2145 insert_neon_reg_alias (char *str, int number, int type,
2146 struct neon_typed_alias *atype)
2147 {
2148 struct reg_entry *reg = insert_reg_alias (str, number, type);
2149
2150 if (!reg)
2151 {
2152 first_error (_("attempt to redefine typed alias"));
2153 return;
2154 }
2155
2156 if (atype)
2157 {
2158 reg->neon = (struct neon_typed_alias *)
2159 xmalloc (sizeof (struct neon_typed_alias));
2160 *reg->neon = *atype;
2161 }
2162 }
2163
2164 /* Look for the .req directive. This is of the form:
2165
2166 new_register_name .req existing_register_name
2167
2168 If we find one, or if it looks sufficiently like one that we want to
2169 handle any error here, return TRUE. Otherwise return FALSE. */
2170
2171 static bfd_boolean
2172 create_register_alias (char * newname, char *p)
2173 {
2174 struct reg_entry *old;
2175 char *oldname, *nbuf;
2176 size_t nlen;
2177
2178 /* The input scrubber ensures that whitespace after the mnemonic is
2179 collapsed to single spaces. */
2180 oldname = p;
2181 if (strncmp (oldname, " .req ", 6) != 0)
2182 return FALSE;
2183
2184 oldname += 6;
2185 if (*oldname == '\0')
2186 return FALSE;
2187
2188 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2189 if (!old)
2190 {
2191 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2192 return TRUE;
2193 }
2194
2195 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2196 the desired alias name, and p points to its end. If not, then
2197 the desired alias name is in the global original_case_string. */
2198 #ifdef TC_CASE_SENSITIVE
2199 nlen = p - newname;
2200 #else
2201 newname = original_case_string;
2202 nlen = strlen (newname);
2203 #endif
2204
2205 nbuf = (char *) alloca (nlen + 1);
2206 memcpy (nbuf, newname, nlen);
2207 nbuf[nlen] = '\0';
2208
2209 /* Create aliases under the new name as stated; an all-lowercase
2210 version of the new name; and an all-uppercase version of the new
2211 name. */
2212 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2213 {
2214 for (p = nbuf; *p; p++)
2215 *p = TOUPPER (*p);
2216
2217 if (strncmp (nbuf, newname, nlen))
2218 {
2219 /* If this attempt to create an additional alias fails, do not bother
2220 trying to create the all-lower case alias. We will fail and issue
2221 a second, duplicate error message. This situation arises when the
2222 programmer does something like:
2223 foo .req r0
2224 Foo .req r1
2225 The second .req creates the "Foo" alias but then fails to create
2226 the artificial FOO alias because it has already been created by the
2227 first .req. */
2228 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2229 return TRUE;
2230 }
2231
2232 for (p = nbuf; *p; p++)
2233 *p = TOLOWER (*p);
2234
2235 if (strncmp (nbuf, newname, nlen))
2236 insert_reg_alias (nbuf, old->number, old->type);
2237 }
2238
2239 return TRUE;
2240 }
2241
2242 /* Create a Neon typed/indexed register alias using directives, e.g.:
2243 X .dn d5.s32[1]
2244 Y .qn 6.s16
2245 Z .dn d7
2246 T .dn Z[0]
2247 These typed registers can be used instead of the types specified after the
2248 Neon mnemonic, so long as all operands given have types. Types can also be
2249 specified directly, e.g.:
2250 vadd d0.s32, d1.s32, d2.s32 */
2251
2252 static bfd_boolean
2253 create_neon_reg_alias (char *newname, char *p)
2254 {
2255 enum arm_reg_type basetype;
2256 struct reg_entry *basereg;
2257 struct reg_entry mybasereg;
2258 struct neon_type ntype;
2259 struct neon_typed_alias typeinfo;
2260 char *namebuf, *nameend ATTRIBUTE_UNUSED;
2261 int namelen;
2262
2263 typeinfo.defined = 0;
2264 typeinfo.eltype.type = NT_invtype;
2265 typeinfo.eltype.size = -1;
2266 typeinfo.index = -1;
2267
2268 nameend = p;
2269
2270 if (strncmp (p, " .dn ", 5) == 0)
2271 basetype = REG_TYPE_VFD;
2272 else if (strncmp (p, " .qn ", 5) == 0)
2273 basetype = REG_TYPE_NQ;
2274 else
2275 return FALSE;
2276
2277 p += 5;
2278
2279 if (*p == '\0')
2280 return FALSE;
2281
2282 basereg = arm_reg_parse_multi (&p);
2283
2284 if (basereg && basereg->type != basetype)
2285 {
2286 as_bad (_("bad type for register"));
2287 return FALSE;
2288 }
2289
2290 if (basereg == NULL)
2291 {
2292 expressionS exp;
2293 /* Try parsing as an integer. */
2294 my_get_expression (&exp, &p, GE_NO_PREFIX);
2295 if (exp.X_op != O_constant)
2296 {
2297 as_bad (_("expression must be constant"));
2298 return FALSE;
2299 }
2300 basereg = &mybasereg;
2301 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2302 : exp.X_add_number;
2303 basereg->neon = 0;
2304 }
2305
2306 if (basereg->neon)
2307 typeinfo = *basereg->neon;
2308
2309 if (parse_neon_type (&ntype, &p) == SUCCESS)
2310 {
2311 /* We got a type. */
2312 if (typeinfo.defined & NTA_HASTYPE)
2313 {
2314 as_bad (_("can't redefine the type of a register alias"));
2315 return FALSE;
2316 }
2317
2318 typeinfo.defined |= NTA_HASTYPE;
2319 if (ntype.elems != 1)
2320 {
2321 as_bad (_("you must specify a single type only"));
2322 return FALSE;
2323 }
2324 typeinfo.eltype = ntype.el[0];
2325 }
2326
2327 if (skip_past_char (&p, '[') == SUCCESS)
2328 {
2329 expressionS exp;
2330 /* We got a scalar index. */
2331
2332 if (typeinfo.defined & NTA_HASINDEX)
2333 {
2334 as_bad (_("can't redefine the index of a scalar alias"));
2335 return FALSE;
2336 }
2337
2338 my_get_expression (&exp, &p, GE_NO_PREFIX);
2339
2340 if (exp.X_op != O_constant)
2341 {
2342 as_bad (_("scalar index must be constant"));
2343 return FALSE;
2344 }
2345
2346 typeinfo.defined |= NTA_HASINDEX;
2347 typeinfo.index = exp.X_add_number;
2348
2349 if (skip_past_char (&p, ']') == FAIL)
2350 {
2351 as_bad (_("expecting ]"));
2352 return FALSE;
2353 }
2354 }
2355
2356 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2357 the desired alias name, and p points to its end. If not, then
2358 the desired alias name is in the global original_case_string. */
2359 #ifdef TC_CASE_SENSITIVE
2360 namelen = nameend - newname;
2361 #else
2362 newname = original_case_string;
2363 namelen = strlen (newname);
2364 #endif
2365
2366 namebuf = (char *) alloca (namelen + 1);
2367 strncpy (namebuf, newname, namelen);
2368 namebuf[namelen] = '\0';
2369
2370 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2371 typeinfo.defined != 0 ? &typeinfo : NULL);
2372
2373 /* Insert name in all uppercase. */
2374 for (p = namebuf; *p; p++)
2375 *p = TOUPPER (*p);
2376
2377 if (strncmp (namebuf, newname, namelen))
2378 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2379 typeinfo.defined != 0 ? &typeinfo : NULL);
2380
2381 /* Insert name in all lowercase. */
2382 for (p = namebuf; *p; p++)
2383 *p = TOLOWER (*p);
2384
2385 if (strncmp (namebuf, newname, namelen))
2386 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2387 typeinfo.defined != 0 ? &typeinfo : NULL);
2388
2389 return TRUE;
2390 }
2391
/* Handle a ".req" token seen in the directive position.  This should
   never succeed: a valid .req goes between the alias and the register
   name (handled by create_register_alias), not at the beginning of
   the line.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
2400
/* Handle a ".dn" token seen in the directive position.  Like .req,
   the valid form is infix (alias .dn dreg...) and is handled by
   create_neon_reg_alias, so reaching here is always an error.  */

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2406
/* Handle a ".qn" token seen in the directive position.  Like .req,
   the valid form is infix (alias .qn qreg...) and is handled by
   create_neon_reg_alias, so reaching here is always an error.  */

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2412
2413 /* The .unreq directive deletes an alias which was previously defined
2414 by .req. For example:
2415
2416 my_alias .req r11
2417 .unreq my_alias */
2418
2419 static void
2420 s_unreq (int a ATTRIBUTE_UNUSED)
2421 {
2422 char * name;
2423 char saved_char;
2424
2425 name = input_line_pointer;
2426
2427 while (*input_line_pointer != 0
2428 && *input_line_pointer != ' '
2429 && *input_line_pointer != '\n')
2430 ++input_line_pointer;
2431
2432 saved_char = *input_line_pointer;
2433 *input_line_pointer = 0;
2434
2435 if (!*name)
2436 as_bad (_("invalid syntax for .unreq directive"));
2437 else
2438 {
2439 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2440 name);
2441
2442 if (!reg)
2443 as_bad (_("unknown register alias '%s'"), name);
2444 else if (reg->builtin)
2445 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2446 name);
2447 else
2448 {
2449 char * p;
2450 char * nbuf;
2451
2452 hash_delete (arm_reg_hsh, name, FALSE);
2453 free ((char *) reg->name);
2454 if (reg->neon)
2455 free (reg->neon);
2456 free (reg);
2457
2458 /* Also locate the all upper case and all lower case versions.
2459 Do not complain if we cannot find one or the other as it
2460 was probably deleted above. */
2461
2462 nbuf = strdup (name);
2463 for (p = nbuf; *p; p++)
2464 *p = TOUPPER (*p);
2465 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2466 if (reg)
2467 {
2468 hash_delete (arm_reg_hsh, nbuf, FALSE);
2469 free ((char *) reg->name);
2470 if (reg->neon)
2471 free (reg->neon);
2472 free (reg);
2473 }
2474
2475 for (p = nbuf; *p; p++)
2476 *p = TOLOWER (*p);
2477 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2478 if (reg)
2479 {
2480 hash_delete (arm_reg_hsh, nbuf, FALSE);
2481 free ((char *) reg->name);
2482 if (reg->neon)
2483 free (reg->neon);
2484 free (reg);
2485 }
2486
2487 free (nbuf);
2488 }
2489 }
2490
2491 *input_line_pointer = saved_char;
2492 demand_empty_rest_of_line ();
2493 }
2494
2495 /* Directives: Instruction set selection. */
2496
2497 #ifdef OBJ_ELF
2498 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2499 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
2502
/* Create a new mapping symbol for the transition to STATE, at offset
   VALUE within FRAG.  The symbols $a, $t and $d mark the start of ARM
   code, Thumb code and data respectively.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the conventional name for the mapping symbol; all three are
     untyped (see the note above).  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Record the instruction-set state on the symbol itself.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2578
2579 /* We must sometimes convert a region marked as code to data during
2580 code alignment, if an odd number of bytes have to be padded. The
2581 code mapping symbol is pushed to an aligned address. */
2582
2583 static void
2584 insert_data_mapping_symbol (enum mstate state,
2585 valueT value, fragS *frag, offsetT bytes)
2586 {
2587 /* If there was already a mapping symbol, remove it. */
2588 if (frag->tc_frag_data.last_map != NULL
2589 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2590 {
2591 symbolS *symp = frag->tc_frag_data.last_map;
2592
2593 if (value == 0)
2594 {
2595 know (frag->tc_frag_data.first_map == symp);
2596 frag->tc_frag_data.first_map = NULL;
2597 }
2598 frag->tc_frag_data.last_map = NULL;
2599 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2600 }
2601
2602 make_mapping_symbol (MAP_DATA, value, frag);
2603 make_mapping_symbol (state, value + bytes, frag);
2604 }
2605
2606 static void mapping_state_2 (enum mstate state, int max_chars);
2607
2608 /* Set the mapping state to STATE. Only call this when about to
2609 emit some STATE bytes to the file. */
2610
2611 void
2612 mapping_state (enum mstate state)
2613 {
2614 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2615
2616 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2617
2618 if (mapstate == state)
2619 /* The mapping symbol has already been emitted.
2620 There is nothing else to do. */
2621 return;
2622
2623 if (state == MAP_ARM || state == MAP_THUMB)
2624 /* PR gas/12931
2625 All ARM instructions require 4-byte alignment.
2626 (Almost) all Thumb instructions require 2-byte alignment.
2627
2628 When emitting instructions into any section, mark the section
2629 appropriately.
2630
2631 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2632 but themselves require 2-byte alignment; this applies to some
2633 PC- relative forms. However, these cases will invovle implicit
2634 literal pool generation or an explicit .align >=2, both of
2635 which will cause the section to me marked with sufficient
2636 alignment. Thus, we don't handle those cases here. */
2637 record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
2638
2639 if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2640 /* This case will be evaluated later in the next else. */
2641 return;
2642 else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2643 || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2644 {
2645 /* Only add the symbol if the offset is > 0:
2646 if we're at the first frag, check it's size > 0;
2647 if we're not at the first frag, then for sure
2648 the offset is > 0. */
2649 struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2650 const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2651
2652 if (add_symbol)
2653 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2654 }
2655
2656 mapping_state_2 (state, 0);
2657 #undef TRANSITION
2658 }
2659
2660 /* Same as mapping_state, but MAX_CHARS bytes have already been
2661 allocated. Put the mapping symbol that far back. */
2662
2663 static void
2664 mapping_state_2 (enum mstate state, int max_chars)
2665 {
2666 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2667
2668 if (!SEG_NORMAL (now_seg))
2669 return;
2670
2671 if (mapstate == state)
2672 /* The mapping symbol has already been emitted.
2673 There is nothing else to do. */
2674 return;
2675
2676 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2677 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2678 }
2679 #else
2680 #define mapping_state(x) ((void)0)
2681 #define mapping_state_2(x, y) ((void)0)
2682 #endif
2683
2684 /* Find the real, Thumb encoded start of a Thumb function. */
2685
2686 #ifdef OBJ_COFF
2687 static symbolS *
2688 find_real_start (symbolS * symbolP)
2689 {
2690 char * real_start;
2691 const char * name = S_GET_NAME (symbolP);
2692 symbolS * new_target;
2693
2694 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2695 #define STUB_NAME ".real_start_of"
2696
2697 if (name == NULL)
2698 abort ();
2699
2700 /* The compiler may generate BL instructions to local labels because
2701 it needs to perform a branch to a far away location. These labels
2702 do not have a corresponding ".real_start_of" label. We check
2703 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2704 the ".real_start_of" convention for nonlocal branches. */
2705 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2706 return symbolP;
2707
2708 real_start = ACONCAT ((STUB_NAME, name, NULL));
2709 new_target = symbol_find (real_start);
2710
2711 if (new_target == NULL)
2712 {
2713 as_warn (_("Failed to find real start of function: %s\n"), name);
2714 new_target = symbolP;
2715 }
2716
2717 return new_target;
2718 }
2719 #endif
2720
2721 static void
2722 opcode_select (int width)
2723 {
2724 switch (width)
2725 {
2726 case 16:
2727 if (! thumb_mode)
2728 {
2729 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2730 as_bad (_("selected processor does not support THUMB opcodes"));
2731
2732 thumb_mode = 1;
2733 /* No need to force the alignment, since we will have been
2734 coming from ARM mode, which is word-aligned. */
2735 record_alignment (now_seg, 1);
2736 }
2737 break;
2738
2739 case 32:
2740 if (thumb_mode)
2741 {
2742 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2743 as_bad (_("selected processor does not support ARM opcodes"));
2744
2745 thumb_mode = 0;
2746
2747 if (!need_pass_2)
2748 frag_align (2, 0, 0);
2749
2750 record_alignment (now_seg, 1);
2751 }
2752 break;
2753
2754 default:
2755 as_bad (_("invalid instruction size selected (%d)"), width);
2756 }
2757 }
2758
/* Handle the .arm directive: switch to 32-bit ARM instruction mode.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2765
/* Handle the .thumb directive: switch to 16-bit Thumb instruction mode.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2772
2773 static void
2774 s_code (int unused ATTRIBUTE_UNUSED)
2775 {
2776 int temp;
2777
2778 temp = get_absolute_expression ();
2779 switch (temp)
2780 {
2781 case 16:
2782 case 32:
2783 opcode_select (temp);
2784 break;
2785
2786 default:
2787 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2788 }
2789 }
2790
/* Handle the .force_thumb directive: enter Thumb mode unconditionally.  */

static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* NOTE(review): the value 2 (rather than 1) appears to mark
	 "forced" Thumb mode -- confirm against other uses of
	 thumb_mode.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2807
/* Handle the .thumb_func directive: switch to Thumb mode and mark the
   next label as the name of a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2817
/* Perform a .set directive, but also mark the alias as
   being a thumb function.  If EQUIV is non-zero, report an error when
   the symbol is already defined (i.e. .equiv rather than .set
   semantics).  */

static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  name = input_line_pointer;
  delim = get_symbol_end ();
  end_name = input_line_pointer;
  /* Temporarily restore the delimiter that get_symbol_end replaced
     with NUL, so the comma check below sees the real input.  */
  *end_name = delim;

  if (*input_line_pointer != ',')
    {
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Put the saved delimiter back before parsing the value.  */
  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Parse the value expression and assign it to the symbol.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2907
2908 /* Directives: Mode selection. */
2909
2910 /* .syntax [unified|divided] - choose the new unified syntax
2911 (same for Arm and Thumb encoding, modulo slight differences in what
2912 can be represented) or the old divergent syntax for each mode. */
2913 static void
2914 s_syntax (int unused ATTRIBUTE_UNUSED)
2915 {
2916 char *name, delim;
2917
2918 name = input_line_pointer;
2919 delim = get_symbol_end ();
2920
2921 if (!strcasecmp (name, "unified"))
2922 unified_syntax = TRUE;
2923 else if (!strcasecmp (name, "divided"))
2924 unified_syntax = FALSE;
2925 else
2926 {
2927 as_bad (_("unrecognized syntax mode \"%s\""), name);
2928 return;
2929 }
2930 *input_line_pointer = delim;
2931 demand_empty_rest_of_line ();
2932 }
2933
2934 /* Directives: sectioning and alignment. */
2935
2936 /* Same as s_align_ptwo but align 0 => align 2. */
2937
2938 static void
2939 s_align (int unused ATTRIBUTE_UNUSED)
2940 {
2941 int temp;
2942 bfd_boolean fill_p;
2943 long temp_fill;
2944 long max_alignment = 15;
2945
2946 temp = get_absolute_expression ();
2947 if (temp > max_alignment)
2948 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2949 else if (temp < 0)
2950 {
2951 as_bad (_("alignment negative. 0 assumed."));
2952 temp = 0;
2953 }
2954
2955 if (*input_line_pointer == ',')
2956 {
2957 input_line_pointer++;
2958 temp_fill = get_absolute_expression ();
2959 fill_p = TRUE;
2960 }
2961 else
2962 {
2963 fill_p = FALSE;
2964 temp_fill = 0;
2965 }
2966
2967 if (!temp)
2968 temp = 2;
2969
2970 /* Only make a frag if we HAVE to. */
2971 if (temp && !need_pass_2)
2972 {
2973 if (!fill_p && subseg_text_p (now_seg))
2974 frag_align_code (temp, 0);
2975 else
2976 frag_align (temp, (int) temp_fill, 0);
2977 }
2978 demand_empty_rest_of_line ();
2979
2980 record_alignment (now_seg, temp);
2981 }
2982
/* Handle the .bss directive: switch output to the bss section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
2995
/* Handle the .even directive: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3007
3008 /* Directives: Literal pools. */
3009
3010 static literal_pool *
3011 find_literal_pool (void)
3012 {
3013 literal_pool * pool;
3014
3015 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3016 {
3017 if (pool->section == now_seg
3018 && pool->sub_section == now_subseg)
3019 break;
3020 }
3021
3022 return pool;
3023 }
3024
3025 static literal_pool *
3026 find_or_make_literal_pool (void)
3027 {
3028 /* Next literal pool ID number. */
3029 static unsigned int latest_pool_num = 1;
3030 literal_pool * pool;
3031
3032 pool = find_literal_pool ();
3033
3034 if (pool == NULL)
3035 {
3036 /* Create a new pool. */
3037 pool = (literal_pool *) xmalloc (sizeof (* pool));
3038 if (! pool)
3039 return NULL;
3040
3041 pool->next_free_entry = 0;
3042 pool->section = now_seg;
3043 pool->sub_section = now_subseg;
3044 pool->next = list_of_pools;
3045 pool->symbol = NULL;
3046
3047 /* Add it to the list. */
3048 list_of_pools = pool;
3049 }
3050
3051 /* New pools, and emptied pools, will have a NULL symbol. */
3052 if (pool->symbol == NULL)
3053 {
3054 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3055 (valueT) 0, &zero_address_frag);
3056 pool->id = latest_pool_num ++;
3057 }
3058
3059 /* Done. */
3060 return pool;
3061 }
3062
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.  On SUCCESS, rewrites
   inst.reloc.exp to refer to the pool's symbol plus a word offset;
   returns FAIL with inst.error set if the pool is full.  */

static int
add_to_lit_pool (void)
{
  literal_pool * pool;
  unsigned int entry;

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      /* Constants match on value and signedness...  */
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_constant)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_unsigned
	      == inst.reloc.exp.X_unsigned))
	break;

      /* ...symbolic expressions must match symbol, op symbol and
	 addend.  */
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_symbol)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_add_symbol
	      == inst.reloc.exp.X_add_symbol)
	  && (pool->literals[entry].X_op_symbol
	      == inst.reloc.exp.X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      pool->literals[entry] = inst.reloc.exp;
#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }

  /* Point the instruction's relocation at the pool entry: the pool's
     symbol plus a 4-byte word offset.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3123
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Fill in SYMBOLP (previously made with symbol_create or similar)
   with its name, segment, value and fragment, and append it to the
   symbol chain, running the usual new-symbol hooks.  */

static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.	 */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.	 */
{
  unsigned int name_length;
  char * preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns stable
     storage for it.  */
  name_length = strlen (name) + 1; /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3174
3175
3176 static void
3177 s_ltorg (int ignored ATTRIBUTE_UNUSED)
3178 {
3179 unsigned int entry;
3180 literal_pool * pool;
3181 char sym_name[20];
3182
3183 pool = find_literal_pool ();
3184 if (pool == NULL
3185 || pool->symbol == NULL
3186 || pool->next_free_entry == 0)
3187 return;
3188
3189 mapping_state (MAP_DATA);
3190
3191 /* Align pool as you have word accesses.
3192 Only make a frag if we have to. */
3193 if (!need_pass_2)
3194 frag_align (2, 0, 0);
3195
3196 record_alignment (now_seg, 2);
3197
3198 sprintf (sym_name, "$$lit_\002%x", pool->id);
3199
3200 symbol_locate (pool->symbol, sym_name, now_seg,
3201 (valueT) frag_now_fix (), frag_now);
3202 symbol_table_insert (pool->symbol);
3203
3204 ARM_SET_THUMB (pool->symbol, thumb_mode);
3205
3206 #if defined OBJ_COFF || defined OBJ_ELF
3207 ARM_SET_INTERWORK (pool->symbol, support_interwork);
3208 #endif
3209
3210 for (entry = 0; entry < pool->next_free_entry; entry ++)
3211 {
3212 #ifdef OBJ_ELF
3213 if (debug_type == DEBUG_DWARF2)
3214 dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
3215 #endif
3216 /* First output the expression in the instruction to the pool. */
3217 emit_expr (&(pool->literals[entry]), 4); /* .word */
3218 }
3219
3220 /* Mark the pool as empty. */
3221 pool->next_free_entry = 0;
3222 pool->symbol = NULL;
3223 }
3224
3225 #ifdef OBJ_ELF
3226 /* Forward declarations for functions below, in the MD interface
3227 section. */
3228 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3229 static valueT create_unwind_entry (int);
3230 static void start_unwind_section (const segT, int);
3231 static void add_unwind_opcode (valueT, int);
3232 static void flush_pending_unwind (void);
3233
3234 /* Directives: Data. */
3235
3236 static void
3237 s_arm_elf_cons (int nbytes)
3238 {
3239 expressionS exp;
3240
3241 #ifdef md_flush_pending_output
3242 md_flush_pending_output ();
3243 #endif
3244
3245 if (is_it_end_of_statement ())
3246 {
3247 demand_empty_rest_of_line ();
3248 return;
3249 }
3250
3251 #ifdef md_cons_align
3252 md_cons_align (nbytes);
3253 #endif
3254
3255 mapping_state (MAP_DATA);
3256 do
3257 {
3258 int reloc;
3259 char *base = input_line_pointer;
3260
3261 expression (& exp);
3262
3263 if (exp.X_op != O_symbol)
3264 emit_expr (&exp, (unsigned int) nbytes);
3265 else
3266 {
3267 char *before_reloc = input_line_pointer;
3268 reloc = parse_reloc (&input_line_pointer);
3269 if (reloc == -1)
3270 {
3271 as_bad (_("unrecognized relocation suffix"));
3272 ignore_rest_of_line ();
3273 return;
3274 }
3275 else if (reloc == BFD_RELOC_UNUSED)
3276 emit_expr (&exp, (unsigned int) nbytes);
3277 else
3278 {
3279 reloc_howto_type *howto = (reloc_howto_type *)
3280 bfd_reloc_type_lookup (stdoutput,
3281 (bfd_reloc_code_real_type) reloc);
3282 int size = bfd_get_reloc_size (howto);
3283
3284 if (reloc == BFD_RELOC_ARM_PLT32)
3285 {
3286 as_bad (_("(plt) is only valid on branch targets"));
3287 reloc = BFD_RELOC_UNUSED;
3288 size = 0;
3289 }
3290
3291 if (size > nbytes)
3292 as_bad (_("%s relocations do not fit in %d bytes"),
3293 howto->name, nbytes);
3294 else
3295 {
3296 /* We've parsed an expression stopping at O_symbol.
3297 But there may be more expression left now that we
3298 have parsed the relocation marker. Parse it again.
3299 XXX Surely there is a cleaner way to do this. */
3300 char *p = input_line_pointer;
3301 int offset;
3302 char *save_buf = (char *) alloca (input_line_pointer - base);
3303 memcpy (save_buf, base, input_line_pointer - base);
3304 memmove (base + (input_line_pointer - before_reloc),
3305 base, before_reloc - base);
3306
3307 input_line_pointer = base + (input_line_pointer-before_reloc);
3308 expression (&exp);
3309 memcpy (base, save_buf, p - base);
3310
3311 offset = nbytes - size;
3312 p = frag_more ((int) nbytes);
3313 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3314 size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3315 }
3316 }
3317 }
3318 }
3319 while (*input_line_pointer++ == ',');
3320
3321 /* Put terminator back into stream. */
3322 input_line_pointer --;
3323 demand_empty_rest_of_line ();
3324 }
3325
3326 /* Emit an expression containing a 32-bit thumb instruction.
3327 Implementation based on put_thumb32_insn. */
3328
3329 static void
3330 emit_thumb32_expr (expressionS * exp)
3331 {
3332 expressionS exp_high = *exp;
3333
3334 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3335 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3336 exp->X_add_number &= 0xffff;
3337 emit_expr (exp, (unsigned int) THUMB_SIZE);
3338 }
3339
/* Guess the instruction size based on the opcode.  Values below
   0xe800 are 16-bit Thumb encodings; values from 0xe8000000 upward
   are 32-bit encodings; anything in between is ambiguous and yields
   0.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int op = (unsigned int) opcode;

  if (op < 0xe800u)
    return 2;
  if (op >= 0xe8000000u)
    return 4;
  return 0;
}
3352
/* Emit the constant expression EXP as one instruction of NBYTES bytes
   (NBYTES == 0 means deduce the width from the opcode value).
   Returns TRUE if something was emitted, FALSE after reporting an
   error.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width suffix: guess from the opcode value.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  /* A .inst.n operand must fit in a 16-bit encoding.  */
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block state machine consistent with the
		 hand-emitted instruction.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* On little-endian targets a 32-bit Thumb instruction
		 is emitted as two half-words.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3397
3398 /* Like s_arm_elf_cons but do not use md_cons_align and
3399 set the mapping state to MAP_ARM/MAP_THUMB. */
3400
3401 static void
3402 s_arm_elf_inst (int nbytes)
3403 {
3404 if (is_it_end_of_statement ())
3405 {
3406 demand_empty_rest_of_line ();
3407 return;
3408 }
3409
3410 /* Calling mapping_state () here will not change ARM/THUMB,
3411 but will ensure not to be in DATA state. */
3412
3413 if (thumb_mode)
3414 mapping_state (MAP_THUMB);
3415 else
3416 {
3417 if (nbytes != 0)
3418 {
3419 as_bad (_("width suffixes are invalid in ARM mode"));
3420 ignore_rest_of_line ();
3421 return;
3422 }
3423
3424 nbytes = 4;
3425
3426 mapping_state (MAP_ARM);
3427 }
3428
3429 do
3430 {
3431 expressionS exp;
3432
3433 expression (& exp);
3434
3435 if (! emit_insn (& exp, nbytes))
3436 {
3437 ignore_rest_of_line ();
3438 return;
3439 }
3440 }
3441 while (*input_line_pointer++ == ',');
3442
3443 /* Put terminator back into stream. */
3444 input_line_pointer --;
3445 demand_empty_rest_of_line ();
3446 }
3447
3448 /* Parse a .rel31 directive. */
3449
3450 static void
3451 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3452 {
3453 expressionS exp;
3454 char *p;
3455 valueT highbit;
3456
3457 highbit = 0;
3458 if (*input_line_pointer == '1')
3459 highbit = 0x80000000;
3460 else if (*input_line_pointer != '0')
3461 as_bad (_("expected 0 or 1"));
3462
3463 input_line_pointer++;
3464 if (*input_line_pointer != ',')
3465 as_bad (_("missing comma"));
3466 input_line_pointer++;
3467
3468 #ifdef md_flush_pending_output
3469 md_flush_pending_output ();
3470 #endif
3471
3472 #ifdef md_cons_align
3473 md_cons_align (4);
3474 #endif
3475
3476 mapping_state (MAP_DATA);
3477
3478 expression (&exp);
3479
3480 p = frag_more (4);
3481 md_number_to_chars (p, highbit, 4);
3482 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3483 BFD_RELOC_ARM_PREL31);
3484
3485 demand_empty_rest_of_line ();
3486 }
3487
3488 /* Directives: AEABI stack-unwind tables. */
3489
3490 /* Parse an unwind_fnstart directive. Simply records the current location. */
3491
3492 static void
3493 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3494 {
3495 demand_empty_rest_of_line ();
3496 if (unwind.proc_start)
3497 {
3498 as_bad (_("duplicate .fnstart directive"));
3499 return;
3500 }
3501
3502 /* Mark the start of the function. */
3503 unwind.proc_start = expr_build_dot ();
3504
3505 /* Reset the rest of the unwind info. */
3506 unwind.opcode_count = 0;
3507 unwind.table_entry = NULL;
3508 unwind.personality_routine = NULL;
3509 unwind.personality_index = -1;
3510 unwind.frame_size = 0;
3511 unwind.fp_offset = 0;
3512 unwind.fp_reg = REG_SP;
3513 unwind.fp_used = 0;
3514 unwind.sp_restored = 0;
3515 }
3516
3517
3518 /* Parse a handlerdata directive. Creates the exception handling table entry
3519 for the function. */
3520
3521 static void
3522 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3523 {
3524 demand_empty_rest_of_line ();
3525 if (!unwind.proc_start)
3526 as_bad (MISSING_FNSTART);
3527
3528 if (unwind.table_entry)
3529 as_bad (_("duplicate .handlerdata directive"));
3530
3531 create_unwind_entry (1);
3532 }
3533
/* Parse an unwind_fnend directive.  Generates the index table entry:
   two words in the unwind index section, the first pointing at the
   function start and the second either holding an inline exception
   table entry or pointing at the separate table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  Only needed when .handlerdata has not
     already created one for this function.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  /* Reserve both words up front, zero-filled; WHERE is the offset of
     the first.  */
  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      /* A zero-size fix against the routine's symbol records the
	 reference without emitting any bytes.  */
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3603
3604
3605 /* Parse an unwind_cantunwind directive. */
3606
3607 static void
3608 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3609 {
3610 demand_empty_rest_of_line ();
3611 if (!unwind.proc_start)
3612 as_bad (MISSING_FNSTART);
3613
3614 if (unwind.personality_routine || unwind.personality_index != -1)
3615 as_bad (_("personality routine specified for cantunwind frame"));
3616
3617 unwind.personality_index = -2;
3618 }
3619
3620
3621 /* Parse a personalityindex directive. */
3622
3623 static void
3624 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3625 {
3626 expressionS exp;
3627
3628 if (!unwind.proc_start)
3629 as_bad (MISSING_FNSTART);
3630
3631 if (unwind.personality_routine || unwind.personality_index != -1)
3632 as_bad (_("duplicate .personalityindex directive"));
3633
3634 expression (&exp);
3635
3636 if (exp.X_op != O_constant
3637 || exp.X_add_number < 0 || exp.X_add_number > 15)
3638 {
3639 as_bad (_("bad personality routine number"));
3640 ignore_rest_of_line ();
3641 return;
3642 }
3643
3644 unwind.personality_index = exp.X_add_number;
3645
3646 demand_empty_rest_of_line ();
3647 }
3648
3649
3650 /* Parse a personality directive. */
3651
3652 static void
3653 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3654 {
3655 char *name, *p, c;
3656
3657 if (!unwind.proc_start)
3658 as_bad (MISSING_FNSTART);
3659
3660 if (unwind.personality_routine || unwind.personality_index != -1)
3661 as_bad (_("duplicate .personality directive"));
3662
3663 name = input_line_pointer;
3664 c = get_symbol_end ();
3665 p = input_line_pointer;
3666 unwind.personality_routine = symbol_find_or_make (name);
3667 *p = c;
3668 demand_empty_rest_of_line ();
3669 }
3670
3671
/* Parse a directive saving core registers.  Parses a core register
   list, emits the corresponding pop unwind opcodes (short or long
   form) and accounts for the stack space used.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  /* RANGE is a bitmask with bit N set for each listed register rN.  */
  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the opcode the movsp emitted and pretend sp (bit 13) was
	 saved instead of ip (bit 12).  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed: 4 per saved register.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
3747
3748
3749 /* Parse a directive saving FPA registers. */
3750
3751 static void
3752 s_arm_unwind_save_fpa (int reg)
3753 {
3754 expressionS exp;
3755 int num_regs;
3756 valueT op;
3757
3758 /* Get Number of registers to transfer. */
3759 if (skip_past_comma (&input_line_pointer) != FAIL)
3760 expression (&exp);
3761 else
3762 exp.X_op = O_illegal;
3763
3764 if (exp.X_op != O_constant)
3765 {
3766 as_bad (_("expected , <constant>"));
3767 ignore_rest_of_line ();
3768 return;
3769 }
3770
3771 num_regs = exp.X_add_number;
3772
3773 if (num_regs < 1 || num_regs > 4)
3774 {
3775 as_bad (_("number of registers must be in the range [1:4]"));
3776 ignore_rest_of_line ();
3777 return;
3778 }
3779
3780 demand_empty_rest_of_line ();
3781
3782 if (reg == 4)
3783 {
3784 /* Short form. */
3785 op = 0xb4 | (num_regs - 1);
3786 add_unwind_opcode (op, 1);
3787 }
3788 else
3789 {
3790 /* Long form. */
3791 op = 0xc800 | (reg << 4) | (num_regs - 1);
3792 add_unwind_opcode (op, 2);
3793 }
3794 unwind.frame_size += num_regs * 12;
3795 }
3796
3797
/* Parse a directive saving VFP registers for ARMv6 and above.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  /* COUNT registers starting at dSTART.  */
  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  /* NOTE(review): when START > 16 this expression is negative and the
     assert below would fire — presumably parse_vfp_reg_list never
     produces such a list here; confirm.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes.  */
  unwind.frame_size += count * 8;
}
3846
3847
3848 /* Parse a directive saving VFP registers for pre-ARMv6. */
3849
3850 static void
3851 s_arm_unwind_save_vfp (void)
3852 {
3853 int count;
3854 unsigned int reg;
3855 valueT op;
3856
3857 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3858 if (count == FAIL)
3859 {
3860 as_bad (_("expected register list"));
3861 ignore_rest_of_line ();
3862 return;
3863 }
3864
3865 demand_empty_rest_of_line ();
3866
3867 if (reg == 8)
3868 {
3869 /* Short form. */
3870 op = 0xb8 | (count - 1);
3871 add_unwind_opcode (op, 1);
3872 }
3873 else
3874 {
3875 /* Long form. */
3876 op = 0xb300 | (reg << 4) | (count - 1);
3877 add_unwind_opcode (op, 2);
3878 }
3879 unwind.frame_size += count * 8 + 4;
3880 }
3881
3882
3883 /* Parse a directive saving iWMMXt data registers. */
3884
3885 static void
3886 s_arm_unwind_save_mmxwr (void)
3887 {
3888 int reg;
3889 int hi_reg;
3890 int i;
3891 unsigned mask = 0;
3892 valueT op;
3893
3894 if (*input_line_pointer == '{')
3895 input_line_pointer++;
3896
3897 do
3898 {
3899 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3900
3901 if (reg == FAIL)
3902 {
3903 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3904 goto error;
3905 }
3906
3907 if (mask >> reg)
3908 as_tsktsk (_("register list not in ascending order"));
3909 mask |= 1 << reg;
3910
3911 if (*input_line_pointer == '-')
3912 {
3913 input_line_pointer++;
3914 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3915 if (hi_reg == FAIL)
3916 {
3917 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3918 goto error;
3919 }
3920 else if (reg >= hi_reg)
3921 {
3922 as_bad (_("bad register range"));
3923 goto error;
3924 }
3925 for (; reg < hi_reg; reg++)
3926 mask |= 1 << reg;
3927 }
3928 }
3929 while (skip_past_comma (&input_line_pointer) != FAIL);
3930
3931 if (*input_line_pointer == '}')
3932 input_line_pointer++;
3933
3934 demand_empty_rest_of_line ();
3935
3936 /* Generate any deferred opcodes because we're going to be looking at
3937 the list. */
3938 flush_pending_unwind ();
3939
3940 for (i = 0; i < 16; i++)
3941 {
3942 if (mask & (1 << i))
3943 unwind.frame_size += 8;
3944 }
3945
3946 /* Attempt to combine with a previous opcode. We do this because gcc
3947 likes to output separate unwind directives for a single block of
3948 registers. */
3949 if (unwind.opcode_count > 0)
3950 {
3951 i = unwind.opcodes[unwind.opcode_count - 1];
3952 if ((i & 0xf8) == 0xc0)
3953 {
3954 i &= 7;
3955 /* Only merge if the blocks are contiguous. */
3956 if (i < 6)
3957 {
3958 if ((mask & 0xfe00) == (1 << 9))
3959 {
3960 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3961 unwind.opcode_count--;
3962 }
3963 }
3964 else if (i == 6 && unwind.opcode_count >= 2)
3965 {
3966 i = unwind.opcodes[unwind.opcode_count - 2];
3967 reg = i >> 4;
3968 i &= 0xf;
3969
3970 op = 0xffff << (reg - 1);
3971 if (reg > 0
3972 && ((mask & op) == (1u << (reg - 1))))
3973 {
3974 op = (1 << (reg + i + 1)) - 1;
3975 op &= ~((1 << reg) - 1);
3976 mask |= op;
3977 unwind.opcode_count -= 2;
3978 }
3979 }
3980 }
3981 }
3982
3983 hi_reg = 15;
3984 /* We want to generate opcodes in the order the registers have been
3985 saved, ie. descending order. */
3986 for (reg = 15; reg >= -1; reg--)
3987 {
3988 /* Save registers in blocks. */
3989 if (reg < 0
3990 || !(mask & (1 << reg)))
3991 {
3992 /* We found an unsaved reg. Generate opcodes to save the
3993 preceding block. */
3994 if (reg != hi_reg)
3995 {
3996 if (reg == 9)
3997 {
3998 /* Short form. */
3999 op = 0xc0 | (hi_reg - 10);
4000 add_unwind_opcode (op, 1);
4001 }
4002 else
4003 {
4004 /* Long form. */
4005 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4006 add_unwind_opcode (op, 2);
4007 }
4008 }
4009 hi_reg = reg - 1;
4010 }
4011 }
4012
4013 return;
4014 error:
4015 ignore_rest_of_line ();
4016 }
4017
4018 static void
4019 s_arm_unwind_save_mmxwcg (void)
4020 {
4021 int reg;
4022 int hi_reg;
4023 unsigned mask = 0;
4024 valueT op;
4025
4026 if (*input_line_pointer == '{')
4027 input_line_pointer++;
4028
4029 do
4030 {
4031 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4032
4033 if (reg == FAIL)
4034 {
4035 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4036 goto error;
4037 }
4038
4039 reg -= 8;
4040 if (mask >> reg)
4041 as_tsktsk (_("register list not in ascending order"));
4042 mask |= 1 << reg;
4043
4044 if (*input_line_pointer == '-')
4045 {
4046 input_line_pointer++;
4047 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4048 if (hi_reg == FAIL)
4049 {
4050 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4051 goto error;
4052 }
4053 else if (reg >= hi_reg)
4054 {
4055 as_bad (_("bad register range"));
4056 goto error;
4057 }
4058 for (; reg < hi_reg; reg++)
4059 mask |= 1 << reg;
4060 }
4061 }
4062 while (skip_past_comma (&input_line_pointer) != FAIL);
4063
4064 if (*input_line_pointer == '}')
4065 input_line_pointer++;
4066
4067 demand_empty_rest_of_line ();
4068
4069 /* Generate any deferred opcodes because we're going to be looking at
4070 the list. */
4071 flush_pending_unwind ();
4072
4073 for (reg = 0; reg < 16; reg++)
4074 {
4075 if (mask & (1 << reg))
4076 unwind.frame_size += 4;
4077 }
4078 op = 0xc700 | mask;
4079 add_unwind_opcode (op, 2);
4080 return;
4081 error:
4082 ignore_rest_of_line ();
4083 }
4084
4085
4086 /* Parse an unwind_save directive.
4087 If the argument is non-zero, this is a .vsave directive. */
4088
4089 static void
4090 s_arm_unwind_save (int arch_v6)
4091 {
4092 char *peek;
4093 struct reg_entry *reg;
4094 bfd_boolean had_brace = FALSE;
4095
4096 if (!unwind.proc_start)
4097 as_bad (MISSING_FNSTART);
4098
4099 /* Figure out what sort of save we have. */
4100 peek = input_line_pointer;
4101
4102 if (*peek == '{')
4103 {
4104 had_brace = TRUE;
4105 peek++;
4106 }
4107
4108 reg = arm_reg_parse_multi (&peek);
4109
4110 if (!reg)
4111 {
4112 as_bad (_("register expected"));
4113 ignore_rest_of_line ();
4114 return;
4115 }
4116
4117 switch (reg->type)
4118 {
4119 case REG_TYPE_FN:
4120 if (had_brace)
4121 {
4122 as_bad (_("FPA .unwind_save does not take a register list"));
4123 ignore_rest_of_line ();
4124 return;
4125 }
4126 input_line_pointer = peek;
4127 s_arm_unwind_save_fpa (reg->number);
4128 return;
4129
4130 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
4131 case REG_TYPE_VFD:
4132 if (arch_v6)
4133 s_arm_unwind_save_vfp_armv6 ();
4134 else
4135 s_arm_unwind_save_vfp ();
4136 return;
4137 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
4138 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
4139
4140 default:
4141 as_bad (_(".unwind_save does not support this kind of register"));
4142 ignore_rest_of_line ();
4143 }
4144 }
4145
4146
4147 /* Parse an unwind_movsp directive. */
4148
4149 static void
4150 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4151 {
4152 int reg;
4153 valueT op;
4154 int offset;
4155
4156 if (!unwind.proc_start)
4157 as_bad (MISSING_FNSTART);
4158
4159 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4160 if (reg == FAIL)
4161 {
4162 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4163 ignore_rest_of_line ();
4164 return;
4165 }
4166
4167 /* Optional constant. */
4168 if (skip_past_comma (&input_line_pointer) != FAIL)
4169 {
4170 if (immediate_for_directive (&offset) == FAIL)
4171 return;
4172 }
4173 else
4174 offset = 0;
4175
4176 demand_empty_rest_of_line ();
4177
4178 if (reg == REG_SP || reg == REG_PC)
4179 {
4180 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4181 return;
4182 }
4183
4184 if (unwind.fp_reg != REG_SP)
4185 as_bad (_("unexpected .unwind_movsp directive"));
4186
4187 /* Generate opcode to restore the value. */
4188 op = 0x90 | reg;
4189 add_unwind_opcode (op, 1);
4190
4191 /* Record the information for later. */
4192 unwind.fp_reg = reg;
4193 unwind.fp_offset = unwind.frame_size - offset;
4194 unwind.sp_restored = 1;
4195 }
4196
4197 /* Parse an unwind_pad directive. */
4198
4199 static void
4200 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4201 {
4202 int offset;
4203
4204 if (!unwind.proc_start)
4205 as_bad (MISSING_FNSTART);
4206
4207 if (immediate_for_directive (&offset) == FAIL)
4208 return;
4209
4210 if (offset & 3)
4211 {
4212 as_bad (_("stack increment must be multiple of 4"));
4213 ignore_rest_of_line ();
4214 return;
4215 }
4216
4217 /* Don't generate any opcodes, just record the details for later. */
4218 unwind.frame_size += offset;
4219 unwind.pending_offset += offset;
4220
4221 demand_empty_rest_of_line ();
4222 }
4223
4224 /* Parse an unwind_setfp directive. */
4225
4226 static void
4227 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4228 {
4229 int sp_reg;
4230 int fp_reg;
4231 int offset;
4232
4233 if (!unwind.proc_start)
4234 as_bad (MISSING_FNSTART);
4235
4236 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4237 if (skip_past_comma (&input_line_pointer) == FAIL)
4238 sp_reg = FAIL;
4239 else
4240 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4241
4242 if (fp_reg == FAIL || sp_reg == FAIL)
4243 {
4244 as_bad (_("expected <reg>, <reg>"));
4245 ignore_rest_of_line ();
4246 return;
4247 }
4248
4249 /* Optional constant. */
4250 if (skip_past_comma (&input_line_pointer) != FAIL)
4251 {
4252 if (immediate_for_directive (&offset) == FAIL)
4253 return;
4254 }
4255 else
4256 offset = 0;
4257
4258 demand_empty_rest_of_line ();
4259
4260 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4261 {
4262 as_bad (_("register must be either sp or set by a previous"
4263 "unwind_movsp directive"));
4264 return;
4265 }
4266
4267 /* Don't generate any opcodes, just record the information for later. */
4268 unwind.fp_reg = fp_reg;
4269 unwind.fp_used = 1;
4270 if (sp_reg == REG_SP)
4271 unwind.fp_offset = unwind.frame_size - offset;
4272 else
4273 unwind.fp_offset -= offset;
4274 }
4275
4276 /* Parse an unwind_raw directive. */
4277
4278 static void
4279 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4280 {
4281 expressionS exp;
4282 /* This is an arbitrary limit. */
4283 unsigned char op[16];
4284 int count;
4285
4286 if (!unwind.proc_start)
4287 as_bad (MISSING_FNSTART);
4288
4289 expression (&exp);
4290 if (exp.X_op == O_constant
4291 && skip_past_comma (&input_line_pointer) != FAIL)
4292 {
4293 unwind.frame_size += exp.X_add_number;
4294 expression (&exp);
4295 }
4296 else
4297 exp.X_op = O_illegal;
4298
4299 if (exp.X_op != O_constant)
4300 {
4301 as_bad (_("expected <offset>, <opcode>"));
4302 ignore_rest_of_line ();
4303 return;
4304 }
4305
4306 count = 0;
4307
4308 /* Parse the opcode. */
4309 for (;;)
4310 {
4311 if (count >= 16)
4312 {
4313 as_bad (_("unwind opcode too long"));
4314 ignore_rest_of_line ();
4315 }
4316 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4317 {
4318 as_bad (_("invalid unwind opcode"));
4319 ignore_rest_of_line ();
4320 return;
4321 }
4322 op[count++] = exp.X_add_number;
4323
4324 /* Parse the next byte. */
4325 if (skip_past_comma (&input_line_pointer) == FAIL)
4326 break;
4327
4328 expression (&exp);
4329 }
4330
4331 /* Add the opcode bytes in reverse order. */
4332 while (count--)
4333 add_unwind_opcode (op[count], 1);
4334
4335 demand_empty_rest_of_line ();
4336 }
4337
4338
4339 /* Parse a .eabi_attribute directive. */
4340
4341 static void
4342 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4343 {
4344 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4345
4346 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4347 attributes_set_explicitly[tag] = 1;
4348 }
4349
/* Emit a tls fix for the symbol: attaches a TLS_DESCSEQ relocation at
   the current output position without reserving any bytes.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* P is the current frag position; the fix consumes no output.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4373 #endif /* OBJ_ELF */
4374
4375 static void s_arm_arch (int);
4376 static void s_arm_object_arch (int);
4377 static void s_arm_cpu (int);
4378 static void s_arm_fpu (int);
4379 static void s_arm_arch_extension (int);
4380
4381 #ifdef TE_PE
4382
4383 static void
4384 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4385 {
4386 expressionS exp;
4387
4388 do
4389 {
4390 expression (&exp);
4391 if (exp.X_op == O_symbol)
4392 exp.X_op = O_secrel;
4393
4394 emit_expr (&exp, 4);
4395 }
4396 while (*input_line_pointer++ == ',');
4397
4398 input_line_pointer--;
4399 demand_empty_rest_of_line ();
4400 }
4401 #endif /* TE_PE */
4402
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align,	  0 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  /* ELF-only: data directives with mapping-symbol support, .inst
     variants (arg = forced width, 0 = guess), and the AEABI
     stack-unwind directive family.  */
  { "word",	        s_arm_elf_cons, 4 },
  { "long",	        s_arm_elf_cons, 4 },
  { "inst.n",           s_arm_elf_inst, 2 },
  { "inst.w",           s_arm_elf_inst, 4 },
  { "inst",             s_arm_elf_inst, 0 },
  { "rel31",	        s_arm_rel31,	0 },
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  /* .save and .vsave share a handler; the arg selects ARMv6 VFP form.  */
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute, 0 },
  { "tlsdescseq",	s_arm_tls_descseq, 0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  { 0, 0, 0 }
};
4475 \f
4476 /* Parser functions used exclusively in instruction operands. */
4477
4478 /* Generic immediate-value read function for use in insn parsing.
4479 STR points to the beginning of the immediate (the leading #);
4480 VAL receives the value; if the value is outside [MIN, MAX]
4481 issue an error. PREFIX_OPT is true if the immediate prefix is
4482 optional. */
4483
4484 static int
4485 parse_immediate (char **str, int *val, int min, int max,
4486 bfd_boolean prefix_opt)
4487 {
4488 expressionS exp;
4489 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4490 if (exp.X_op != O_constant)
4491 {
4492 inst.error = _("constant expression required");
4493 return FAIL;
4494 }
4495
4496 if (exp.X_add_number < min || exp.X_add_number > max)
4497 {
4498 inst.error = _("immediate value out of range");
4499 return FAIL;
4500 }
4501
4502 *val = exp.X_add_number;
4503 return SUCCESS;
4504 }
4505
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low
   32 bits in .imm, and for 64-bit values the high 32 bits in .reg with
   .regisimm set.  Returns SUCCESS or FAIL; *STR is only advanced on
   success.  */

static int
parse_big_immediate (char **str, int i)
{
  expressionS exp;
  char *ptr = *str;

  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);

  if (exp.X_op == O_constant)
    {
      inst.operands[i].imm = exp.X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp.X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4.  */
	  inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp.X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp.X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits from littlenums into .imm ...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      /* ... and the next 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else
    return FAIL;

  /* Only consume the input on success.  */
  *str = ptr;

  return SUCCESS;
}
4575
/* Returns the pseudo-register number of an FPA immediate constant,
   or FAIL if there isn't a valid constant here.  A match against the
   I'th entry of the canned constant tables yields pseudo-register
   I + 8.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Trailing junk after the match: back off and keep trying.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Accept it only if it equals one of the canned FPA values.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
      if (gen_to_words (words, 5, (long) 15) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* No match: restore the scan state and report failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4666
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, else 0.  IMM is an IEEE
   single-precision bit pattern.  */

static int
is_quarter_float (unsigned imm)
{
  unsigned expected;

  /* The low 19 mantissa bits must all be clear.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 30..25 must all equal the complement of bit 29 ("B" above):
     either 0x40000000 (B clear) or 0x3e000000 (B set).  */
  expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected;
}
4676
4677 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4678 0baBbbbbbc defgh000 00000000 00000000.
4679 The zero and minus-zero cases need special handling, since they can't be
4680 encoded in the "quarter-precision" float format, but can nonetheless be
4681 loaded as integer constants. */
4682
4683 static unsigned
4684 parse_qfloat_immediate (char **ccp, int *immed)
4685 {
4686 char *str = *ccp;
4687 char *fpnum;
4688 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4689 int found_fpchar = 0;
4690
4691 skip_past_char (&str, '#');
4692
4693 /* We must not accidentally parse an integer as a floating-point number. Make
4694 sure that the value we parse is not an integer by checking for special
4695 characters '.' or 'e'.
4696 FIXME: This is a horrible hack, but doing better is tricky because type
4697 information isn't in a very usable state at parse time. */
4698 fpnum = str;
4699 skip_whitespace (fpnum);
4700
4701 if (strncmp (fpnum, "0x", 2) == 0)
4702 return FAIL;
4703 else
4704 {
4705 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4706 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4707 {
4708 found_fpchar = 1;
4709 break;
4710 }
4711
4712 if (!found_fpchar)
4713 return FAIL;
4714 }
4715
4716 if ((str = atof_ieee (str, 's', words)) != NULL)
4717 {
4718 unsigned fpword = 0;
4719 int i;
4720
4721 /* Our FP word must be 32 bits (single-precision FP). */
4722 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4723 {
4724 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4725 fpword |= words[i];
4726 }
4727
4728 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4729 *immed = fpword;
4730 else
4731 return FAIL;
4732
4733 *ccp = str;
4734
4735 return SUCCESS;
4736 }
4737
4738 return FAIL;
4739 }
4740
/* Shift operands.  */

/* The kinds of shift that can appear in a data-processing operand.
   Note that ASL is assimilated to SHIFT_LSL and RRX is encoded as
   ROR #0 (see the comment above parse_shift).  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Entry type for the arm_shift_hsh hash table: maps a shift mnemonic
   to the shift kind it denotes.  */
struct asm_shift_name
{
  const char *name;		/* Shift mnemonic as written in source.  */
  enum shift_kind kind;		/* Corresponding shift operation.  */
};

/* Third argument to parse_shift.  Restricts which shift forms the
   caller will accept for the operand being parsed.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
4762
4763 /* Parse a <shift> specifier on an ARM data processing instruction.
4764 This has three forms:
4765
4766 (LSL|LSR|ASL|ASR|ROR) Rs
4767 (LSL|LSR|ASL|ASR|ROR) #imm
4768 RRX
4769
4770 Note that ASL is assimilated to LSL in the instruction encoding, and
4771 RRX to ROR #0 (which cannot be written as such). */
4772
4773 static int
4774 parse_shift (char **str, int i, enum parse_shift_mode mode)
4775 {
4776 const struct asm_shift_name *shift_name;
4777 enum shift_kind shift;
4778 char *s = *str;
4779 char *p = s;
4780 int reg;
4781
4782 for (p = *str; ISALPHA (*p); p++)
4783 ;
4784
4785 if (p == *str)
4786 {
4787 inst.error = _("shift expression expected");
4788 return FAIL;
4789 }
4790
4791 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
4792 p - *str);
4793
4794 if (shift_name == NULL)
4795 {
4796 inst.error = _("shift expression expected");
4797 return FAIL;
4798 }
4799
4800 shift = shift_name->kind;
4801
4802 switch (mode)
4803 {
4804 case NO_SHIFT_RESTRICT:
4805 case SHIFT_IMMEDIATE: break;
4806
4807 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4808 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4809 {
4810 inst.error = _("'LSL' or 'ASR' required");
4811 return FAIL;
4812 }
4813 break;
4814
4815 case SHIFT_LSL_IMMEDIATE:
4816 if (shift != SHIFT_LSL)
4817 {
4818 inst.error = _("'LSL' required");
4819 return FAIL;
4820 }
4821 break;
4822
4823 case SHIFT_ASR_IMMEDIATE:
4824 if (shift != SHIFT_ASR)
4825 {
4826 inst.error = _("'ASR' required");
4827 return FAIL;
4828 }
4829 break;
4830
4831 default: abort ();
4832 }
4833
4834 if (shift != SHIFT_RRX)
4835 {
4836 /* Whitespace can appear here if the next thing is a bare digit. */
4837 skip_whitespace (p);
4838
4839 if (mode == NO_SHIFT_RESTRICT
4840 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4841 {
4842 inst.operands[i].imm = reg;
4843 inst.operands[i].immisreg = 1;
4844 }
4845 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4846 return FAIL;
4847 }
4848 inst.operands[i].shift_kind = shift;
4849 inst.operands[i].shifted = 1;
4850 *str = p;
4851 return SUCCESS;
4852 }
4853
4854 /* Parse a <shifter_operand> for an ARM data processing instruction:
4855
4856 #<immediate>
4857 #<immediate>, <rotate>
4858 <Rm>
4859 <Rm>, <shift>
4860
4861 where <shift> is defined by parse_shift above, and <rotate> is a
4862 multiple of 2 between 0 and 30. Validation of immediate operands
4863 is deferred to md_apply_fix. */
4864
4865 static int
4866 parse_shifter_operand (char **str, int i)
4867 {
4868 int value;
4869 expressionS exp;
4870
4871 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4872 {
4873 inst.operands[i].reg = value;
4874 inst.operands[i].isreg = 1;
4875
4876 /* parse_shift will override this if appropriate */
4877 inst.reloc.exp.X_op = O_constant;
4878 inst.reloc.exp.X_add_number = 0;
4879
4880 if (skip_past_comma (str) == FAIL)
4881 return SUCCESS;
4882
4883 /* Shift operation on register. */
4884 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4885 }
4886
4887 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4888 return FAIL;
4889
4890 if (skip_past_comma (str) == SUCCESS)
4891 {
4892 /* #x, y -- ie explicit rotation by Y. */
4893 if (my_get_expression (&exp, str, GE_NO_PREFIX))
4894 return FAIL;
4895
4896 if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4897 {
4898 inst.error = _("constant expression expected");
4899 return FAIL;
4900 }
4901
4902 value = exp.X_add_number;
4903 if (value < 0 || value > 30 || value % 2 != 0)
4904 {
4905 inst.error = _("invalid rotation");
4906 return FAIL;
4907 }
4908 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4909 {
4910 inst.error = _("invalid constant");
4911 return FAIL;
4912 }
4913
4914 /* Encode as specified. */
4915 inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
4916 return SUCCESS;
4917 }
4918
4919 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4920 inst.reloc.pc_rel = 0;
4921 return SUCCESS;
4922 }
4923
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry
{
  const char *name;	/* Relocation name, without the trailing colon.  */
  int alu_code;		/* BFD reloc code for ALU (ADD/SUB); 0 if not allowed.  */
  int ldr_code;		/* BFD reloc code for LDR; 0 if not allowed.  */
  int ldrs_code;	/* BFD reloc code for LDRS; 0 if not allowed.  */
  int ldc_code;		/* BFD reloc code for LDC; 0 if not allowed.  */
};

/* Selects which non-ALU column of group_reloc_table applies to the
   instruction being assembled (see parse_address_main).  */
typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
4948
/* Maps each group relocation name (as written in assembler source,
   minus the trailing colon) to the BFD relocation codes to use for
   ALU, LDR, LDRS and LDC instructions respectively.  A zero code
   means the relocation is not permitted with that instruction class
   (parse_address_main reports an error in that case).  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 } };	/* LDC */
5002
5003 /* Given the address of a pointer pointing to the textual name of a group
5004 relocation as may appear in assembler source, attempt to find its details
5005 in group_reloc_table. The pointer will be updated to the character after
5006 the trailing colon. On failure, FAIL will be returned; SUCCESS
5007 otherwise. On success, *entry will be updated to point at the relevant
5008 group_reloc_table entry. */
5009
5010 static int
5011 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5012 {
5013 unsigned int i;
5014 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5015 {
5016 int length = strlen (group_reloc_table[i].name);
5017
5018 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5019 && (*str)[length] == ':')
5020 {
5021 *out = &group_reloc_table[i];
5022 *str += (length + 1);
5023 return SUCCESS;
5024 }
5025 }
5026
5027 return FAIL;
5028 }
5029
5030 /* Parse a <shifter_operand> for an ARM data processing instruction
5031 (as for parse_shifter_operand) where group relocations are allowed:
5032
5033 #<immediate>
5034 #<immediate>, <rotate>
5035 #:<group_reloc>:<expression>
5036 <Rm>
5037 <Rm>, <shift>
5038
5039 where <group_reloc> is one of the strings defined in group_reloc_table.
5040 The hashes are optional.
5041
5042 Everything else is as for parse_shifter_operand. */
5043
5044 static parse_operand_result
5045 parse_shifter_operand_group_reloc (char **str, int i)
5046 {
5047 /* Determine if we have the sequence of characters #: or just :
5048 coming next. If we do, then we check for a group relocation.
5049 If we don't, punt the whole lot to parse_shifter_operand. */
5050
5051 if (((*str)[0] == '#' && (*str)[1] == ':')
5052 || (*str)[0] == ':')
5053 {
5054 struct group_reloc_table_entry *entry;
5055
5056 if ((*str)[0] == '#')
5057 (*str) += 2;
5058 else
5059 (*str)++;
5060
5061 /* Try to parse a group relocation. Anything else is an error. */
5062 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5063 {
5064 inst.error = _("unknown group relocation");
5065 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5066 }
5067
5068 /* We now have the group relocation table entry corresponding to
5069 the name in the assembler source. Next, we parse the expression. */
5070 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5071 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5072
5073 /* Record the relocation type (always the ALU variant here). */
5074 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5075 gas_assert (inst.reloc.type != 0);
5076
5077 return PARSE_OPERAND_SUCCESS;
5078 }
5079 else
5080 return parse_shifter_operand (str, i) == SUCCESS
5081 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5082
5083 /* Never reached. */
5084 }
5085
5086 /* Parse a Neon alignment expression. Information is written to
5087 inst.operands[i]. We assume the initial ':' has been skipped.
5088
5089 align .imm = align << 8, .immisalign=1, .preind=0 */
5090 static parse_operand_result
5091 parse_neon_alignment (char **str, int i)
5092 {
5093 char *p = *str;
5094 expressionS exp;
5095
5096 my_get_expression (&exp, &p, GE_NO_PREFIX);
5097
5098 if (exp.X_op != O_constant)
5099 {
5100 inst.error = _("alignment must be constant");
5101 return PARSE_OPERAND_FAIL;
5102 }
5103
5104 inst.operands[i].imm = exp.X_add_number << 8;
5105 inst.operands[i].immisalign = 1;
5106 /* Alignments are not pre-indexes. */
5107 inst.operands[i].preind = 0;
5108
5109 *str = p;
5110 return PARSE_OPERAND_SUCCESS;
5111 }
5112
/* Parse all forms of an ARM address expression.  Information is written
   to inst.operands[i] and/or inst.reloc.

   Preindexed addressing (.preind=1):

   [Rn, #offset]       .reg=Rn .reloc.exp=offset
   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   These three may have a trailing ! which causes .writeback to be set also.

   Postindexed addressing (.postind=1, .writeback=1):

   [Rn], #offset       .reg=Rn .reloc.exp=offset
   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   Unindexed addressing (.preind=0, .postind=0):

   [Rn], {option}      .reg=Rn .imm=option .immisreg=0

   Other:

   [Rn]{!}	       shorthand for [Rn,#0]{!}
   =immediate	       .isreg=0 .reloc.exp=immediate
   label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label

   GROUP_RELOCATIONS is nonzero when #:<group_reloc>: prefixes are
   permitted on immediate offsets; GROUP_TYPE then selects which column
   of group_reloc_table (LDR, LDRS or LDC) supplies the relocation code.

   It is the caller's responsibility to check for addressing modes not
   supported by the instruction, and to set inst.reloc.type.  */

static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[' means either a bare label or a load-constant pseudo-op.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;
	}
      /* Otherwise a load-constant pseudo op, no special treatment needed here.  */

      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* '[' seen: a base register is mandatory.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      /* Optional +/- sign before an index register.  */
      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* Register offset, possibly with an immediate shift.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* The '-' was not followed by a register: back up so the
	     expression parser can see it as part of the immediate.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero table entry means this class of instruction
		 cannot take this relocation.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* Post-indexed addressing: [Rn], #offset or [Rn], +/-Rm.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;
	      /* As in the pre-indexed case: undo the '-' consumption if
		 no register followed it.  */
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5391
5392 static int
5393 parse_address (char **str, int i)
5394 {
5395 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5396 ? SUCCESS : FAIL;
5397 }
5398
5399 static parse_operand_result
5400 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5401 {
5402 return parse_address_main (str, i, 1, type);
5403 }
5404
5405 /* Parse an operand for a MOVW or MOVT instruction. */
5406 static int
5407 parse_half (char **str)
5408 {
5409 char * p;
5410
5411 p = *str;
5412 skip_past_char (&p, '#');
5413 if (strncasecmp (p, ":lower16:", 9) == 0)
5414 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5415 else if (strncasecmp (p, ":upper16:", 9) == 0)
5416 inst.reloc.type = BFD_RELOC_ARM_MOVT;
5417
5418 if (inst.reloc.type != BFD_RELOC_UNUSED)
5419 {
5420 p += 9;
5421 skip_whitespace (p);
5422 }
5423
5424 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5425 return FAIL;
5426
5427 if (inst.reloc.type == BFD_RELOC_UNUSED)
5428 {
5429 if (inst.reloc.exp.X_op != O_constant)
5430 {
5431 inst.error = _("constant expression expected");
5432 return FAIL;
5433 }
5434 if (inst.reloc.exp.X_add_number < 0
5435 || inst.reloc.exp.X_add_number > 0xffff)
5436 {
5437 inst.error = _("immediate value out of range");
5438 return FAIL;
5439 }
5440 }
5441 *str = p;
5442 return SUCCESS;
5443 }
5444
/* Miscellaneous.  */

/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when the PSR is being written (the destination of an
   MSR); it controls whether the PSR_f mask is implied for M-profile
   registers and for a bare APSR.  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (selected_cpu.core == arm_arch_any.core)
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile special registers come from the arm_v7m_psr_hsh
	 table rather than the fixed names above.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For names containing "psr", only match up to and including the
	 final 'r' so that trailing suffixes are left for later.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step past the 4-character register name matched above.  */
  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Each bit letter may appear at most once; specifying one
	     twice sets the 0x20 (or 0x2 for 'g') poison value, which
	     the validity check below rejects.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated bits, and nzcvq subsets other than the
	     full set or none at all.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5643
5644 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5645 value suitable for splatting into the AIF field of the instruction. */
5646
5647 static int
5648 parse_cps_flags (char **str)
5649 {
5650 int val = 0;
5651 int saw_a_flag = 0;
5652 char *s = *str;
5653
5654 for (;;)
5655 switch (*s++)
5656 {
5657 case '\0': case ',':
5658 goto done;
5659
5660 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5661 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5662 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5663
5664 default:
5665 inst.error = _("unrecognized CPS flag");
5666 return FAIL;
5667 }
5668
5669 done:
5670 if (saw_a_flag == 0)
5671 {
5672 inst.error = _("missing CPS flags");
5673 return FAIL;
5674 }
5675
5676 *str = s - 1;
5677 return val;
5678 }
5679
5680 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5681 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5682
5683 static int
5684 parse_endian_specifier (char **str)
5685 {
5686 int little_endian;
5687 char *s = *str;
5688
5689 if (strncasecmp (s, "BE", 2))
5690 little_endian = 0;
5691 else if (strncasecmp (s, "LE", 2))
5692 little_endian = 1;
5693 else
5694 {
5695 inst.error = _("valid endian specifiers are be or le");
5696 return FAIL;
5697 }
5698
5699 if (ISALNUM (s[2]) || s[2] == '_')
5700 {
5701 inst.error = _("valid endian specifiers are be or le");
5702 return FAIL;
5703 }
5704
5705 *str = s + 2;
5706 return little_endian;
5707 }
5708
5709 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
5710 value suitable for poking into the rotate field of an sxt or sxta
5711 instruction, or FAIL on error. */
5712
5713 static int
5714 parse_ror (char **str)
5715 {
5716 int rot;
5717 char *s = *str;
5718
5719 if (strncasecmp (s, "ROR", 3) == 0)
5720 s += 3;
5721 else
5722 {
5723 inst.error = _("missing rotation field after comma");
5724 return FAIL;
5725 }
5726
5727 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5728 return FAIL;
5729
5730 switch (rot)
5731 {
5732 case 0: *str = s; return 0x0;
5733 case 8: *str = s; return 0x1;
5734 case 16: *str = s; return 0x2;
5735 case 24: *str = s; return 0x3;
5736
5737 default:
5738 inst.error = _("rotation can only be 0, 8, 16, or 24");
5739 return FAIL;
5740 }
5741 }
5742
5743 /* Parse a conditional code (from conds[] below). The value returned is in the
5744 range 0 .. 14, or FAIL. */
5745 static int
5746 parse_cond (char **str)
5747 {
5748 char *q;
5749 const struct asm_cond *c;
5750 int n;
5751 /* Condition codes are always 2 characters, so matching up to
5752 3 characters is sufficient. */
5753 char cond[3];
5754
5755 q = *str;
5756 n = 0;
5757 while (ISALPHA (*q) && n < 3)
5758 {
5759 cond[n] = TOLOWER (*q);
5760 q++;
5761 n++;
5762 }
5763
5764 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
5765 if (!c)
5766 {
5767 inst.error = _("condition required");
5768 return FAIL;
5769 }
5770
5771 *str = q;
5772 return c->value;
5773 }
5774
5775 /* If the given feature available in the selected CPU, mark it as used.
5776 Returns TRUE iff feature is available. */
5777 static bfd_boolean
5778 mark_feature_used (const arm_feature_set *feature)
5779 {
5780 /* Ensure the option is valid on the current architecture. */
5781 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
5782 return FALSE;
5783
5784 /* Add the appropriate architecture feature for the barrier option used.
5785 */
5786 if (thumb_mode)
5787 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
5788 else
5789 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
5790
5791 return TRUE;
5792 }
5793
5794 /* Parse an option for a barrier instruction. Returns the encoding for the
5795 option, or FAIL. */
5796 static int
5797 parse_barrier (char **str)
5798 {
5799 char *p, *q;
5800 const struct asm_barrier_opt *o;
5801
5802 p = q = *str;
5803 while (ISALPHA (*q))
5804 q++;
5805
5806 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
5807 q - p);
5808 if (!o)
5809 return FAIL;
5810
5811 if (!mark_feature_used (&o->arch))
5812 return FAIL;
5813
5814 *str = q;
5815 return o->value;
5816 }
5817
5818 /* Parse the operands of a table branch instruction. Similar to a memory
5819 operand. */
5820 static int
5821 parse_tb (char **str)
5822 {
5823 char * p = *str;
5824 int reg;
5825
5826 if (skip_past_char (&p, '[') == FAIL)
5827 {
5828 inst.error = _("'[' expected");
5829 return FAIL;
5830 }
5831
5832 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5833 {
5834 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5835 return FAIL;
5836 }
5837 inst.operands[0].reg = reg;
5838
5839 if (skip_past_comma (&p) == FAIL)
5840 {
5841 inst.error = _("',' expected");
5842 return FAIL;
5843 }
5844
5845 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5846 {
5847 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5848 return FAIL;
5849 }
5850 inst.operands[0].imm = reg;
5851
5852 if (skip_past_comma (&p) == SUCCESS)
5853 {
5854 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5855 return FAIL;
5856 if (inst.reloc.exp.X_add_number != 1)
5857 {
5858 inst.error = _("invalid shift");
5859 return FAIL;
5860 }
5861 inst.operands[0].shifted = 1;
5862 }
5863
5864 if (skip_past_char (&p, ']') == FAIL)
5865 {
5866 inst.error = _("']' expected");
5867 return FAIL;
5868 }
5869 *str = p;
5870 return SUCCESS;
5871 }
5872
5873 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5874 information on the types the operands can take and how they are encoded.
5875 Up to four operands may be read; this function handles setting the
5876 ".present" field for each read operand itself.
5877 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5878 else returns FAIL. */
5879
static int
parse_neon_mov (char **str, int *which_operand)
{
  /* I indexes inst.operands[]; the "inst.operands[i++].present = 1"
     idiom below both marks an operand complete and advances to the
     next slot, so statement order here is significant.  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      /* A quad register cannot be moved to/from a core reg.  */
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D register: a second core register must follow.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two core registers follow the S-reg pair.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is a core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: VMOV <Rt>, <Rt2>, <Sm>, <Sm1> — a second
		 single-precision register must follow.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13: VMOV <Sd>, <Rm> (second operand single-precision).  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6094
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM matcher code occupies the
   low 16 bits of the combined value and the Thumb matcher code the
   high 16 bits; parse_operands splits them apart again according to
   the mode it was asked to assemble for.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
6099
6100 /* Matcher codes for parse_operands. */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oI32z,      /*                             0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* All codes from here on are optional; parse_operands uses this
     boundary to decide when backtracking is permitted.  Keep the
     OP_o* entries above contiguous and this first among them.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6229
6230 /* Generic instruction operand parser. This does no encoding and no
6231 semantic validation; it merely squirrels values away in the inst
6232 structure. Returns SUCCESS or FAIL depending on whether the
6233 specified grammar matched. */
static int
parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
{
  unsigned const int *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val = 0, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;
  unsigned int op_parse_code;

  /* The po_* helper macros below capture the common "parse one thing,
     record it in inst.operands[i], bail out on error" pattern.  They
     rely on the local variables STR, I, VAL and RTYPE and on the
     failure/bad_args labels at the bottom of the parse loop, which is
     why they are defined (and #undef'd) locally.  */

#define po_char_or_fail(chr)			\
  do						\
    {						\
      if (skip_past_char (&str, chr) == FAIL)	\
	goto bad_args;				\
    }						\
  while (0)

#define po_reg_or_fail(regtype)					\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	{							\
	  first_error (_(reg_expected_msgs[regtype]));		\
	  goto failure;						\
	}							\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\
    }								\
  while (0)

#define po_reg_or_goto(regtype, label)				\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	goto label;						\
								\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\
    }								\
  while (0)

#define po_imm_or_fail(min, max, popt)				\
  do								\
    {								\
      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
	goto failure;						\
      inst.operands[i].imm = val;				\
    }								\
  while (0)

#define po_scalar_or_goto(elsz, label)					\
  do									\
    {									\
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
      if (val == FAIL)							\
	goto label;							\
      inst.operands[i].reg = val;					\
      inst.operands[i].isscalar = 1;					\
    }									\
  while (0)

#define po_misc_or_fail(expr)			\
  do						\
    {						\
      if (expr)					\
	goto failure;				\
    }						\
  while (0)

#define po_misc_or_fail_no_backtrack(expr)		\
  do							\
    {							\
      result = expr;					\
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
	backtrack_pos = 0;				\
      if (result != PARSE_OPERAND_SUCCESS)		\
	goto failure;					\
    }							\
  while (0)

#define po_barrier_or_imm(str)				   \
  do							   \
    {						 	   \
      val = parse_barrier (&str);			   \
      if (val == FAIL)					   \
	{						   \
	  if (ISALPHA (*str))				   \
	      goto failure;				   \
	  else						   \
	      goto immediate;				   \
	}						   \
      else						   \
	{						   \
	  if ((inst.instruction & 0xf0) == 0x60		   \
	      && val != 0xf)				   \
	    {						   \
	       /* ISB can only take SY as an option.  */   \
	       inst.error = _("invalid barrier type");	   \
	       goto failure;				   \
	    }						   \
	}						   \
    }							   \
  while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      op_parse_code = upat[i];
      /* Mixed ARM/Thumb codes pack two matchers into one word; select
	 the half appropriate for the mode we are assembling for.  */
      if (op_parse_code >= 1<<16)
	op_parse_code = thumb ? (op_parse_code >> 16)
				: (op_parse_code & ((1<<16)-1));

      if (op_parse_code >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  gas_assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (op_parse_code)
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_oRRnpcsp:
	case OP_RRnpc:
	case OP_RRnpcsp:
	case OP_oRR:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	  coproc_reg:
	  po_reg_or_fail (REG_TYPE_CN);
	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
	case OP_oRNSDQ:
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;

	/* Neon scalar. Using an element size of 8 means that some invalid
	   scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;

	case OP_RNDQ_I0:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	    break;
	    try_imm0:
	    po_imm_or_fail (0, 0, TRUE);
	  }
	  break;

	case OP_RVSD_I0:
	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
	  break;

	case OP_RR_RNSC:
	  {
	    po_scalar_or_goto (8, try_rr);
	    break;
	    try_rr:
	    po_reg_or_fail (REG_TYPE_RN);
	  }
	  break;

	case OP_RNSDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_nsdq);
	    break;
	    try_nsdq:
	    po_reg_or_fail (REG_TYPE_NSDQ);
	  }
	  break;

	case OP_RNDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_ndq);
	    break;
	    try_ndq:
	    po_reg_or_fail (REG_TYPE_NDQ);
	  }
	  break;

	case OP_RND_RNSC:
	  {
	    po_scalar_or_goto (8, try_vfd);
	    break;
	    try_vfd:
	    po_reg_or_fail (REG_TYPE_VFD);
	  }
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i.  If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQ_Ibig:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
	    break;
	    try_immbig:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i) == FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I63b:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	    break;
	    try_shimm:
	    po_imm_or_fail (0, 63, TRUE);
	  }
	  break;

	case OP_RRnpcb:
	  po_char_or_fail ('[');
	  po_reg_or_fail  (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRnpctw:
	case OP_RRw:
	case OP_oRRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,     16, FALSE);   break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,     63, FALSE);   break;
	case OP_I64:	 po_imm_or_fail (  1,     64, FALSE);   break;
	case OP_I64z:	 po_imm_or_fail (  0,     64, FALSE);   break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:   po_imm_or_fail (  1,     32, TRUE);    break;
	case OP_oI32z:   po_imm_or_fail (  0,     32, TRUE);    break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, TRUE);
	    /* If parsing consumed everything up to the zapped '!',
	       step over its position so the ',' check still works.  */
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  if (inst.reloc.exp.X_op == O_symbol)
	    {
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	  /* Operand for MOVW or MOVT.  */
	case OP_HALF:
	  po_misc_or_fail (parse_half (&str));
	  break;

	  /* Register or expression.  */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate.  */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
	IF:
	  if (!is_immediate_prefix (*str))
	    goto bad_args;
	  str++;
	  val = parse_fpa_immediate (&str);
	  if (val == FAIL)
	    goto failure;
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  break;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register.  */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	case OP_RIWC_RIWG:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;
	  }
	  break;

	  /* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER_I15:
	  po_barrier_or_imm (str); break;
	  immediate:
	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
	    goto failure;
	  break;

	case OP_wPSR:
	case OP_rPSR:
	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
	    {
	      inst.error = _("Banked registers are not available with this "
			     "architecture.");
	      goto failure;
	    }
	  break;
	  try_psr:
	  val = parse_psr (&str, op_parse_code == OP_wPSR);
	  break;

	case OP_APSR_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  break;
	  try_apsr:
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instruction).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
	    {
	      unsigned found = 0;
	      str += 5;
	      /* Each of c/n/z/v may appear once, in any order; FOUND
		 accumulates them as flag bits and jumps to 16 (an
		 invalid value) on any repeat or unknown letter.  */
	      while (found < 15)
		switch (*str++)
		  {
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
		  }
	      if (found != 15)
		goto failure;
	      inst.operands[i].isvec = 1;
	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
	      inst.operands[i].reg = REG_PC;
	    }
	  else
	    goto failure;
	  break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists.  */
	case OP_REGLST:
	  val = parse_reg_list (&str);
	  if (*str == '^')
	    {
	      /* NOTE(review): index 1 is hard-coded here rather than I —
		 presumably because '^' only occurs on LDM/STM where the
		 register list is always operand 1; confirm if reused.  */
	      inst.operands[1].writeback = 1;
	      str++;
	    }
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
	  break;

	case OP_VRSDLST:
	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  if (val == FAIL)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  break;

	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_ADDRGLDR:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  break;

	case OP_ADDRGLDRS:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  break;

	case OP_ADDRGLDC:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_SHG:
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	default:
	  as_fatal (_("unhandled operand code %d"), op_parse_code);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.  */
      switch (op_parse_code)
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_oRRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_oRRnpcsp:
	case OP_RRnpcsp:
	  if (inst.operands[i].isreg)
	    {
	      if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	      else if (inst.operands[i].reg == REG_SP)
		inst.error = BAD_SP;
	    }
	  break;

	case OP_RRnpctw:
	  if (inst.operands[i].isreg
	      && inst.operands[i].reg == REG_PC
	      && (inst.operands[i].writeback || thumb))
	    inst.error = BAD_PC;
	  break;

	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_wPSR:
	case OP_rPSR:
	case OP_COND:
	case OP_oBARRIER_I15:
	case OP_REGLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_VRSDLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;

    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}
6897
/* The po_* helpers are local to parse_operands; remove them again.
   Note: the scalar helper is named po_scalar_or_goto — the previous
   "#undef po_scalar_or_fail" named a macro that was never defined and
   therefore left po_scalar_or_goto in scope.  */
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_goto
#undef po_barrier_or_imm
6904
/* Shorthand macro for instruction encoding functions issuing errors.
   EXPR is the condition that makes the instruction invalid; ERR is the
   message stored in inst.error.  Expands to an early return, so it may
   only be used inside void encoding functions.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
6916
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   Sets inst.error to BAD_SP or BAD_PC and returns from the enclosing
   (void) encoding function.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_SP || reg == REG_PC)			\
     {							\
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
       return;						\
     }							\
  while (0)
6928
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Gated on the global warn_on_deprecated flag; issues a
   warning only, never an error.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_warn (_("use of r13 is deprecated"));	\
  while (0)
6936
6937 /* Functions for operand encoding. ARM, then Thumb. */
6938
/* Rotate V left by N bits, 0 <= N < 32.  Both shift counts are masked
   to 0..31 so that N == 0 does not produce a shift by 32, which is
   undefined behaviour in C; the arguments are also parenthesized to
   keep the macro safe for compound expressions.  */
#define rotate_left(v, n) ((v) << ((n) & 31) | (v) >> ((32 - (n)) & 31))
6940
6941 /* If VAL can be encoded in the immediate field of an ARM instruction,
6942 return the encoded form. Otherwise, return FAIL. */
6943
6944 static unsigned int
6945 encode_arm_immediate (unsigned int val)
6946 {
6947 unsigned int a, i;
6948
6949 for (i = 0; i < 32; i += 2)
6950 if ((a = rotate_left (val, i)) <= 0xff)
6951 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6952
6953 return FAIL;
6954 }
6955
6956 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6957 return the encoded form. Otherwise, return FAIL. */
6958 static unsigned int
6959 encode_thumb32_immediate (unsigned int val)
6960 {
6961 unsigned int a, i;
6962
6963 if (val <= 0xff)
6964 return val;
6965
6966 for (i = 1; i <= 24; i++)
6967 {
6968 a = val >> i;
6969 if ((val & ~(0xff << i)) == 0)
6970 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6971 }
6972
6973 a = val & 0xff;
6974 if (val == ((a << 16) | a))
6975 return 0x100 | a;
6976 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6977 return 0x300 | a;
6978
6979 a = val & 0xff00;
6980 if (val == ((a << 16) | a))
6981 return 0x200 | (a >> 8);
6982
6983 return FAIL;
6984 }
/* Encode a VFP SP or DP register number REG into inst.instruction at
   the field selected by POS (Sd/Sn/Sm or Dd/Dn/Dm).  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D registers above 15 require the D32 extension; record its use,
     or error out when the selected FPU lacks it.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* S registers split as reg[4:1] in the main field plus reg[0] in a
     single "extension" bit; D registers as reg[3:0] plus reg[4].  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
7039
7040 /* Encode a <shift> in an ARM-format instruction. The immediate,
7041 if any, is handled by md_apply_fix. */
7042 static void
7043 encode_arm_shift (int i)
7044 {
7045 if (inst.operands[i].shift_kind == SHIFT_RRX)
7046 inst.instruction |= SHIFT_ROR << 5;
7047 else
7048 {
7049 inst.instruction |= inst.operands[i].shift_kind << 5;
7050 if (inst.operands[i].immisreg)
7051 {
7052 inst.instruction |= SHIFT_BY_REG;
7053 inst.instruction |= inst.operands[i].imm << 8;
7054 }
7055 else
7056 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7057 }
7058 }
7059
7060 static void
7061 encode_arm_shifter_operand (int i)
7062 {
7063 if (inst.operands[i].isreg)
7064 {
7065 inst.instruction |= inst.operands[i].reg;
7066 encode_arm_shift (i);
7067 }
7068 else
7069 {
7070 inst.instruction |= INST_IMMEDIATE;
7071 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7072 inst.instruction |= inst.operands[i].imm;
7073 }
7074 }
7075
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register, indexing mode and write-back bit of
   operand I.  IS_T marks the LDRT/STRT ("T") family, which only
   accepts post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Write-back (or post-indexing) combined with a base register equal
     to Rd/Rt (bits 19:16 == bits 15:12) is flagged as suspicious.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7118
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, optionally scaled by an immediate shift.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;	/* RRX: ROR with count 0.  */
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_warn (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7178
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifted-register offset form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || inst.operands[i].reg == REG_PC),
		  BAD_PC_ADDRESSING);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      /* Immediate offsets use the split 8-bit (4+4) field.  */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7220
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).

   Returns SUCCESS or FAIL; on FAIL inst.error has been set.  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form: the 8-bit field holds the option value.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  /* Group relocations (ALU_PC_G0_NC .. LDC_SB_G2) and LDR_PC_G0 set
     earlier by the parser are preserved.  */
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
7285
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
{
  unsigned long tbit;

  /* The load bit sits in a different place in 16-bit and 32-bit
     Thumb opcodes.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }
  if (inst.reloc.exp.X_op == O_constant)
    {
      if (thumb_p)
	{
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return TRUE;
	    }
	}
      else
	{
	  /* Try MOV of the constant, then MVN of its complement.  */
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }

	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }
	}
    }

  /* Fall back to a PC-relative literal-pool load.  */
  if (add_to_lit_pool () == FAIL)
    {
      inst.error = _("literal pool insertion failed");
      return TRUE;
    }
  /* NOTE(review): the address is rewritten into operand slot 1
     unconditionally, independent of I — presumably the address is
     always operand 1 for these pseudo-ops; confirm against callers.  */
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
7367
7368 /* Functions for instruction encoding, sorted by sub-architecture.
7369 First some generics; their names are taken from the conventional
7370 bit positions for register arguments in ARM format instructions. */
7371
/* Encoder for instructions with no operands: the opcode table entry
   already contains the complete encoding, so nothing to do.  */
static void
do_noargs (void)
{
}
7376
7377 static void
7378 do_rd (void)
7379 {
7380 inst.instruction |= inst.operands[0].reg << 12;
7381 }
7382
7383 static void
7384 do_rd_rm (void)
7385 {
7386 inst.instruction |= inst.operands[0].reg << 12;
7387 inst.instruction |= inst.operands[1].reg;
7388 }
7389
7390 static void
7391 do_rm_rn (void)
7392 {
7393 inst.instruction |= inst.operands[0].reg;
7394 inst.instruction |= inst.operands[1].reg << 16;
7395 }
7396
7397 static void
7398 do_rd_rn (void)
7399 {
7400 inst.instruction |= inst.operands[0].reg << 12;
7401 inst.instruction |= inst.operands[1].reg << 16;
7402 }
7403
7404 static void
7405 do_rn_rd (void)
7406 {
7407 inst.instruction |= inst.operands[0].reg << 16;
7408 inst.instruction |= inst.operands[1].reg << 12;
7409 }
7410
7411 static bfd_boolean
7412 check_obsolete (const arm_feature_set *feature, const char *msg)
7413 {
7414 if (ARM_CPU_IS_ANY (cpu_variant))
7415 {
7416 as_warn ("%s", msg);
7417 return TRUE;
7418 }
7419 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
7420 {
7421 as_bad ("%s", msg);
7422 return TRUE;
7423 }
7424
7425 return FALSE;
7426 }
7427
/* Encode Rd (15:12), Rm (3:0) and Rn (19:16), with extra checks for
   the SWP/SWPB instructions, which use this operand shape.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_warn (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
7451
7452 static void
7453 do_rd_rn_rm (void)
7454 {
7455 inst.instruction |= inst.operands[0].reg << 12;
7456 inst.instruction |= inst.operands[1].reg << 16;
7457 inst.instruction |= inst.operands[2].reg;
7458 }
7459
/* Encode Rm (3:0), Rd (15:12) and Rn (19:16).  Rn must not be the PC,
   and any immediate offset parsed alongside Rn must be zero.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* O_illegal means no expression was parsed at all, which is fine;
     any parsed expression must be the constant 0.  */
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
7472
7473 static void
7474 do_imm0 (void)
7475 {
7476 inst.instruction |= inst.operands[0].imm;
7477 }
7478
7479 static void
7480 do_rd_cpaddr (void)
7481 {
7482 inst.instruction |= inst.operands[0].reg << 12;
7483 encode_arm_cp_address (1, TRUE, TRUE, 0);
7484 }
7485
7486 /* ARM instructions, in alphabetical order by function name (except
7487 that wrapper functions appear immediately after the function they
7488 wrap). */
7489
/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* Subtract 8 to account for the PC reading as the instruction
     address plus 8 in ARM state.  */
  inst.reloc.exp.X_add_number -= 8;
}
7504
/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
   add rd, pc, #low(label-.-8)"
   add rd, rd, #high(label-.-8)"  */

static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* The pseudo-op expands to two ARM instructions.  */
  inst.size = INSN_SIZE * 2;
  /* Subtract 8 to account for the PC reading as the instruction
     address plus 8 in ARM state.  */
  inst.reloc.exp.X_add_number -= 8;
}
7522
7523 static void
7524 do_arit (void)
7525 {
7526 if (!inst.operands[1].present)
7527 inst.operands[1].reg = inst.operands[0].reg;
7528 inst.instruction |= inst.operands[0].reg << 12;
7529 inst.instruction |= inst.operands[1].reg << 16;
7530 encode_arm_shifter_operand (2);
7531 }
7532
7533 static void
7534 do_barrier (void)
7535 {
7536 if (inst.operands[0].present)
7537 {
7538 constraint ((inst.instruction & 0xf0) != 0x40
7539 && inst.operands[0].imm > 0xf
7540 && inst.operands[0].imm < 0x0,
7541 _("bad barrier type"));
7542 inst.instruction |= inst.operands[0].imm;
7543 }
7544 else
7545 inst.instruction |= 0xf;
7546 }
7547
7548 static void
7549 do_bfc (void)
7550 {
7551 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
7552 constraint (msb > 32, _("bit-field extends past end of register"));
7553 /* The instruction encoding stores the LSB and MSB,
7554 not the LSB and width. */
7555 inst.instruction |= inst.operands[0].reg << 12;
7556 inst.instruction |= inst.operands[1].imm << 7;
7557 inst.instruction |= (msb - 1) << 16;
7558 }
7559
/* BFI: insert a bit-field from Rm into Rd.  Operand 2 is the LSB and
   operand 3 the width; the encoding stores LSB and MSB.  */
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
7579
7580 static void
7581 do_bfx (void)
7582 {
7583 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
7584 _("bit-field extends past end of register"));
7585 inst.instruction |= inst.operands[0].reg << 12;
7586 inst.instruction |= inst.operands[1].reg;
7587 inst.instruction |= inst.operands[2].imm << 7;
7588 inst.instruction |= (inst.operands[3].imm - 1) << 16;
7589 }
7590
7591 /* ARM V5 breakpoint instruction (argument parse)
7592 BKPT <16 bit unsigned immediate>
7593 Instruction is not conditional.
7594 The bit pattern given in insns[] has the COND_ALWAYS condition,
7595 and it is an error if the caller tried to override that. */
7596
7597 static void
7598 do_bkpt (void)
7599 {
7600 /* Top 12 of 16 bits to bits 19:8. */
7601 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
7602
7603 /* Bottom 4 of 16 bits to bits 3:0. */
7604 inst.instruction |= inst.operands[0].imm & 0xf;
7605 }
7606
/* Set up the PC-relative relocation for a branch instruction.
   DEFAULT_RELOC is used unless the operand carries an explicit
   reloc suffix, in which case only (plt) and (tlscall) are valid.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      /* (tlscall) picks the Thumb or ARM TLS-call reloc as appropriate.  */
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
7623
/* B{cond}: plain branch.  EABI v4+ objects use the distinct JUMP
   reloc; older objects use the generic branch reloc.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
7634
/* BL: branch with link.  On EABI v4+ an unconditional BL gets the
   CALL reloc, while a conditional BL gets the JUMP reloc; older
   objects use the generic branch reloc.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
7650
/* ARM V5 branch-link-exchange instruction (argument parse)
     BLX <target_addr>	ie BLX(1)
     BLX{<condition>} <Rm>	ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* The immediate form of BLX has its own fixed encoding.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
7682
/* BX: branch and exchange instruction set.  May emit an R_ARM_V4BX
   marker relocation so linkers can rewrite BX for pre-v4t.  */
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* Non-ELF, or pre-v4 EABI objects never get the marker reloc.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
7706
7707
7708 /* ARM v5TEJ. Jump to Jazelle code. */
7709
7710 static void
7711 do_bxj (void)
7712 {
7713 if (inst.operands[0].reg == REG_PC)
7714 as_tsktsk (_("use of r15 in bxj is not really useful"));
7715
7716 inst.instruction |= inst.operands[0].reg;
7717 }
7718
7719 /* Co-processor data operation:
7720 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
7721 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
7722 static void
7723 do_cdp (void)
7724 {
7725 inst.instruction |= inst.operands[0].reg << 8;
7726 inst.instruction |= inst.operands[1].imm << 20;
7727 inst.instruction |= inst.operands[2].reg << 12;
7728 inst.instruction |= inst.operands[3].reg << 16;
7729 inst.instruction |= inst.operands[4].reg;
7730 inst.instruction |= inst.operands[5].imm << 5;
7731 }
7732
7733 static void
7734 do_cmp (void)
7735 {
7736 inst.instruction |= inst.operands[0].reg << 16;
7737 encode_arm_shifter_operand (1);
7738 }
7739
7740 /* Transfer between coprocessor and ARM registers.
7741 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
7742 MRC2
7743 MCR{cond}
7744 MCR2
7745
7746 No special properties. */
7747
/* Description of one coprocessor register whose access is deprecated
   or obsolete on some architecture versions.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opc1 field of the access encoding.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opc2 field.  */
  arm_feature_set deprecated;	/* Architectures where access warns.  */
  arm_feature_set obsoleted;	/* Architectures where access errors.  */
  const char *dep_msg;		/* Message for deprecated access.  */
  const char *obs_msg;		/* Message for obsolete access.  */
};

#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in deprecated_coproc_regs.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
7788
/* Encode MRC/MRC2/MCR/MCR2: a transfer between a coprocessor register
   and a core register.  Operands: coproc, opc1, Rd, CRn, CRm, opc2.
   Also warns/errors on accesses to deprecated coprocessor registers.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      /* The fixed opcode values distinguish MCR (write) from MRC
	 (read); MCR applies the full BadReg check, MRC only bans SP.  */
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Check the access against the deprecated/obsolete register table.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (!check_obsolete (&r->obsoleted, r->obs_msg)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_warn ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
7838
/* Transfer between coprocessor register and pair of ARM registers.
   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
   MCRR2
   MRRC{cond}
   MRRC2

   Two XScale instructions are special cases of these:

     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0

   Result unpredictable if Rd or Rn is R15.  */

static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  /* Thumb applies the full BadReg (no SP/PC) check; ARM only bans PC.  */
  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
7877
7878 static void
7879 do_cpsi (void)
7880 {
7881 inst.instruction |= inst.operands[0].imm << 6;
7882 if (inst.operands[1].present)
7883 {
7884 inst.instruction |= CPSI_MMOD;
7885 inst.instruction |= inst.operands[1].imm;
7886 }
7887 }
7888
7889 static void
7890 do_dbg (void)
7891 {
7892 inst.instruction |= inst.operands[0].imm;
7893 }
7894
7895 static void
7896 do_div (void)
7897 {
7898 unsigned Rd, Rn, Rm;
7899
7900 Rd = inst.operands[0].reg;
7901 Rn = (inst.operands[1].present
7902 ? inst.operands[1].reg : Rd);
7903 Rm = inst.operands[2].reg;
7904
7905 constraint ((Rd == REG_PC), BAD_PC);
7906 constraint ((Rn == REG_PC), BAD_PC);
7907 constraint ((Rm == REG_PC), BAD_PC);
7908
7909 inst.instruction |= Rd << 16;
7910 inst.instruction |= Rn << 0;
7911 inst.instruction |= Rm << 8;
7912 }
7913
/* IT pseudo-instruction in ARM mode.  */
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emits no bytes; only updates the IT-block tracking state.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Low nibble of the opcode is the IT mask; 0x10 marks it live.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
7930
/* If there is only one register in the register list RANGE (a 16-bit
   mask), return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* An empty list gives i == -1; test it explicitly, since evaluating
     "1 << -1" (a shift by a negative count) is undefined behaviour.  */
  return (i < 0 || i > 15 || range != (1 << i)) ? -1 : i;
}
7939
/* Encode an LDM/STM instruction: base register (19:16), register-list
   mask (15:0), write-back, and the type-2/3 bit for the '^' forms.
   When FROM_PUSH_POP_MNEM is set and the list contains exactly one
   register, the single-register A2 PUSH/POP encoding is used instead.
   Warns about UNPREDICTABLE write-back combinations.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* '^' on the register list selects the user-bank / exception-return
     form (LDM/STM type 2 or 3).  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      /* Keep only the condition field; replace the rest of the opcode.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
7990
7991 static void
7992 do_ldmstm (void)
7993 {
7994 encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
7995 }
7996
/* ARMv5TE load-consecutive (argument parse)
   Mode is like LDRH.

     LDRccD R, mode
     STRccD R, mode.

   Transfers the even/odd register pair Rt, Rt+1; the second register
   may be omitted in the source.  */

static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* r14 would make the pair r14/r15, i.e. include the PC.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8038
/* ARM LDREX: Rt in bits 12-15, base Rn in bits 16-19.  Only a
   zero-offset [Rn] addressing form is architecturally valid.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): the BAD_ADDR_MODE constraint above already rejects
     REG_PC and returns, so this check looks unreachable — confirm
     before relying on the BAD_PC diagnostic here.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8070
/* ARM LDREXD: first destination must be an even register other than
   r14; the optional second operand must be the next register.  The
   base register (operand 2) goes in bits 16-19.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8086
8087 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8088 which is not a multiple of four is UNPREDICTABLE. */
8089 static void
8090 check_ldr_r15_aligned (void)
8091 {
8092 constraint (!(inst.operands[1].immisreg)
8093 && (inst.operands[0].reg == REG_PC
8094 && inst.operands[1].reg == REG_PC
8095 && (inst.reloc.exp.X_add_number & 0x3)),
8096 _("ldr to register 15 must be 4-byte alligned"));
8097 }
8098
/* Word/byte LDR/STR.  A non-register second operand is a literal or
   immediate that may be turned into a literal-pool load or a MOV.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
8109
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* A pre-indexed form is only acceptable when it is really a
	 degenerate [Rn] with a zero offset.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
8128
8129 /* Halfword and signed-byte load/store operations. */
8130
/* Halfword / signed-byte LDR/STR (ARMv4 addressing mode 3).  PC is
   not a valid transfer register for these forms.  */
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
8141
/* Unprivileged halfword/signed-byte load/store; mirrors do_ldstt but
   uses addressing mode 3.  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
8160
8161 /* Co-processor register load/store.
8162 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8163 static void
8164 do_lstc (void)
8165 {
8166 inst.instruction |= inst.operands[0].reg << 8;
8167 inst.instruction |= inst.operands[1].reg << 12;
8168 encode_arm_cp_address (2, TRUE, TRUE, 0);
8169 }
8170
8171 static void
8172 do_mlas (void)
8173 {
8174 /* This restriction does not apply to mls (nor to mla in v6 or later). */
8175 if (inst.operands[0].reg == inst.operands[1].reg
8176 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
8177 && !(inst.instruction & 0x00400000))
8178 as_tsktsk (_("Rd and Rm should be different in mla"));
8179
8180 inst.instruction |= inst.operands[0].reg << 16;
8181 inst.instruction |= inst.operands[1].reg;
8182 inst.instruction |= inst.operands[2].reg << 8;
8183 inst.instruction |= inst.operands[3].reg << 12;
8184 }
8185
8186 static void
8187 do_mov (void)
8188 {
8189 inst.instruction |= inst.operands[0].reg << 12;
8190 encode_arm_shifter_operand (1);
8191 }
8192
8193 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8194 static void
8195 do_mov16 (void)
8196 {
8197 bfd_vma imm;
8198 bfd_boolean top;
8199
8200 top = (inst.instruction & 0x00400000) != 0;
8201 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8202 _(":lower16: not allowed this instruction"));
8203 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
8204 _(":upper16: not allowed instruction"));
8205 inst.instruction |= inst.operands[0].reg << 12;
8206 if (inst.reloc.type == BFD_RELOC_UNUSED)
8207 {
8208 imm = inst.reloc.exp.X_add_number;
8209 /* The value is in two pieces: 0:11, 16:19. */
8210 inst.instruction |= (imm & 0x00000fff);
8211 inst.instruction |= (imm & 0x0000f000) << 4;
8212 }
8213 }
8214
8215 static void do_vfp_nsyn_opcode (const char *);
8216
/* Handle the VFP forms of MRS.  Returns SUCCESS if the instruction
   was consumed as a VFP operation, FAIL to let the caller handle it
   as a core MRS.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      /* APSR_nzcv destination: only FPSCR (reg 1) is valid, and the
	 whole thing becomes an fmstat.  */
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
8235
8236 static int
8237 do_vfp_nsyn_msr (void)
8238 {
8239 if (inst.operands[0].isvec)
8240 do_vfp_nsyn_opcode ("fmxr");
8241 else
8242 return FAIL;
8243
8244 return SUCCESS;
8245 }
8246
/* VMRS Rt, <spec-reg>: read a VFP system register into a core
   register.  The system register number goes in bits 16-19 and Rt in
   bits 12-15.  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  /* SP is not a valid destination in Thumb state.  */
  if (thumb_mode && inst.operands[0].reg == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && inst.operands[0].reg == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  switch (inst.operands[1].reg)
    {
    case 0: /* FPSID */
    case 1: /* FPSCR */
    case 6: /* MVFR1 */
    case 7: /* MVFR0 */
    case 8: /* FPEXC */
      inst.instruction |= (inst.operands[1].reg << 16);
      break;
    default:
      first_error (_("operand 1 must be a VFP extension System Register"));
    }

  inst.instruction |= (Rt << 12);
}
8280
8281 static void
8282 do_vmsr (void)
8283 {
8284 unsigned Rt = inst.operands[1].reg;
8285
8286 if (thumb_mode)
8287 reject_bad_reg (Rt);
8288 else if (Rt == REG_PC)
8289 {
8290 inst.error = BAD_PC;
8291 return;
8292 }
8293
8294 switch (inst.operands[0].reg)
8295 {
8296 case 0: /* FPSID */
8297 case 1: /* FPSCR */
8298 case 8: /* FPEXC */
8299 inst.instruction |= (inst.operands[0].reg << 16);
8300 break;
8301 default:
8302 first_error (_("operand 0 must be FPSID or FPSCR pr FPEXC"));
8303 }
8304
8305 inst.instruction |= (Rt << 12);
8306 }
8307
/* Core MRS: Rd in bits 12-15.  Operand 1 is either a banked-register
   specifier (register form) or a PSR mask immediate.  */
static void
do_mrs (void)
{
  unsigned br;

  /* Give the VFP pseudo-forms first refusal.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      br = inst.operands[1].reg;
      /* NOTE(review): presumably validates the banked-register
	 encoding produced by the operand parser — confirm against the
	 parser's packing of the R bit and SYSm fields.  */
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
8336
8337 /* Two possible forms:
8338 "{C|S}PSR_<field>, Rm",
8339 "{C|S}PSR_f, #expression". */
8340
static void
do_msr (void)
{
  /* Give the VFP pseudo-forms first refusal.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* Operand 0 carries the pre-encoded PSR field mask.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: defer the value to an ARM-immediate fixup.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
8357
/* MUL{S}: Rd in bits 16-19, Rm in bits 0-3, Rs in bits 8-11.  The
   third operand defaults to Rd when omitted (mul rd, rm == mul rd,
   rm, rd).  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm is only UNPREDICTABLE before v6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
8373
8374 /* Long Multiply Parser
8375 UMULL RdLo, RdHi, Rm, Rs
8376 SMULL RdLo, RdHi, Rm, Rs
8377 UMLAL RdLo, RdHi, Rm, Rs
8378 SMLAL RdLo, RdHi, Rm, Rs. */
8379
8380 static void
8381 do_mull (void)
8382 {
8383 inst.instruction |= inst.operands[0].reg << 12;
8384 inst.instruction |= inst.operands[1].reg << 16;
8385 inst.instruction |= inst.operands[2].reg;
8386 inst.instruction |= inst.operands[3].reg << 8;
8387
8388 /* rdhi and rdlo must be different. */
8389 if (inst.operands[0].reg == inst.operands[1].reg)
8390 as_tsktsk (_("rdhi and rdlo must be different"));
8391
8392 /* rdhi, rdlo and rm must all be different before armv6. */
8393 if ((inst.operands[0].reg == inst.operands[2].reg
8394 || inst.operands[1].reg == inst.operands[2].reg)
8395 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8396 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
8397 }
8398
/* NOP and NOP-with-hint.  With no operand on a pre-v6k core the
   instruction is left as whatever alias the opcode table chose;
   otherwise it is encoded as an architectural hint.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
8412
8413 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
8414 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
8415 Condition defaults to COND_ALWAYS.
8416 Error if Rd, Rn or Rm are R15. */
8417
8418 static void
8419 do_pkhbt (void)
8420 {
8421 inst.instruction |= inst.operands[0].reg << 12;
8422 inst.instruction |= inst.operands[1].reg << 16;
8423 inst.instruction |= inst.operands[2].reg;
8424 if (inst.operands[3].present)
8425 encode_arm_shift (3);
8426 }
8427
8428 /* ARM V6 PKHTB (Argument Parse). */
8429
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      /* Shifted form: note Rn and Rm keep their normal positions,
	 unlike the swapped un-shifted case above.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
8450
8451 /* ARMv5TE: Preload-Cache
8452 MP Extensions: Preload for write
8453
8454 PLD(W) <addr_mode>
8455
8456 Syntactically, like LDR with B=1, W=0, L=1. */
8457
static void
do_pld (void)
{
  /* PLD only accepts a plain pre-indexed address: no post-index,
     writeback, or unindexed forms.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
8471
8472 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same addressing restrictions as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI does not set the P bit.  */
  inst.instruction &= ~PRE_INDEX;
}
8487
/* PUSH/POP: rewrite the operand list as an LDM/STM with an implicit
   writeback SP base, then reuse the common encoder (which may still
   select the single-register A2 form).  */
static void
do_push_pop (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
8498
8499 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
8500 word at the specified address and the following word
8501 respectively.
8502 Unconditionally executed.
8503 Error if Rn is R15. */
8504
8505 static void
8506 do_rfe (void)
8507 {
8508 inst.instruction |= inst.operands[0].reg << 16;
8509 if (inst.operands[0].writeback)
8510 inst.instruction |= WRITE_BACK;
8511 }
8512
8513 /* ARM V6 ssat (argument parse). */
8514
8515 static void
8516 do_ssat (void)
8517 {
8518 inst.instruction |= inst.operands[0].reg << 12;
8519 inst.instruction |= (inst.operands[1].imm - 1) << 16;
8520 inst.instruction |= inst.operands[2].reg;
8521
8522 if (inst.operands[3].present)
8523 encode_arm_shift (3);
8524 }
8525
8526 /* ARM V6 usat (argument parse). */
8527
8528 static void
8529 do_usat (void)
8530 {
8531 inst.instruction |= inst.operands[0].reg << 12;
8532 inst.instruction |= inst.operands[1].imm << 16;
8533 inst.instruction |= inst.operands[2].reg;
8534
8535 if (inst.operands[3].present)
8536 encode_arm_shift (3);
8537 }
8538
8539 /* ARM V6 ssat16 (argument parse). */
8540
8541 static void
8542 do_ssat16 (void)
8543 {
8544 inst.instruction |= inst.operands[0].reg << 12;
8545 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
8546 inst.instruction |= inst.operands[2].reg;
8547 }
8548
8549 static void
8550 do_usat16 (void)
8551 {
8552 inst.instruction |= inst.operands[0].reg << 12;
8553 inst.instruction |= inst.operands[1].imm << 16;
8554 inst.instruction |= inst.operands[2].reg;
8555 }
8556
8557 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
8558 preserving the other bits.
8559
8560 setend <endian_specifier>, where <endian_specifier> is either
8561 BE or LE. */
8562
static void
do_setend (void)
{
  /* ARMv8 deprecates SETEND; warn when targeting v8 and the user has
     not disabled deprecation warnings.  */
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_warn (_("setend use is deprecated for ARMv8"));

  /* Bit 9 selects big-endian (BE).  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
8573
/* Shift pseudo-ops (lsl/lsr/asr/ror...): two-operand form shifts the
   destination in place, three-operand form shifts Rm.  The shift
   amount is either a register (Rs) or an immediate fixup.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
8594
/* SMC: the immediate is emitted through a dedicated fixup.  */
static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}
8601
/* HVC: the immediate is emitted through a dedicated fixup.  */
static void
do_hvc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_HVC;
  inst.reloc.pc_rel = 0;
}
8608
/* SWI/SVC: the comment field is emitted through a dedicated fixup.  */
static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
8615
8616 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
8617 SMLAxy{cond} Rd,Rm,Rs,Rn
8618 SMLAWy{cond} Rd,Rm,Rs,Rn
8619 Error if any register is R15. */
8620
8621 static void
8622 do_smla (void)
8623 {
8624 inst.instruction |= inst.operands[0].reg << 16;
8625 inst.instruction |= inst.operands[1].reg;
8626 inst.instruction |= inst.operands[2].reg << 8;
8627 inst.instruction |= inst.operands[3].reg << 12;
8628 }
8629
8630 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
8631 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
8632 Error if any register is R15.
8633 Warning if Rdlo == Rdhi. */
8634
8635 static void
8636 do_smlal (void)
8637 {
8638 inst.instruction |= inst.operands[0].reg << 12;
8639 inst.instruction |= inst.operands[1].reg << 16;
8640 inst.instruction |= inst.operands[2].reg;
8641 inst.instruction |= inst.operands[3].reg << 8;
8642
8643 if (inst.operands[0].reg == inst.operands[1].reg)
8644 as_tsktsk (_("rdhi and rdlo must be different"));
8645 }
8646
8647 /* ARM V5E (El Segundo) signed-multiply (argument parse)
8648 SMULxy{cond} Rd,Rm,Rs
8649 Error if any register is R15. */
8650
8651 static void
8652 do_smul (void)
8653 {
8654 inst.instruction |= inst.operands[0].reg << 16;
8655 inst.instruction |= inst.operands[1].reg;
8656 inst.instruction |= inst.operands[2].reg << 8;
8657 }
8658
8659 /* ARM V6 srs (argument parse). The variable fields in the encoding are
8660 the same for both ARM and Thumb-2. */
8661
static void
do_srs (void)
{
  int reg;

  /* The base register is optional and must be SP when given.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  /* Operand 1 holds the target mode number.  */
  inst.instruction |= inst.operands[1].imm;
  /* The '!' may be attached to either operand in the source.  */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
8680
8681 /* ARM V6 strex (argument parse). */
8682
static void
do_strex (void)
{
  /* Only a zero-offset [Rn] address is valid; anything else (including
     a possibly-occluded register name) gets a generic diagnostic.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register Rd must not overlap Rt or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8706
/* Thumb STREXB/STREXH: same addressing and overlap restrictions as
   STREX, then the common Rm/Rd/Rn field layout.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
8721
static void
do_strexd (void)
{
  /* The source pair must be an even register (not r14) and, when
     given, the consecutive odd register.  */
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must not overlap the source pair or the base
     (operand 3).  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
8743
8744 /* ARM V8 STRL. */
/* ARM V8 STLEX: status register must not overlap source or base.  */
static void
do_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
8753
/* Thumb STLEX: same overlap restriction, different field layout.  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
8762
8763 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
8764 extends it to 32-bits, and adds the result to a value in another
8765 register. You can specify a rotation by 0, 8, 16, or 24 bits
8766 before extracting the 16-bit value.
8767 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
8768 Condition defaults to COND_ALWAYS.
8769 Error if any register uses R15. */
8770
8771 static void
8772 do_sxtah (void)
8773 {
8774 inst.instruction |= inst.operands[0].reg << 12;
8775 inst.instruction |= inst.operands[1].reg << 16;
8776 inst.instruction |= inst.operands[2].reg;
8777 inst.instruction |= inst.operands[3].imm << 10;
8778 }
8779
8780 /* ARM V6 SXTH.
8781
8782 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
8783 Condition defaults to COND_ALWAYS.
8784 Error if any register uses R15. */
8785
8786 static void
8787 do_sxth (void)
8788 {
8789 inst.instruction |= inst.operands[0].reg << 12;
8790 inst.instruction |= inst.operands[1].reg;
8791 inst.instruction |= inst.operands[2].imm << 10;
8792 }
8793 \f
8794 /* VFP instructions. In a logical order: SP variant first, monad
8795 before dyad, arithmetic then move then load/store. */
8796
8797 static void
8798 do_vfp_sp_monadic (void)
8799 {
8800 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8801 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8802 }
8803
8804 static void
8805 do_vfp_sp_dyadic (void)
8806 {
8807 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8808 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8809 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8810 }
8811
/* Single-precision compare against zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
8817
8818 static void
8819 do_vfp_dp_sp_cvt (void)
8820 {
8821 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8822 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8823 }
8824
8825 static void
8826 do_vfp_sp_dp_cvt (void)
8827 {
8828 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8829 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8830 }
8831
8832 static void
8833 do_vfp_reg_from_sp (void)
8834 {
8835 inst.instruction |= inst.operands[0].reg << 12;
8836 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8837 }
8838
static void
do_vfp_reg2_from_sp2 (void)
{
  /* Exactly two consecutive SP registers must be named; the register
     count is parsed into operand 2's imm field.  */
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
8848
8849 static void
8850 do_vfp_sp_from_reg (void)
8851 {
8852 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
8853 inst.instruction |= inst.operands[1].reg << 12;
8854 }
8855
static void
do_vfp_sp2_from_reg2 (void)
{
  /* Exactly two consecutive SP registers must be named as the
     destination pair.  */
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8865
/* Single-precision FLDS/FSTS: Sd plus a co-processor address.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
8872
/* Double-precision FLDD/FSTD: Dd plus a co-processor address.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
8879
8880
/* Common encoder for the single-precision load/store-multiple forms.
   Operand 1's imm carries the register count.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  /* IA without writeback is the only form valid with a plain base.  */
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}
8893
/* Common encoder for the double-precision load/store-multiple forms.
   Each D register is two words; the X (FLDMX/FSTMX) variants add one
   extra word to the transfer length.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
8914
/* FLDMIAS/FSTMIAS.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}
8920
/* FLDMDBS/FSTMDBS.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}
8926
/* FLDMIAD/FSTMIAD.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}
8932
/* FLDMDBD/FSTMDBD.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}
8938
/* FLDMIAX/FSTMIAX (extended, one extra transfer word).  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
8944
/* FLDMDBX/FSTMDBX (extended, one extra transfer word).  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
8950
8951 static void
8952 do_vfp_dp_rd_rm (void)
8953 {
8954 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8955 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8956 }
8957
8958 static void
8959 do_vfp_dp_rn_rd (void)
8960 {
8961 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
8962 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8963 }
8964
8965 static void
8966 do_vfp_dp_rd_rn (void)
8967 {
8968 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8969 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8970 }
8971
8972 static void
8973 do_vfp_dp_rd_rn_rm (void)
8974 {
8975 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8976 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8977 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
8978 }
8979
/* Double-precision op with only a Dd field.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
8985
8986 static void
8987 do_vfp_dp_rm_rd_rn (void)
8988 {
8989 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
8990 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8991 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
8992 }
8993
8994 /* VFPv3 instructions. */
/* VFPv3 single-precision immediate: the 8-bit encoded constant is
   split into the high nibble (bits 16-19) and low nibble (bits 0-3).  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9002
/* VFPv3 double-precision immediate: same nibble split as the
   single-precision form.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9010
/* Encode the fraction-bits field of a VFPv3 fixed-point conversion.
   SRCSIZE is 16 or 32; the field stored is (srcsize - imm), split as
   its low bit into bit 5 and the remaining bits into bits 0-3.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9034
/* Single-precision <-> 16-bit fixed-point conversion.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
9041
/* Double-precision <-> 16-bit fixed-point conversion.  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
9048
/* Single-precision <-> 32-bit fixed-point conversion.  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
9055
/* Double-precision <-> 32-bit fixed-point conversion.  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9062 \f
9063 /* FPA instructions. Also in a logical order. */
9064
9065 static void
9066 do_fpa_cmp (void)
9067 {
9068 inst.instruction |= inst.operands[0].reg << 16;
9069 inst.instruction |= inst.operands[1].reg;
9070 }
9071
/* FPA LFM/SFM: the register count (1-4) is spread over the CP_T_X and
   CP_T_Y bits.  Stacking forms are emulated by rewriting the offset.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;	         break;
    case 2: inst.instruction |= CP_T_Y;	         break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: 			                 break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register occupies 12 bytes in the transfer.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9110 \f
9111 /* iWMMXt instructions: strictly in alphabetical order. */
9112
/* iWMMXt TANDC/TORC/TEXTRC family: the sole operand must be r15.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
9118
9119 static void
9120 do_iwmmxt_textrc (void)
9121 {
9122 inst.instruction |= inst.operands[0].reg << 12;
9123 inst.instruction |= inst.operands[1].imm;
9124 }
9125
9126 static void
9127 do_iwmmxt_textrm (void)
9128 {
9129 inst.instruction |= inst.operands[0].reg << 12;
9130 inst.instruction |= inst.operands[1].reg << 16;
9131 inst.instruction |= inst.operands[2].imm;
9132 }
9133
9134 static void
9135 do_iwmmxt_tinsr (void)
9136 {
9137 inst.instruction |= inst.operands[0].reg << 16;
9138 inst.instruction |= inst.operands[1].reg << 12;
9139 inst.instruction |= inst.operands[2].imm;
9140 }
9141
9142 static void
9143 do_iwmmxt_tmia (void)
9144 {
9145 inst.instruction |= inst.operands[0].reg << 5;
9146 inst.instruction |= inst.operands[1].reg;
9147 inst.instruction |= inst.operands[2].reg << 12;
9148 }
9149
9150 static void
9151 do_iwmmxt_waligni (void)
9152 {
9153 inst.instruction |= inst.operands[0].reg << 12;
9154 inst.instruction |= inst.operands[1].reg << 16;
9155 inst.instruction |= inst.operands[2].reg;
9156 inst.instruction |= inst.operands[3].imm << 20;
9157 }
9158
9159 static void
9160 do_iwmmxt_wmerge (void)
9161 {
9162 inst.instruction |= inst.operands[0].reg << 12;
9163 inst.instruction |= inst.operands[1].reg << 16;
9164 inst.instruction |= inst.operands[2].reg;
9165 inst.instruction |= inst.operands[3].imm << 21;
9166 }
9167
9168 static void
9169 do_iwmmxt_wmov (void)
9170 {
9171 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
9172 inst.instruction |= inst.operands[0].reg << 12;
9173 inst.instruction |= inst.operands[1].reg << 16;
9174 inst.instruction |= inst.operands[1].reg;
9175 }
9176
9177 static void
9178 do_iwmmxt_wldstbh (void)
9179 {
9180 int reloc;
9181 inst.instruction |= inst.operands[0].reg << 12;
9182 if (thumb_mode)
9183 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
9184 else
9185 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
9186 encode_arm_cp_address (1, TRUE, FALSE, reloc);
9187 }
9188
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register form must be unconditional; force the 0xF
	 condition field.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
9202
static void
do_iwmmxt_wldstd (void)
{
  /* Doubleword iWMMXt load/store.  iWMMXt2 additionally supports a
     register-offset addressing form, which uses a different encoding
     assembled by hand below; otherwise fall back to the standard
     coprocessor address encoding.  */
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Clear the fields being re-encoded, then force the unconditional
	 (0xF) condition prefix.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xf << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      /* Base register at bit 16, scale at bit 4, index register in the
	 low bits.  */
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
9225
9226 static void
9227 do_iwmmxt_wshufh (void)
9228 {
9229 inst.instruction |= inst.operands[0].reg << 12;
9230 inst.instruction |= inst.operands[1].reg << 16;
9231 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
9232 inst.instruction |= (inst.operands[2].imm & 0x0f);
9233 }
9234
9235 static void
9236 do_iwmmxt_wzero (void)
9237 {
9238 /* WZERO reg is an alias for WANDN reg, reg, reg. */
9239 inst.instruction |= inst.operands[0].reg;
9240 inst.instruction |= inst.operands[0].reg << 12;
9241 inst.instruction |= inst.operands[0].reg << 16;
9242 }
9243
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  /* Shift-style iWMMXt instruction: either the three-register form, or
     (iWMMXt2 only) a register-register-immediate form.  A shift count of
     zero is not directly encodable and is rewritten as an equivalent
     rotate (or a WOR for the doubleword case).  */
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 identify the operation/width; rewrite per width.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Unconditional (0xF) prefix; immediate bit 4 lands at bit 8.  */
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
9293 \f
9294 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
9295 operations first, then control, shift, and load/store. */
9296
9297 /* Insns like "foo X,Y,Z". */
9298
9299 static void
9300 do_mav_triple (void)
9301 {
9302 inst.instruction |= inst.operands[0].reg << 16;
9303 inst.instruction |= inst.operands[1].reg;
9304 inst.instruction |= inst.operands[2].reg << 12;
9305 }
9306
9307 /* Insns like "foo W,X,Y,Z".
9308 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
9309
9310 static void
9311 do_mav_quad (void)
9312 {
9313 inst.instruction |= inst.operands[0].reg << 5;
9314 inst.instruction |= inst.operands[1].reg << 12;
9315 inst.instruction |= inst.operands[2].reg << 16;
9316 inst.instruction |= inst.operands[3].reg;
9317 }
9318
9319 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
static void
do_mav_dspsc (void)
{
  /* DSPSC (operand 0) is implicit in the opcode; only the MVDX source
     register is encoded, in bits 12-15.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
9325
9326 /* Maverick shift immediate instructions.
9327 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
9328 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
9329
9330 static void
9331 do_mav_shift (void)
9332 {
9333 int imm = inst.operands[2].imm;
9334
9335 inst.instruction |= inst.operands[0].reg << 12;
9336 inst.instruction |= inst.operands[1].reg << 16;
9337
9338 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
9339 Bits 5-7 of the insn should have bits 4-6 of the immediate.
9340 Bit 4 should be 0. */
9341 imm = (imm & 0xf) | ((imm & 0x70) << 1);
9342
9343 inst.instruction |= imm;
9344 }
9345 \f
9346 /* XScale instructions. Also sorted arithmetic before move. */
9347
9348 /* Xscale multiply-accumulate (argument parse)
9349 MIAcc acc0,Rm,Rs
9350 MIAPHcc acc0,Rm,Rs
9351 MIAxycc acc0,Rm,Rs. */
9352
9353 static void
9354 do_xsc_mia (void)
9355 {
9356 inst.instruction |= inst.operands[1].reg;
9357 inst.instruction |= inst.operands[2].reg << 12;
9358 }
9359
9360 /* Xscale move-accumulator-register (argument parse)
9361
9362 MARcc acc0,RdLo,RdHi. */
9363
9364 static void
9365 do_xsc_mar (void)
9366 {
9367 inst.instruction |= inst.operands[1].reg << 12;
9368 inst.instruction |= inst.operands[2].reg << 16;
9369 }
9370
9371 /* Xscale move-register-accumulator (argument parse)
9372
9373 MRAcc RdLo,RdHi,acc0. */
9374
9375 static void
9376 do_xsc_mra (void)
9377 {
9378 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
9379 inst.instruction |= inst.operands[0].reg << 12;
9380 inst.instruction |= inst.operands[1].reg << 16;
9381 }
9382 \f
9383 /* Encoding functions relevant only to Thumb. */
9384
9385 /* inst.operands[i] is a shifted-register operand; encode
9386 it into inst.instruction in the format used by Thumb32. */
9387
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* A shift of zero is canonicalized to LSL #0; LSR/ASR #32 are
	 encoded with a zero amount.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* Shift type in bits 4-5; the amount is split into imm3 (bit 12)
	 and imm2 (bit 6) fields.  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
9419
9420
9421 /* inst.operands[i] was set up by parse_address. Encode it into a
9422 Thumb32 format load or store instruction. Reject forms that cannot
9423 be used with such instructions. If is_t is true, reject forms that
9424 cannot be used with a T instruction; if is_d is true, reject forms
9425 that cannot be used with a D instruction. If it is a store insn,
9426 reject PC in Rn. */
9427
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register always lands in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #shift}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  /* LSL amount limited to 0-3, encoded at bit 4.  */
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Pre-indexed immediate form: [Rn, #imm]{!}.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      /* PC-relative addressing is only valid for loads (or when the
	 reloc is already pc-relative).  */
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0)
		  && !inst.reloc.pc_rel, BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form: [Rn], #imm — writeback is implied by parse.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
9499
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.

   X-macro columns: (mnemonic suffix, 16-bit opcode, 32-bit opcode).
   Instantiated three times below to build the T_MNEM_* enum and the
   thumb_op16[]/thumb_op32[] lookup tables.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005)
9585
/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* First instantiation: the T_MNEM_* enum (indices into the tables).  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second instantiation: 16-bit opcode lookup table.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third instantiation: 32-bit opcode lookup table.  Bit 20 of the
   32-bit opcode is the S (flag-setting) bit.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
9604
9605 /* Thumb instruction encoders, in alphabetical order. */
9606
9607 /* ADDW or SUBW. */
9608
9609 static void
9610 do_t_add_sub_w (void)
9611 {
9612 int Rd, Rn;
9613
9614 Rd = inst.operands[0].reg;
9615 Rn = inst.operands[1].reg;
9616
9617 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
9618 is the SP-{plus,minus}-immediate form of the instruction. */
9619 if (Rn == REG_SP)
9620 constraint (Rd == REG_PC, BAD_PC);
9621 else
9622 reject_bad_reg (Rd);
9623
9624 inst.instruction |= (Rn << 16) | (Rd << 8);
9625 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
9626 }
9627
9628 /* Parse an add or subtract instruction. We get here with inst.instruction
9629 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
9630
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  /* Writing PC terminates any enclosing IT block.  */
  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* In an IT block the flag-setting forms need 32 bits; outside
	 one, only the flag-setting forms have a 16-bit encoding.  */
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  /* No explicit .n qualifier: allow relaxation to the
		     32-bit form if the offset turns out too large.  */
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      if (Rd == REG_PC)
		{
		  /* The only legal PC-destination form is the exception
		     return SUBS PC, LR, #const.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register operand, possibly shifted.  */
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Canonicalize so the non-destination source is
			 in Rn.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: 16-bit encodings only.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
9838
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  /* No size qualifier and a low destination: emit the 16-bit form and
     let section relaxation widen it if the target is out of range.  */
  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;

      inst.instruction |= Rd << 4;
    }
}
9873
9874 /* Arithmetic instructions for which there is just one 16-bit
9875 instruction encoding, and it allows only two low registers.
9876 For maximal compatibility with ARM syntax, we allow three register
9877 operands even when Thumb-32 instructions are not available, as long
9878 as the first two are identical. For instance, both "sbc r0,r1" and
9879 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit form is two-operand (Rd == Rs).  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
9962
9963 /* Similarly, but for instructions where the arithmetic operation is
9964 commutative, so we can allow either of them to be different from
9965 the destination operand in a 16-bit instruction. For instance, all
9966 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
9967 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The operation is commutative, so the 16-bit two-operand
	     form works if the destination matches either source.  */
	  if (narrow)
	    {
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
10063
10064 static void
10065 do_t_barrier (void)
10066 {
10067 if (inst.operands[0].present)
10068 {
10069 constraint ((inst.instruction & 0xf0) != 0x40
10070 && inst.operands[0].imm > 0xf
10071 && inst.operands[0].imm < 0x0,
10072 _("bad barrier type"));
10073 inst.instruction |= inst.operands[0].imm;
10074 }
10075 else
10076 inst.instruction |= 0xf;
10077 }
10078
10079 static void
10080 do_t_bfc (void)
10081 {
10082 unsigned Rd;
10083 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10084 constraint (msb > 32, _("bit-field extends past end of register"));
10085 /* The instruction encoding stores the LSB and MSB,
10086 not the LSB and width. */
10087 Rd = inst.operands[0].reg;
10088 reject_bad_reg (Rd);
10089 inst.instruction |= Rd << 8;
10090 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10091 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10092 inst.instruction |= msb - 1;
10093 }
10094
10095 static void
10096 do_t_bfi (void)
10097 {
10098 int Rd, Rn;
10099 unsigned int msb;
10100
10101 Rd = inst.operands[0].reg;
10102 reject_bad_reg (Rd);
10103
10104 /* #0 in second position is alternative syntax for bfc, which is
10105 the same instruction but with REG_PC in the Rm field. */
10106 if (!inst.operands[1].isreg)
10107 Rn = REG_PC;
10108 else
10109 {
10110 Rn = inst.operands[1].reg;
10111 reject_bad_reg (Rn);
10112 }
10113
10114 msb = inst.operands[2].imm + inst.operands[3].imm;
10115 constraint (msb > 32, _("bit-field extends past end of register"));
10116 /* The instruction encoding stores the LSB and MSB,
10117 not the LSB and width. */
10118 inst.instruction |= Rd << 8;
10119 inst.instruction |= Rn << 16;
10120 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10121 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10122 inst.instruction |= msb - 1;
10123 }
10124
10125 static void
10126 do_t_bfx (void)
10127 {
10128 unsigned Rd, Rn;
10129
10130 Rd = inst.operands[0].reg;
10131 Rn = inst.operands[1].reg;
10132
10133 reject_bad_reg (Rd);
10134 reject_bad_reg (Rn);
10135
10136 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10137 _("bit-field extends past end of register"));
10138 inst.instruction |= Rd << 8;
10139 inst.instruction |= Rn << 16;
10140 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10141 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10142 inst.instruction |= inst.operands[3].imm - 1;
10143 }
10144
10145 /* ARM V5 Thumb BLX (argument parse)
10146 BLX <target_addr> which is BLX(1)
10147 BLX <Rm> which is BLX(2)
10148 Unfortunately, there are two different opcodes for this mnemonic.
10149 So, the insns[].value is not used, and the code here zaps values
10150 into inst.instruction.
10151
10152 ??? How to take advantage of the additional two bits of displacement
10153 available in Thumb32 mode? Need new relocation? */
10154
10155 static void
10156 do_t_blx (void)
10157 {
10158 set_it_insn_type_last ();
10159
10160 if (inst.operands[0].isreg)
10161 {
10162 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10163 /* We have a register, so this is BLX(2). */
10164 inst.instruction |= inst.operands[0].reg << 3;
10165 }
10166 else
10167 {
10168 /* No register. This must be BLX(1). */
10169 inst.instruction = 0xf000e800;
10170 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10171 }
10172 }
10173
10174 static void
10175 do_t_branch (void)
10176 {
10177 int opcode;
10178 int cond;
10179 int reloc;
10180
10181 cond = inst.cond;
10182 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
10183
10184 if (in_it_block ())
10185 {
10186 /* Conditional branches inside IT blocks are encoded as unconditional
10187 branches. */
10188 cond = COND_ALWAYS;
10189 }
10190 else
10191 cond = inst.cond;
10192
10193 if (cond != COND_ALWAYS)
10194 opcode = T_MNEM_bcond;
10195 else
10196 opcode = inst.instruction;
10197
10198 if (unified_syntax
10199 && (inst.size_req == 4
10200 || (inst.size_req != 2
10201 && (inst.operands[0].hasreloc
10202 || inst.reloc.exp.X_op == O_constant))))
10203 {
10204 inst.instruction = THUMB_OP32(opcode);
10205 if (cond == COND_ALWAYS)
10206 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
10207 else
10208 {
10209 gas_assert (cond != 0xF);
10210 inst.instruction |= cond << 22;
10211 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
10212 }
10213 }
10214 else
10215 {
10216 inst.instruction = THUMB_OP16(opcode);
10217 if (cond == COND_ALWAYS)
10218 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
10219 else
10220 {
10221 inst.instruction |= cond << 8;
10222 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
10223 }
10224 /* Allow section relaxation. */
10225 if (unified_syntax && inst.size_req != 2)
10226 inst.relax = opcode;
10227 }
10228 inst.reloc.type = reloc;
10229 inst.reloc.pc_rel = 1;
10230 }
10231
10232 /* Actually do the work for Thumb state bkpt and hlt. The only difference
10233 between the two is the maximum immediate allowed - which is passed in
10234 RANGE. */
10235 static void
10236 do_t_bkpt_hlt1 (int range)
10237 {
10238 constraint (inst.cond != COND_ALWAYS,
10239 _("instruction is always unconditional"));
10240 if (inst.operands[0].present)
10241 {
10242 constraint (inst.operands[0].imm > range,
10243 _("immediate value out of range"));
10244 inst.instruction |= inst.operands[0].imm;
10245 }
10246
10247 set_it_insn_type (NEUTRAL_IT_INSN);
10248 }
10249
/* Thumb HLT: immediate limited to 0-63.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
10255
/* Thumb BKPT: immediate limited to 0-255.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
10261
/* Thumb BL with a 23-bit pc-relative displacement (argument parse).  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	 inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
10289
10290 static void
10291 do_t_bx (void)
10292 {
10293 set_it_insn_type_last ();
10294 inst.instruction |= inst.operands[0].reg << 3;
10295 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
10296 should cause the alignment to be checked once it is known. This is
10297 because BX PC only works if the instruction is word aligned. */
10298 }
10299
10300 static void
10301 do_t_bxj (void)
10302 {
10303 int Rm;
10304
10305 set_it_insn_type_last ();
10306 Rm = inst.operands[0].reg;
10307 reject_bad_reg (Rm);
10308 inst.instruction |= Rm << 16;
10309 }
10310
10311 static void
10312 do_t_clz (void)
10313 {
10314 unsigned Rd;
10315 unsigned Rm;
10316
10317 Rd = inst.operands[0].reg;
10318 Rm = inst.operands[1].reg;
10319
10320 reject_bad_reg (Rd);
10321 reject_bad_reg (Rm);
10322
10323 inst.instruction |= Rd << 8;
10324 inst.instruction |= Rm << 16;
10325 inst.instruction |= Rm;
10326 }
10327
/* Thumb CPS, 16-bit interrupt-flags form: the operand bits drop
   straight into the low bits of the template.  Not permitted in an
   IT block.  */
static void
do_t_cps (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
10334
/* Thumb CPSIE/CPSID, optionally with a mode argument.  The two-operand
   and wide forms require the 32-bit Thumb-2 encoding.  */
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Rebuild as the 32-bit encoding, carrying over the imod field
	 (interrupt enable/disable) from the 16-bit template.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;	/* M bit + mode.  */
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
10362
10363 /* THUMB CPY instruction (argument parse). */
10364
10365 static void
10366 do_t_cpy (void)
10367 {
10368 if (inst.size_req == 4)
10369 {
10370 inst.instruction = THUMB_OP32 (T_MNEM_mov);
10371 inst.instruction |= inst.operands[0].reg << 8;
10372 inst.instruction |= inst.operands[1].reg;
10373 }
10374 else
10375 {
10376 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
10377 inst.instruction |= (inst.operands[0].reg & 0x7);
10378 inst.instruction |= inst.operands[1].reg << 3;
10379 }
10380 }
10381
10382 static void
10383 do_t_cbz (void)
10384 {
10385 set_it_insn_type (OUTSIDE_IT_INSN);
10386 constraint (inst.operands[0].reg > 7, BAD_HIREG);
10387 inst.instruction |= inst.operands[0].reg;
10388 inst.reloc.pc_rel = 1;
10389 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
10390 }
10391
/* Thumb-2 DBG hint: 4-bit option in the low bits.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
10397
10398 static void
10399 do_t_div (void)
10400 {
10401 unsigned Rd, Rn, Rm;
10402
10403 Rd = inst.operands[0].reg;
10404 Rn = (inst.operands[1].present
10405 ? inst.operands[1].reg : Rd);
10406 Rm = inst.operands[2].reg;
10407
10408 reject_bad_reg (Rd);
10409 reject_bad_reg (Rn);
10410 reject_bad_reg (Rm);
10411
10412 inst.instruction |= Rd << 8;
10413 inst.instruction |= Rn << 16;
10414 inst.instruction |= Rm;
10415 }
10416
10417 static void
10418 do_t_hint (void)
10419 {
10420 if (unified_syntax && inst.size_req == 4)
10421 inst.instruction = THUMB_OP32 (inst.instruction);
10422 else
10423 inst.instruction = THUMB_OP16 (inst.instruction);
10424 }
10425
/* Thumb-2 IT (if-then) instruction.  Records the new IT block state in
   NOW_IT and fixes up the mask when the base condition is negative.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  /* Stash the mask with a trailing sentinel bit so later instructions
     can step through the block, plus the base condition.  */
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the lowest set bit in the mask tells how many
	 instructions follow the IT; the bits above it encode then/else
	 and must be flipped when the base condition's low bit is 0.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
10468
/* Helper function used for both push/pop and ldm/stm.  BASE is the base
   register number, MASK the 16-bit register list, and WRITEBACK whether
   the base register is updated.  Single-register transfers are degraded
   to LDR/STR encodings.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the already-selected opcode distinguishes load from store.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error =  _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC ends the IT block (it is a branch).  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Reuse MASK to carry the single register number into the
	 Rt field (bits 12-15) below.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
10532
/* Thumb LDM/STM (argument parse).  In unified syntax tries hard to pick
   a 16-bit encoding (including push/pop and single-register ldr/str
   degradations); otherwise emits the 32-bit Thumb-2 form via
   encode_thumb2_ldmstm.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* 16-bit STMIA always writes back; 16-bit LDMIA writes
		 back exactly when the base is absent from the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0].reg == REG_SP)
	    {
	      /* SP-based multiples become push/pop, or SP-relative
		 str/ldr for a single register without writeback.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Pre-UAL syntax: only 16-bit ldmia/stmia with low registers.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
10660
10661 static void
10662 do_t_ldrex (void)
10663 {
10664 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
10665 || inst.operands[1].postind || inst.operands[1].writeback
10666 || inst.operands[1].immisreg || inst.operands[1].shifted
10667 || inst.operands[1].negative,
10668 BAD_ADDR_MODE);
10669
10670 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
10671
10672 inst.instruction |= inst.operands[0].reg << 12;
10673 inst.instruction |= inst.operands[1].reg << 16;
10674 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10675 }
10676
10677 static void
10678 do_t_ldrexd (void)
10679 {
10680 if (!inst.operands[1].present)
10681 {
10682 constraint (inst.operands[0].reg == REG_LR,
10683 _("r14 not allowed as first register "
10684 "when second register is omitted"));
10685 inst.operands[1].reg = inst.operands[0].reg + 1;
10686 }
10687 constraint (inst.operands[0].reg == inst.operands[1].reg,
10688 BAD_OVERLAP);
10689
10690 inst.instruction |= inst.operands[0].reg << 12;
10691 inst.instruction |= inst.operands[1].reg << 8;
10692 inst.instruction |= inst.operands[2].reg << 16;
10693 }
10694
/* Thumb load/store of a single data item (LDR/STR and the byte,
   halfword and signed variants).  Chooses a 16-bit encoding whenever
   one exists, falling back to the 32-bit Thumb-2 encoding; in pre-UAL
   syntax only the 16-bit forms are accepted.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load that writes PC is a branch: it must end any IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Immediate/symbolic operand: may become a literal-pool load
	     or a mov; move_or_literal_pool handles it entirely.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* Must be PC- or SP-relative: switch to the dedicated
		     16-bit opcodes for those bases.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Size not forced: allow relaxation to the wide form.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Everything below handles pre-UAL (divided) syntax only.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Translate the immediate-offset opcodes into their register-offset
     counterparts.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
10881
10882 static void
10883 do_t_ldstd (void)
10884 {
10885 if (!inst.operands[1].present)
10886 {
10887 inst.operands[1].reg = inst.operands[0].reg + 1;
10888 constraint (inst.operands[0].reg == REG_LR,
10889 _("r14 not allowed here"));
10890 constraint (inst.operands[0].reg == REG_R12,
10891 _("r12 not allowed here"));
10892 }
10893
10894 if (inst.operands[2].writeback
10895 && (inst.operands[0].reg == inst.operands[2].reg
10896 || inst.operands[1].reg == inst.operands[2].reg))
10897 as_warn (_("base register written back, and overlaps "
10898 "one of transfer registers"));
10899
10900 inst.instruction |= inst.operands[0].reg << 12;
10901 inst.instruction |= inst.operands[1].reg << 8;
10902 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
10903 }
10904
/* Thumb-2 unprivileged load/store (LDRT/STRT family).  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
10911
10912 static void
10913 do_t_mla (void)
10914 {
10915 unsigned Rd, Rn, Rm, Ra;
10916
10917 Rd = inst.operands[0].reg;
10918 Rn = inst.operands[1].reg;
10919 Rm = inst.operands[2].reg;
10920 Ra = inst.operands[3].reg;
10921
10922 reject_bad_reg (Rd);
10923 reject_bad_reg (Rn);
10924 reject_bad_reg (Rm);
10925 reject_bad_reg (Ra);
10926
10927 inst.instruction |= Rd << 8;
10928 inst.instruction |= Rn << 16;
10929 inst.instruction |= Rm;
10930 inst.instruction |= Ra << 12;
10931 }
10932
10933 static void
10934 do_t_mlal (void)
10935 {
10936 unsigned RdLo, RdHi, Rn, Rm;
10937
10938 RdLo = inst.operands[0].reg;
10939 RdHi = inst.operands[1].reg;
10940 Rn = inst.operands[2].reg;
10941 Rm = inst.operands[3].reg;
10942
10943 reject_bad_reg (RdLo);
10944 reject_bad_reg (RdHi);
10945 reject_bad_reg (Rn);
10946 reject_bad_reg (Rm);
10947
10948 inst.instruction |= RdLo << 12;
10949 inst.instruction |= RdHi << 8;
10950 inst.instruction |= Rn << 16;
10951 inst.instruction |= Rm;
10952 }
10953
/* Thumb MOV/MOVS/CMP with a register or immediate operand (argument
   parse).  Selects among the many 16-bit and 32-bit encodings,
   including the shift-instruction encodings used for shifted moves.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* A mov to PC is a branch: it must be the last insn of an IT block.  */
  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* Shift applied to the destination field in the wide encoding:
	 MOV places Rd at bit 8, CMP places Rn at bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_warn (_("Use of r%u as a source register is "
				 "deprecated when r%u is the destination "
				 "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
	      else
		/* No explicit size: allow relaxation to the wide form.  */
		inst.relax = opcode;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	/* Narrow, unshifted register form.  */
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Pre-UAL syntax from here on.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
11242
/* Thumb-2 MOVW/MOVT (16-bit immediate move to lower/upper half).  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 23 distinguishes MOVT (top half) from MOVW.  */
  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      /* :lower16: prefix — only meaningful on MOVW.  */
      constraint (top, _(":lower16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      /* :upper16: prefix — only meaningful on MOVT.  */
      constraint (!top, _(":upper16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* The immediate is fully known now: scatter its 16 bits into the
	 imm4:i:imm3:imm8 fields of the encoding.  */
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
11275
/* Encode MVN/MVNS/TST/TEQ/CMP/CMN: data-processing with one register
   and one flexible second operand (register, shifted register, or
   immediate).  Chooses 16- vs 32-bit encodings in unified syntax.  */

static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN only forbid PC for the first operand; the others also
     forbid SP (via reject_bad_reg).  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN/MVNS put their register at bit 8 (Rd field); the test and
	 compare instructions use the Rn field at bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Switch to the modified-immediate form of the encoding.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      /* A register-controlled shift cannot be folded into the
		 shifted-operand field.  */
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: 16-bit encodings only, low registers, no
	 shifted operands.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
11354
/* Encode MRS: read a status/special register (APSR/CPSR/SPSR, a banked
   register, or an M-profile system register) into Rd.  */

static void
do_t_mrs (void)
{
  unsigned Rd;

  /* The mnemonic may actually be the VFP VMRS alias; try that first.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form: the parser packed the selection bits
	 into the register value.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698: The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
	  constraint ((flags != 0) && m_profile, _("selected processor does "
		      "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
11401
/* Encode MSR: write Rn into a status/special register.  The field mask
   and register selection come from the parsed first operand.  */

static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* The mnemonic may actually be the VFP VMSR alias; try that first.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* Banked-register destinations carry their bits in .reg; the
     psr-flags form uses .imm.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698: The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
		"requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
11447
/* Encode MUL/MULS.  The 16-bit form requires the destination to be one
   of the sources; the 32-bit three-operand form must not set flags.  */

static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* Two-operand form: the second source defaults to the destination.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* Narrow only if no .w suffix, Rd overlaps a source, and both
	 sources are low registers.  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      /* The other (non-overlapping) source goes in the Rm field.  */
      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
11510
11511 static void
11512 do_t_mull (void)
11513 {
11514 unsigned RdLo, RdHi, Rn, Rm;
11515
11516 RdLo = inst.operands[0].reg;
11517 RdHi = inst.operands[1].reg;
11518 Rn = inst.operands[2].reg;
11519 Rm = inst.operands[3].reg;
11520
11521 reject_bad_reg (RdLo);
11522 reject_bad_reg (RdHi);
11523 reject_bad_reg (Rn);
11524 reject_bad_reg (Rm);
11525
11526 inst.instruction |= RdLo << 12;
11527 inst.instruction |= RdHi << 8;
11528 inst.instruction |= Rn << 16;
11529 inst.instruction |= Rm;
11530
11531 if (RdLo == RdHi)
11532 as_tsktsk (_("rdhi and rdlo must be different"));
11533 }
11534
11535 static void
11536 do_t_nop (void)
11537 {
11538 set_it_insn_type (NEUTRAL_IT_INSN);
11539
11540 if (unified_syntax)
11541 {
11542 if (inst.size_req == 4 || inst.operands[0].imm > 15)
11543 {
11544 inst.instruction = THUMB_OP32 (inst.instruction);
11545 inst.instruction |= inst.operands[0].imm;
11546 }
11547 else
11548 {
11549 /* PR9722: Check for Thumb2 availability before
11550 generating a thumb2 nop instruction. */
11551 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
11552 {
11553 inst.instruction = THUMB_OP16 (inst.instruction);
11554 inst.instruction |= inst.operands[0].imm << 4;
11555 }
11556 else
11557 inst.instruction = 0x46c0;
11558 }
11559 }
11560 else
11561 {
11562 constraint (inst.operands[0].present,
11563 _("Thumb does not support NOP with hints"));
11564 inst.instruction = 0x46c0;
11565 }
11566 }
11567
11568 static void
11569 do_t_neg (void)
11570 {
11571 if (unified_syntax)
11572 {
11573 bfd_boolean narrow;
11574
11575 if (THUMB_SETS_FLAGS (inst.instruction))
11576 narrow = !in_it_block ();
11577 else
11578 narrow = in_it_block ();
11579 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
11580 narrow = FALSE;
11581 if (inst.size_req == 4)
11582 narrow = FALSE;
11583
11584 if (!narrow)
11585 {
11586 inst.instruction = THUMB_OP32 (inst.instruction);
11587 inst.instruction |= inst.operands[0].reg << 8;
11588 inst.instruction |= inst.operands[1].reg << 16;
11589 }
11590 else
11591 {
11592 inst.instruction = THUMB_OP16 (inst.instruction);
11593 inst.instruction |= inst.operands[0].reg;
11594 inst.instruction |= inst.operands[1].reg << 3;
11595 }
11596 }
11597 else
11598 {
11599 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
11600 BAD_HIREG);
11601 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11602
11603 inst.instruction = THUMB_OP16 (inst.instruction);
11604 inst.instruction |= inst.operands[0].reg;
11605 inst.instruction |= inst.operands[1].reg << 3;
11606 }
11607 }
11608
11609 static void
11610 do_t_orn (void)
11611 {
11612 unsigned Rd, Rn;
11613
11614 Rd = inst.operands[0].reg;
11615 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
11616
11617 reject_bad_reg (Rd);
11618 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
11619 reject_bad_reg (Rn);
11620
11621 inst.instruction |= Rd << 8;
11622 inst.instruction |= Rn << 16;
11623
11624 if (!inst.operands[2].isreg)
11625 {
11626 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11627 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11628 }
11629 else
11630 {
11631 unsigned Rm;
11632
11633 Rm = inst.operands[2].reg;
11634 reject_bad_reg (Rm);
11635
11636 constraint (inst.operands[2].shifted
11637 && inst.operands[2].immisreg,
11638 _("shift must be constant"));
11639 encode_thumb32_shifted_operand (2);
11640 }
11641 }
11642
11643 static void
11644 do_t_pkhbt (void)
11645 {
11646 unsigned Rd, Rn, Rm;
11647
11648 Rd = inst.operands[0].reg;
11649 Rn = inst.operands[1].reg;
11650 Rm = inst.operands[2].reg;
11651
11652 reject_bad_reg (Rd);
11653 reject_bad_reg (Rn);
11654 reject_bad_reg (Rm);
11655
11656 inst.instruction |= Rd << 8;
11657 inst.instruction |= Rn << 16;
11658 inst.instruction |= Rm;
11659 if (inst.operands[3].present)
11660 {
11661 unsigned int val = inst.reloc.exp.X_add_number;
11662 constraint (inst.reloc.exp.X_op != O_constant,
11663 _("expression too complex"));
11664 inst.instruction |= (val & 0x1c) << 10;
11665 inst.instruction |= (val & 0x03) << 6;
11666 }
11667 }
11668
11669 static void
11670 do_t_pkhtb (void)
11671 {
11672 if (!inst.operands[3].present)
11673 {
11674 unsigned Rtmp;
11675
11676 inst.instruction &= ~0x00000020;
11677
11678 /* PR 10168. Swap the Rm and Rn registers. */
11679 Rtmp = inst.operands[1].reg;
11680 inst.operands[1].reg = inst.operands[2].reg;
11681 inst.operands[2].reg = Rtmp;
11682 }
11683 do_t_pkhbt ();
11684 }
11685
/* Encode PLD-family preload hints: a pure address-mode instruction.  */

static void
do_t_pld (void)
{
  /* A register offset may not be SP or PC.  */
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
11694
/* Encode PUSH/POP.  Prefers the 16-bit encodings (low-register mask,
   optionally plus LR for push / PC for pop), falling back to the
   32-bit LDM/STM form in unified syntax.  */

static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if ((mask & ~0xff) == 0)
    /* Only low registers: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if ((inst.instruction == T_MNEM_push
	    && (mask & ~0xff) == 1 << REG_LR)
	   || (inst.instruction == T_MNEM_pop
	       && (mask & ~0xff) == 1 << REG_PC))
    {
      /* Low registers plus exactly LR (push) or PC (pop): still 16-bit,
	 using the extra PC/LR bit.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* 32-bit LDMIA/STMDB with writeback on SP.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
11728
11729 static void
11730 do_t_rbit (void)
11731 {
11732 unsigned Rd, Rm;
11733
11734 Rd = inst.operands[0].reg;
11735 Rm = inst.operands[1].reg;
11736
11737 reject_bad_reg (Rd);
11738 reject_bad_reg (Rm);
11739
11740 inst.instruction |= Rd << 8;
11741 inst.instruction |= Rm << 16;
11742 inst.instruction |= Rm;
11743 }
11744
11745 static void
11746 do_t_rev (void)
11747 {
11748 unsigned Rd, Rm;
11749
11750 Rd = inst.operands[0].reg;
11751 Rm = inst.operands[1].reg;
11752
11753 reject_bad_reg (Rd);
11754 reject_bad_reg (Rm);
11755
11756 if (Rd <= 7 && Rm <= 7
11757 && inst.size_req != 4)
11758 {
11759 inst.instruction = THUMB_OP16 (inst.instruction);
11760 inst.instruction |= Rd;
11761 inst.instruction |= Rm << 3;
11762 }
11763 else if (unified_syntax)
11764 {
11765 inst.instruction = THUMB_OP32 (inst.instruction);
11766 inst.instruction |= Rd << 8;
11767 inst.instruction |= Rm << 16;
11768 inst.instruction |= Rm;
11769 }
11770 else
11771 inst.error = BAD_HIREG;
11772 }
11773
11774 static void
11775 do_t_rrx (void)
11776 {
11777 unsigned Rd, Rm;
11778
11779 Rd = inst.operands[0].reg;
11780 Rm = inst.operands[1].reg;
11781
11782 reject_bad_reg (Rd);
11783 reject_bad_reg (Rm);
11784
11785 inst.instruction |= Rd << 8;
11786 inst.instruction |= Rm;
11787 }
11788
/* Encode RSB/RSBS.  "rsb Rd, Rn, #0" may shrink to the 16-bit NEG
   encoding; all other forms are 32-bit.  */

static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit of the T32 encoding.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* Modified-immediate form; the constant is inserted by the
	     fixup machinery.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
11843
11844 static void
11845 do_t_setend (void)
11846 {
11847 if (warn_on_deprecated
11848 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
11849 as_warn (_("setend use is deprecated for ARMv8"));
11850
11851 set_it_insn_type (OUTSIDE_IT_INSN);
11852 if (inst.operands[0].imm)
11853 inst.instruction |= 0x8;
11854 }
11855
/* Encode the shift instructions ASR/LSL/LSR/ROR, by immediate or by
   register, choosing between 16- and 32-bit encodings.  With only one
   source operand the destination doubles as the first source.  */

static void
do_t_shift (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Decide whether a 16-bit encoding is possible.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit ROR-by-immediate.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* The 16-bit register-shift form is two-operand only.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 32-bit shift-by-register.  */
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* A 32-bit shift-by-immediate is really MOV(S) with a
		 shifted-register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 16-bit shift-by-register.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* 16-bit shift-by-immediate; the amount is inserted by
		 fixup processing.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-UAL syntax: 16-bit only, low registers, and no
	 flag-setting mnemonics.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12003
12004 static void
12005 do_t_simd (void)
12006 {
12007 unsigned Rd, Rn, Rm;
12008
12009 Rd = inst.operands[0].reg;
12010 Rn = inst.operands[1].reg;
12011 Rm = inst.operands[2].reg;
12012
12013 reject_bad_reg (Rd);
12014 reject_bad_reg (Rn);
12015 reject_bad_reg (Rm);
12016
12017 inst.instruction |= Rd << 8;
12018 inst.instruction |= Rn << 16;
12019 inst.instruction |= Rm;
12020 }
12021
12022 static void
12023 do_t_simd2 (void)
12024 {
12025 unsigned Rd, Rn, Rm;
12026
12027 Rd = inst.operands[0].reg;
12028 Rm = inst.operands[1].reg;
12029 Rn = inst.operands[2].reg;
12030
12031 reject_bad_reg (Rd);
12032 reject_bad_reg (Rn);
12033 reject_bad_reg (Rm);
12034
12035 inst.instruction |= Rd << 8;
12036 inst.instruction |= Rn << 16;
12037 inst.instruction |= Rm;
12038 }
12039
/* Encode SMC #imm (Security Extensions; gated here on v7-A).  */

static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  inst.reloc.type = BFD_RELOC_UNUSED;
  /* NOTE(review): the low 4 bits land at bits 16-19 (the imm4 field);
     higher bits are scattered into other fields with no range check --
     verify against the T32 SMC encoding.  */
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
}
12053
12054 static void
12055 do_t_hvc (void)
12056 {
12057 unsigned int value = inst.reloc.exp.X_add_number;
12058
12059 inst.reloc.type = BFD_RELOC_UNUSED;
12060 inst.instruction |= (value & 0x0fff);
12061 inst.instruction |= (value & 0xf000) << 4;
12062 }
12063
/* Common encoder for SSAT and USAT: Rd, #pos, Rn {, shift}.  BIAS is
   subtracted from the saturation position before encoding (1 for
   SSAT, 0 for USAT).  */

static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  /* ASR (as opposed to LSL) sets the sh bit.  */
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit. */

	  /* Split the amount across the imm3 and imm2 fields.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
12101
/* Encode SSAT: the saturation position is encoded biased by one.  */

static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
12107
12108 static void
12109 do_t_ssat16 (void)
12110 {
12111 unsigned Rd, Rn;
12112
12113 Rd = inst.operands[0].reg;
12114 Rn = inst.operands[2].reg;
12115
12116 reject_bad_reg (Rd);
12117 reject_bad_reg (Rn);
12118
12119 inst.instruction |= Rd << 8;
12120 inst.instruction |= inst.operands[1].imm - 1;
12121 inst.instruction |= Rn << 16;
12122 }
12123
/* Encode STREX Rd, Rt, [Rn {, #imm}].  Only a plain immediate-offset
   addressing mode is legal.  */

static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset is range-checked and inserted by fixup processing.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
12140
/* Encode STREXD Rd, Rt, Rt2, [Rn].  Rt2 defaults to Rt + 1.  */

static void
do_t_strexd (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  /* The status register must not overlap either source or the base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
12157
12158 static void
12159 do_t_sxtah (void)
12160 {
12161 unsigned Rd, Rn, Rm;
12162
12163 Rd = inst.operands[0].reg;
12164 Rn = inst.operands[1].reg;
12165 Rm = inst.operands[2].reg;
12166
12167 reject_bad_reg (Rd);
12168 reject_bad_reg (Rn);
12169 reject_bad_reg (Rm);
12170
12171 inst.instruction |= Rd << 8;
12172 inst.instruction |= Rn << 16;
12173 inst.instruction |= Rm;
12174 inst.instruction |= inst.operands[3].imm << 4;
12175 }
12176
/* Encode the sign/zero-extend family (SXTH etc.): extend Rm into Rd,
   optionally rotating Rm first.  */

static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      /* Low registers and no rotation: 16-bit encoding.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* The rotate operand is inserted at bit 4.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
12212
/* Encode SVC/SWI; the immediate is inserted by fixup processing.  */

static void
do_t_swi (void)
{
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
	  /* This only applies to the v6m however, not later architectures.  */
	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
	as_bad (_("SVC is not permitted on this architecture"));
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
12229
/* Encode TBB/TBH [Rn, Rm{, LSL #1}]: table branch.  Bit 4 of the
   opcode selects the halfword (TBH) form.  */

static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* A table branch must be the last instruction in an IT block.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only TBH may carry the LSL #1 index shift.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
12251
/* Encode USAT: unlike SSAT, the saturation position is encoded
   without a bias.  */

static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
12257
12258 static void
12259 do_t_usat16 (void)
12260 {
12261 unsigned Rd, Rn;
12262
12263 Rd = inst.operands[0].reg;
12264 Rn = inst.operands[2].reg;
12265
12266 reject_bad_reg (Rd);
12267 reject_bad_reg (Rn);
12268
12269 inst.instruction |= Rd << 8;
12270 inst.instruction |= inst.operands[1].imm;
12271 inst.instruction |= Rn << 16;
12272 }
12273
12274 /* Neon instruction encoder helpers. */
12275
12276 /* Encodings for the different types for various Neon opcodes. */
12277
/* An "invalid" code for the following tables: marks a variant that has
   no encoding for the given opcode.  */
#define N_INV -1u

/* One row of the overloaded-Neon-opcode table: alternative base
   encodings selected according to operand kind.  */
struct neon_tab_entry
{
  unsigned integer;		/* Integer (or default) variant.  */
  unsigned float_or_poly;	/* Float or polynomial variant.  */
  unsigned scalar_or_imm;	/* Scalar or immediate variant.  */
};
12287
/* Map overloaded Neon opcodes to their respective encodings.  Each X()
   row gives, for one mnemonic, the base encodings of its integer,
   float/polynomial, and scalar/immediate variants (N_INV where no such
   variant exists).  Expanded twice below: once to build enum neon_opc
   and once to build neon_enc_tab.  */
#define NEON_ENC_TAB \
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
12363
/* One N_MNEM_* identifier per NEON_ENC_TAB row; these index
   neon_enc_tab below.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};
12370
/* The encoding table itself, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
12377
/* Do not use these macros; instead, use NEON_ENCODE defined below.
   Each one maps the N_MNEM_* value held in the low 28 bits of X to the
   appropriate base bit pattern from neon_enc_tab.  Several names alias
   the same table field; they exist for readability at the call site.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
/* The VFP SINGLE/DOUBLE variants preserve the condition bits (28-31)
   already present in X; FPV8 preserves bits 24-27.  */
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace the N_MNEM_* value currently in inst.instruction with the
   TYPE variant of its base encoding, and mark the instruction as Neon
   so that check_neon_suffixes accepts any type suffix it carried.  */
#define NEON_ENCODE(type, inst) \
  do \
    { \
      inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
      inst.is_neon = 1; \
    } \
  while (0)

/* Diagnose a Neon type suffix (e.g. ".s32") on an instruction that was
   not encoded through a Neon/VFP path.  */
#define check_neon_suffixes \
  do \
    { \
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
	{ \
	  as_bad (_("invalid neon suffix for non neon instruction")); \
	  return; \
	} \
    } \
  while (0)
12413
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used (via the X macro, redefined before each expansion)
   to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF \
  X(3, (D, D, D), DOUBLE), \
  X(3, (Q, Q, Q), QUAD), \
  X(3, (D, D, I), DOUBLE), \
  X(3, (Q, Q, I), QUAD), \
  X(3, (D, D, S), DOUBLE), \
  X(3, (Q, Q, S), QUAD), \
  X(2, (D, D), DOUBLE), \
  X(2, (Q, Q), QUAD), \
  X(2, (D, S), DOUBLE), \
  X(2, (Q, S), QUAD), \
  X(2, (D, R), DOUBLE), \
  X(2, (Q, R), QUAD), \
  X(2, (D, I), DOUBLE), \
  X(2, (Q, I), QUAD), \
  X(3, (D, L, D), DOUBLE), \
  X(2, (D, Q), MIXED), \
  X(2, (Q, D), MIXED), \
  X(3, (D, Q, I), MIXED), \
  X(3, (Q, D, I), MIXED), \
  X(3, (Q, D, D), MIXED), \
  X(3, (D, Q, Q), MIXED), \
  X(3, (Q, Q, D), MIXED), \
  X(3, (Q, D, S), MIXED), \
  X(3, (D, Q, S), MIXED), \
  X(4, (D, D, D, I), DOUBLE), \
  X(4, (Q, Q, Q, I), QUAD), \
  X(2, (F, F), SINGLE), \
  X(3, (F, F, F), SINGLE), \
  X(2, (F, I), SINGLE), \
  X(2, (F, D), MIXED), \
  X(2, (D, F), MIXED), \
  X(3, (F, F, I), MIXED), \
  X(4, (R, R, F, F), SINGLE), \
  X(4, (F, F, R, R), SINGLE), \
  X(3, (D, R, R), DOUBLE), \
  X(3, (R, R, D), DOUBLE), \
  X(2, (S, R), SINGLE), \
  X(2, (R, S), SINGLE), \
  X(2, (F, R), SINGLE), \
  X(2, (R, F), SINGLE)
12472
/* Paste shape element letters into an NS_<letters> enumerator name of
   the right arity (e.g. S3(D,D,D) -> NS_DDD).  */
#define S2(A,B) NS_##A##B
#define S3(A,B,C) NS_##A##B##C
#define S4(A,B,C,D) NS_##A##B##C##D

#define X(N, L, C) S##N L

/* One NS_<letters> enumerator per shape, plus NS_NULL, which serves
   both as the argument-list terminator and the failure value for
   neon_select_shape.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4

/* Broad classification of a shape: all-single-precision, all-doubleword,
   all-quadword, or a mixture of register kinds.  */
enum neon_shape_class
{
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Classification of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
12506
/* The individual operand kinds a shape may contain (see the mnemonic
   letters documented above NEON_SHAPE_DEF).  */
enum neon_shape_el
{
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above, in bits, indexed by enum neon_shape_el.
   Zero for the kinds (immediates, register lists) with no fixed
   register width.  */
static unsigned neon_shape_el_size[] =
{
  32,
  64,
  128,
  0,
  32,
  32,
  0
};

/* Decomposed form of one shape: the operand count and the kind of each
   operand.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B) { SE_##A, SE_##B }
#define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Per-shape operand information, indexed by enum neon_shape; drives
   neon_select_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
12551
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.
   Note the N_DBL..N_SIZ modifier values deliberately reuse the low bit
   positions: they are only interpreted when N_EQK is set, so they never
   clash with the concrete type bits.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64  = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

/* All N_EQK modifier bits.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Common groupings of concrete type bits.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
12609
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.
   The variable argument list is a sequence of enum neon_shape values
   terminated by NS_NULL; the first shape whose operand kinds all match
   the parsed operands in inst is returned.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Varargs of enum type are promoted to int, hence the cast back.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	    case SE_F:
	      /* VFP single-precision register (S<n>).  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    case SE_D:
	      /* Neon doubleword register (D<n>).  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      /* Plain ARM core register.  */
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      /* Neon quadword register (Q<n>).  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      /* Immediate: neither register nor scalar.  */
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      /* Scalar (element of a vector register).  */
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register list: accepted unconditionally here.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  /* Only report an error when the caller supplied at least one shape;
     the failure value itself is NS_NULL either way.  */
  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
12707
12708 /* True if SHAPE is predominantly a quadword operation (most of the time, this
12709 means the Q bit should be set). */
12710
12711 static int
12712 neon_quad (enum neon_shape shape)
12713 {
12714 return neon_shape_class[shape] == SC_QUAD;
12715 }
12716
12717 static void
12718 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
12719 unsigned *g_size)
12720 {
12721 /* Allow modification to be made to types which are constrained to be
12722 based on the key element, based on bits set alongside N_EQK. */
12723 if ((typebits & N_EQK) != 0)
12724 {
12725 if ((typebits & N_HLF) != 0)
12726 *g_size /= 2;
12727 else if ((typebits & N_DBL) != 0)
12728 *g_size *= 2;
12729 if ((typebits & N_SGN) != 0)
12730 *g_type = NT_signed;
12731 else if ((typebits & N_UNS) != 0)
12732 *g_type = NT_unsigned;
12733 else if ((typebits & N_INT) != 0)
12734 *g_type = NT_integer;
12735 else if ((typebits & N_FLT) != 0)
12736 *g_type = NT_float;
12737 else if ((typebits & N_SIZ) != 0)
12738 *g_type = NT_untyped;
12739 }
12740 }
12741
12742 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
12743 operand type, i.e. the single type specified in a Neon instruction when it
12744 is the only one given. */
12745
12746 static struct neon_type_el
12747 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
12748 {
12749 struct neon_type_el dest = *key;
12750
12751 gas_assert ((thisarg & N_EQK) != 0);
12752
12753 neon_modify_type_size (thisarg, &dest.type, &dest.size);
12754
12755 return dest;
12756 }
12757
12758 /* Convert Neon type and size into compact bitmask representation. */
12759
12760 static enum neon_type_mask
12761 type_chk_of_el_type (enum neon_el_type type, unsigned size)
12762 {
12763 switch (type)
12764 {
12765 case NT_untyped:
12766 switch (size)
12767 {
12768 case 8: return N_8;
12769 case 16: return N_16;
12770 case 32: return N_32;
12771 case 64: return N_64;
12772 default: ;
12773 }
12774 break;
12775
12776 case NT_integer:
12777 switch (size)
12778 {
12779 case 8: return N_I8;
12780 case 16: return N_I16;
12781 case 32: return N_I32;
12782 case 64: return N_I64;
12783 default: ;
12784 }
12785 break;
12786
12787 case NT_float:
12788 switch (size)
12789 {
12790 case 16: return N_F16;
12791 case 32: return N_F32;
12792 case 64: return N_F64;
12793 default: ;
12794 }
12795 break;
12796
12797 case NT_poly:
12798 switch (size)
12799 {
12800 case 8: return N_P8;
12801 case 16: return N_P16;
12802 case 64: return N_P64;
12803 default: ;
12804 }
12805 break;
12806
12807 case NT_signed:
12808 switch (size)
12809 {
12810 case 8: return N_S8;
12811 case 16: return N_S16;
12812 case 32: return N_S32;
12813 case 64: return N_S64;
12814 default: ;
12815 }
12816 break;
12817
12818 case NT_unsigned:
12819 switch (size)
12820 {
12821 case 8: return N_U8;
12822 case 16: return N_U16;
12823 case 32: return N_U32;
12824 case 64: return N_U64;
12825 default: ;
12826 }
12827 break;
12828
12829 default: ;
12830 }
12831
12832 return N_UTYP;
12833 }
12834
12835 /* Convert compact Neon bitmask type representation to a type and size. Only
12836 handles the case where a single bit is set in the mask. */
12837
12838 static int
12839 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
12840 enum neon_type_mask mask)
12841 {
12842 if ((mask & N_EQK) != 0)
12843 return FAIL;
12844
12845 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
12846 *size = 8;
12847 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
12848 *size = 16;
12849 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
12850 *size = 32;
12851 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
12852 *size = 64;
12853 else
12854 return FAIL;
12855
12856 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
12857 *type = NT_signed;
12858 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
12859 *type = NT_unsigned;
12860 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
12861 *type = NT_integer;
12862 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
12863 *type = NT_untyped;
12864 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
12865 *type = NT_poly;
12866 else if ((mask & (N_F16 | N_F32 | N_F64)) != 0)
12867 *type = NT_float;
12868 else
12869 return FAIL;
12870
12871 return SUCCESS;
12872 }
12873
12874 /* Modify a bitmask of allowed types. This is only needed for type
12875 relaxation. */
12876
12877 static unsigned
12878 modify_types_allowed (unsigned allowed, unsigned mods)
12879 {
12880 unsigned size;
12881 enum neon_el_type type;
12882 unsigned destmask;
12883 int i;
12884
12885 destmask = 0;
12886
12887 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
12888 {
12889 if (el_type_of_type_chk (&type, &size,
12890 (enum neon_type_mask) (allowed & i)) == SUCCESS)
12891 {
12892 neon_modify_type_size (mods, &type, &size);
12893 destmask |= type_chk_of_el_type (type, size);
12894 }
12895 }
12896
12897 return destmask;
12898 }
12899
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.
   ELS is the number of type arguments that follow; NS is the shape chosen
   by neon_select_shape (used only for N_VFP width checks).  Returns the
   key element's type, or {NT_invtype, -1} on error.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;		/* -1u = no key size recorded yet.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE means the caller wants no checking at all.  */
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type on the mnemonic and a type on an operand are mutually
     exclusive ways of writing the same thing.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      /* One type given: it applies to the key element, and the others
	 are derived from it via their N_EQK modifier bits.  */
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: pass 0 records the key element's (possibly decayed) type,
     size and allowed-type mask; pass 1 checks every element against the
     key and its own constraint bits.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* This operand must match the key type after applying
		     any modifier bits (N_DBL, N_HLF, ...).  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
13088
13089 /* Neon-style VFP instruction forwarding. */
13090
13091 /* Thumb VFP instructions have 0xE in the condition field. */
13092
13093 static void
13094 do_vfp_cond_or_thumb (void)
13095 {
13096 inst.is_neon = 1;
13097
13098 if (thumb_mode)
13099 inst.instruction |= 0xe0000000;
13100 else
13101 inst.instruction |= inst.cond << 28;
13102 }
13103
13104 /* Look up and encode a simple mnemonic, for use as a helper function for the
13105 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13106 etc. It is assumed that operand parsing has already been done, and that the
13107 operands are in the form expected by the given opcode (this isn't necessarily
13108 the same as the form in which they were parsed, hence some massaging must
13109 take place before this function is called).
13110 Checks current arch version against that in the looked-up opcode. */
13111
13112 static void
13113 do_vfp_nsyn_opcode (const char *opname)
13114 {
13115 const struct asm_opcode *opcode;
13116
13117 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
13118
13119 if (!opcode)
13120 abort ();
13121
13122 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
13123 thumb_mode ? *opcode->tvariant : *opcode->avariant),
13124 _(BAD_FPU));
13125
13126 inst.is_neon = 1;
13127
13128 if (thumb_mode)
13129 {
13130 inst.instruction = opcode->tvalue;
13131 opcode->tencode ();
13132 }
13133 else
13134 {
13135 inst.instruction = (inst.cond << 28) | opcode->avalue;
13136 opcode->aencode ();
13137 }
13138 }
13139
13140 static void
13141 do_vfp_nsyn_add_sub (enum neon_shape rs)
13142 {
13143 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
13144
13145 if (rs == NS_FFF)
13146 {
13147 if (is_add)
13148 do_vfp_nsyn_opcode ("fadds");
13149 else
13150 do_vfp_nsyn_opcode ("fsubs");
13151 }
13152 else
13153 {
13154 if (is_add)
13155 do_vfp_nsyn_opcode ("faddd");
13156 else
13157 do_vfp_nsyn_opcode ("fsubd");
13158 }
13159 }
13160
13161 /* Check operand types to see if this is a VFP instruction, and if so call
13162 PFN (). */
13163
13164 static int
13165 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
13166 {
13167 enum neon_shape rs;
13168 struct neon_type_el et;
13169
13170 switch (args)
13171 {
13172 case 2:
13173 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13174 et = neon_check_type (2, rs,
13175 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13176 break;
13177
13178 case 3:
13179 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13180 et = neon_check_type (3, rs,
13181 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13182 break;
13183
13184 default:
13185 abort ();
13186 }
13187
13188 if (et.type != NT_invtype)
13189 {
13190 pfn (rs);
13191 return SUCCESS;
13192 }
13193
13194 inst.error = NULL;
13195 return FAIL;
13196 }
13197
13198 static void
13199 do_vfp_nsyn_mla_mls (enum neon_shape rs)
13200 {
13201 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
13202
13203 if (rs == NS_FFF)
13204 {
13205 if (is_mla)
13206 do_vfp_nsyn_opcode ("fmacs");
13207 else
13208 do_vfp_nsyn_opcode ("fnmacs");
13209 }
13210 else
13211 {
13212 if (is_mla)
13213 do_vfp_nsyn_opcode ("fmacd");
13214 else
13215 do_vfp_nsyn_opcode ("fnmacd");
13216 }
13217 }
13218
13219 static void
13220 do_vfp_nsyn_fma_fms (enum neon_shape rs)
13221 {
13222 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
13223
13224 if (rs == NS_FFF)
13225 {
13226 if (is_fma)
13227 do_vfp_nsyn_opcode ("ffmas");
13228 else
13229 do_vfp_nsyn_opcode ("ffnmas");
13230 }
13231 else
13232 {
13233 if (is_fma)
13234 do_vfp_nsyn_opcode ("ffmad");
13235 else
13236 do_vfp_nsyn_opcode ("ffnmad");
13237 }
13238 }
13239
13240 static void
13241 do_vfp_nsyn_mul (enum neon_shape rs)
13242 {
13243 if (rs == NS_FFF)
13244 do_vfp_nsyn_opcode ("fmuls");
13245 else
13246 do_vfp_nsyn_opcode ("fmuld");
13247 }
13248
13249 static void
13250 do_vfp_nsyn_abs_neg (enum neon_shape rs)
13251 {
13252 int is_neg = (inst.instruction & 0x80) != 0;
13253 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
13254
13255 if (rs == NS_FF)
13256 {
13257 if (is_neg)
13258 do_vfp_nsyn_opcode ("fnegs");
13259 else
13260 do_vfp_nsyn_opcode ("fabss");
13261 }
13262 else
13263 {
13264 if (is_neg)
13265 do_vfp_nsyn_opcode ("fnegd");
13266 else
13267 do_vfp_nsyn_opcode ("fabsd");
13268 }
13269 }
13270
13271 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
13272 insns belong to Neon, and are handled elsewhere. */
13273
13274 static void
13275 do_vfp_nsyn_ldm_stm (int is_dbmode)
13276 {
13277 int is_ldm = (inst.instruction & (1 << 20)) != 0;
13278 if (is_ldm)
13279 {
13280 if (is_dbmode)
13281 do_vfp_nsyn_opcode ("fldmdbs");
13282 else
13283 do_vfp_nsyn_opcode ("fldmias");
13284 }
13285 else
13286 {
13287 if (is_dbmode)
13288 do_vfp_nsyn_opcode ("fstmdbs");
13289 else
13290 do_vfp_nsyn_opcode ("fstmias");
13291 }
13292 }
13293
13294 static void
13295 do_vfp_nsyn_sqrt (void)
13296 {
13297 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13298 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13299
13300 if (rs == NS_FF)
13301 do_vfp_nsyn_opcode ("fsqrts");
13302 else
13303 do_vfp_nsyn_opcode ("fsqrtd");
13304 }
13305
13306 static void
13307 do_vfp_nsyn_div (void)
13308 {
13309 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13310 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13311 N_F32 | N_F64 | N_KEY | N_VFP);
13312
13313 if (rs == NS_FFF)
13314 do_vfp_nsyn_opcode ("fdivs");
13315 else
13316 do_vfp_nsyn_opcode ("fdivd");
13317 }
13318
13319 static void
13320 do_vfp_nsyn_nmul (void)
13321 {
13322 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13323 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13324 N_F32 | N_F64 | N_KEY | N_VFP);
13325
13326 if (rs == NS_FFF)
13327 {
13328 NEON_ENCODE (SINGLE, inst);
13329 do_vfp_sp_dyadic ();
13330 }
13331 else
13332 {
13333 NEON_ENCODE (DOUBLE, inst);
13334 do_vfp_dp_rd_rn_rm ();
13335 }
13336 do_vfp_cond_or_thumb ();
13337 }
13338
13339 static void
13340 do_vfp_nsyn_cmp (void)
13341 {
13342 if (inst.operands[1].isreg)
13343 {
13344 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13345 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13346
13347 if (rs == NS_FF)
13348 {
13349 NEON_ENCODE (SINGLE, inst);
13350 do_vfp_sp_monadic ();
13351 }
13352 else
13353 {
13354 NEON_ENCODE (DOUBLE, inst);
13355 do_vfp_dp_rd_rm ();
13356 }
13357 }
13358 else
13359 {
13360 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
13361 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
13362
13363 switch (inst.instruction & 0x0fffffff)
13364 {
13365 case N_MNEM_vcmp:
13366 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
13367 break;
13368 case N_MNEM_vcmpe:
13369 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
13370 break;
13371 default:
13372 abort ();
13373 }
13374
13375 if (rs == NS_FI)
13376 {
13377 NEON_ENCODE (SINGLE, inst);
13378 do_vfp_sp_compare_z ();
13379 }
13380 else
13381 {
13382 NEON_ENCODE (DOUBLE, inst);
13383 do_vfp_dp_rd ();
13384 }
13385 }
13386 do_vfp_cond_or_thumb ();
13387 }
13388
13389 static void
13390 nsyn_insert_sp (void)
13391 {
13392 inst.operands[1] = inst.operands[0];
13393 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
13394 inst.operands[0].reg = REG_SP;
13395 inst.operands[0].isreg = 1;
13396 inst.operands[0].writeback = 1;
13397 inst.operands[0].present = 1;
13398 }
13399
13400 static void
13401 do_vfp_nsyn_push (void)
13402 {
13403 nsyn_insert_sp ();
13404 if (inst.operands[1].issingle)
13405 do_vfp_nsyn_opcode ("fstmdbs");
13406 else
13407 do_vfp_nsyn_opcode ("fstmdbd");
13408 }
13409
13410 static void
13411 do_vfp_nsyn_pop (void)
13412 {
13413 nsyn_insert_sp ();
13414 if (inst.operands[1].issingle)
13415 do_vfp_nsyn_opcode ("fldmias");
13416 else
13417 do_vfp_nsyn_opcode ("fldmiad");
13418 }
13419
13420 /* Fix up Neon data-processing instructions, ORing in the correct bits for
13421 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
13422
13423 static void
13424 neon_dp_fixup (struct arm_it* insn)
13425 {
13426 unsigned int i = insn->instruction;
13427 insn->is_neon = 1;
13428
13429 if (thumb_mode)
13430 {
13431 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
13432 if (i & (1 << 24))
13433 i |= 1 << 28;
13434
13435 i &= ~(1 << 24);
13436
13437 i |= 0xef000000;
13438 }
13439 else
13440 i |= 0xf2000000;
13441
13442 insn->instruction = i;
13443 }
13444
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  Equivalent to ffs (x) - 4: POS is the 1-based position
   of the least significant set bit, or 0 when X is zero.  */

static unsigned
neon_logbits (unsigned x)
{
  unsigned pos = 0;

  if (x != 0)
    {
      pos = 1;
      while ((x & 1) == 0)
	{
	  x >>= 1;
	  pos++;
	}
    }

  return pos - 4;
}
13453
/* Split a Neon register number into the low four bits and the high
   (fifth) bit, which occupy separate fields in the encoding.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
13456
13457 /* Encode insns with bit pattern:
13458
13459 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
13460 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
13461
13462 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
13463 different meaning for some instruction. */
13464
13465 static void
13466 neon_three_same (int isquad, int ubit, int size)
13467 {
13468 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13469 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13470 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13471 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13472 inst.instruction |= LOW4 (inst.operands[2].reg);
13473 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13474 inst.instruction |= (isquad != 0) << 6;
13475 inst.instruction |= (ubit != 0) << 24;
13476 if (size != -1)
13477 inst.instruction |= neon_logbits (size) << 20;
13478
13479 neon_dp_fixup (&inst);
13480 }
13481
13482 /* Encode instructions of the form:
13483
13484 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
13485 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
13486
13487 Don't write size if SIZE == -1. */
13488
13489 static void
13490 neon_two_same (int qbit, int ubit, int size)
13491 {
13492 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13493 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13494 inst.instruction |= LOW4 (inst.operands[1].reg);
13495 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13496 inst.instruction |= (qbit != 0) << 6;
13497 inst.instruction |= (ubit != 0) << 24;
13498
13499 if (size != -1)
13500 inst.instruction |= neon_logbits (size) << 18;
13501
13502 neon_dp_fixup (&inst);
13503 }
13504
13505 /* Neon instruction encoders, in approximate order of appearance. */
13506
static void
do_neon_dyadic_i_su (void)
{
  /* Three-register dyadic operation on signed/unsigned 8-, 16- or 32-bit
     elements; U bit is set for unsigned types.  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_32 | N_KEY);
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}

static void
do_neon_dyadic_i64_su (void)
{
  /* As do_neon_dyadic_i_su, but additionally permitting 64-bit
     elements (N_SU_ALL).  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}
13524
/* Encode a Neon immediate-shift.  ET gives the element type, IMMBITS the
   raw immediate field, ISQUAD the Q bit.  The U bit (bit 24) is written
   from UVAL only when WRITE_UBIT is set, since some shifts give bit 24 a
   fixed meaning.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  /* Element size in bytes: bit 3 of this (i.e. 64-bit elements) lands in
     bit 7 of the instruction, the low three bits in bits 21:19.  */
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
13543
13544 static void
13545 do_neon_shl_imm (void)
13546 {
13547 if (!inst.operands[2].isreg)
13548 {
13549 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13550 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
13551 NEON_ENCODE (IMMED, inst);
13552 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
13553 }
13554 else
13555 {
13556 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13557 struct neon_type_el et = neon_check_type (3, rs,
13558 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
13559 unsigned int tmp;
13560
13561 /* VSHL/VQSHL 3-register variants have syntax such as:
13562 vshl.xx Dd, Dm, Dn
13563 whereas other 3-register operations encoded by neon_three_same have
13564 syntax like:
13565 vadd.xx Dd, Dn, Dm
13566 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
13567 here. */
13568 tmp = inst.operands[2].reg;
13569 inst.operands[2].reg = inst.operands[1].reg;
13570 inst.operands[1].reg = tmp;
13571 NEON_ENCODE (INTEGER, inst);
13572 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13573 }
13574 }
13575
13576 static void
13577 do_neon_qshl_imm (void)
13578 {
13579 if (!inst.operands[2].isreg)
13580 {
13581 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13582 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13583
13584 NEON_ENCODE (IMMED, inst);
13585 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13586 inst.operands[2].imm);
13587 }
13588 else
13589 {
13590 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13591 struct neon_type_el et = neon_check_type (3, rs,
13592 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
13593 unsigned int tmp;
13594
13595 /* See note in do_neon_shl_imm. */
13596 tmp = inst.operands[2].reg;
13597 inst.operands[2].reg = inst.operands[1].reg;
13598 inst.operands[1].reg = tmp;
13599 NEON_ENCODE (INTEGER, inst);
13600 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13601 }
13602 }
13603
13604 static void
13605 do_neon_rshl (void)
13606 {
13607 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13608 struct neon_type_el et = neon_check_type (3, rs,
13609 N_EQK, N_EQK, N_SU_ALL | N_KEY);
13610 unsigned int tmp;
13611
13612 tmp = inst.operands[2].reg;
13613 inst.operands[2].reg = inst.operands[1].reg;
13614 inst.operands[1].reg = tmp;
13615 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13616 }
13617
/* Select the CMODE encoding for a logic-immediate (VBIC/VORR class) of
   element width SIZE bits, storing the 8-bit payload through IMMBITS.
   Returns FAIL (after reporting an error) if IMMEDIATE is not
   representable.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics?  There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* A word-sized immediate is encodable when all its set bits lie in
	 a single byte; odd CMODE values 0x1/0x3/0x5/0x7 select which.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Otherwise fall back to a halfword encoding, valid only when the
	 word is one halfword repeated.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* Halfword encodings: set bits confined to one byte, CMODE 0x9/0xb.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

 bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
13673
/* Return nonzero when every byte of IMM is either all-zeros or all-ones,
   i.e. IMM has the form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned mask = 0xffu << (byte * 8);
      unsigned field = imm & mask;

      if (field != 0 && field != mask)
	return 0;
    }
  return 1;
}
13685
/* For an immediate of the byte-replicated form above, collapse each byte
   to its low-order bit, giving 0bABCD.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned abcd = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    abcd |= ((imm >> (byte * 8)) & 1u) << byte;
  return abcd;
}
13694
/* Compress the quarter-float representation to 0b...000 abcdefgh:
   bit 31 of IMM becomes bit 7 of the result, bits 25:19 become
   bits 6:0.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned sign = (imm >> 24) & 0x80;
  unsigned rest = (imm >> 19) & 0x7f;

  return sign | rest;
}
13702
13703 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
13704 the instruction. *OP is passed as the initial value of the op field, and
13705 may be set to a different value depending on the constant (i.e.
13706 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
13707 MVN). If the immediate looks like a repeated pattern then also
13708 try smaller element sizes. */
13709
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-float immediate: CMODE 0xf, only valid for 32-bit MOV
     (*OP must not already be 1, i.e. MVN).  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit value whose bytes are each all-zeros or all-ones:
	 CMODE 0xe with OP forced to 1 (hence an incoming OP == 1
	 request cannot be encoded this way).  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit immediate is only usable when it is the same
	 32-bit value repeated; fall through to the 32-bit cases.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* Single set byte within the word: CMODE 0x0/0x2/0x4/0x6 selects
	 the byte position.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* CMODE 0xc/0xd: one byte followed by trailing ones.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Try a 16-bit encoding when the word is one halfword repeated.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* Single set byte within the halfword: CMODE 0x8 or 0xa.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Try an 8-bit encoding when the halfword is one byte repeated.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
13812
13813 /* Write immediate bits [7:0] to the following locations:
13814
13815 |28/24|23 19|18 16|15 4|3 0|
13816 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
13817
13818 This function is used by VMOV/VMVN/VORR/VBIC. */
13819
13820 static void
13821 neon_write_immbits (unsigned immbits)
13822 {
13823 inst.instruction |= immbits & 0xf;
13824 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
13825 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
13826 }
13827
13828 /* Invert low-order SIZE bits of XHI:XLO. */
13829
static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lomask, himask = 0;

  /* Pick the masks of the bits to invert; only SIZE == 64 touches the
     high word.  */
  switch (size)
    {
    case 8:
      lomask = 0xff;
      break;
    case 16:
      lomask = 0xffff;
      break;
    case 32:
      lomask = 0xffffffff;
      break;
    case 64:
      lomask = 0xffffffff;
      himask = 0xffffffff;
      break;
    default:
      abort ();
    }

  /* Either pointer may be NULL, in which case that half is skipped.  */
  if (xlo)
    *xlo = ~*xlo & lomask;
  if (xhi && himask)
    *xhi = ~*xhi & himask;
}
13864
/* Encode VAND/VBIC/VORR/VORN/VEOR-class instructions, in either the
   three-register form or (for VBIC/VORR and the VAND/VORN pseudo-ops)
   the immediate form.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form: purely bitwise, so operand types are not
	 checked against each other.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form, written either "Vd, #imm" or "Vd, Vn, #imm".  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      /* In the three-operand form the destination must repeat the first
	 source register.  */
      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC: invert the immediate.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR: invert the immediate.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
13950
13951 static void
13952 do_neon_bitfield (void)
13953 {
13954 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13955 neon_check_type (3, rs, N_IGNORE_TYPE);
13956 neon_three_same (neon_quad (rs), 0, -1);
13957 }
13958
/* Encode a three-same dyadic operation whose permitted element types are
   TYPES, with DESTBITS adding extra constraints on the destination type.
   Integer forms set the U bit when the element type equals UBIT_MEANING;
   float forms OR in neither U nor the size field.  */

static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
		  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
    types | N_KEY);
  if (et.type == NT_float)
    {
      NEON_ENCODE (FLOAT, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}
13977
static void
do_neon_dyadic_if_su (void)
{
  /* Signed, unsigned or float 32-bit element types; U set for unsigned.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}

static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allows D registers, but that constraint is enforced
     during operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}

static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
13999
/* Bit flags for vfp_or_neon_is_neon, selecting which of its checks and
   fix-ups to perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Condition-code check / fix-up.  */
  NEON_CHECK_ARCH = 2,	/* Require the base Neon extension.  */
  NEON_CHECK_ARCH8 = 4	/* Require the ARMv8 Neon extension.  */
};
14006
14007 /* Call this function if an instruction which may have belonged to the VFP or
14008 Neon instruction sets, but turned out to be a Neon instruction (due to the
14009 operand types involved, etc.). We have to check and/or fix-up a couple of
14010 things:
14011
14012 - Make sure the user hasn't attempted to make a Neon instruction
14013 conditional.
14014 - Alter the value in the condition code field if necessary.
14015 - Make sure that the arch supports Neon instructions.
14016
14017 Which of these operations take place depends on bits from enum
14018 vfp_or_neon_is_neon_bits.
14019
14020 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14021 current instruction's condition is COND_ALWAYS, the condition field is
14022 changed to inst.uncond_value. This is necessary because instructions shared
14023 between VFP and Neon may be conditional for the VFP variants only, and the
14024 unconditional Neon version must have, e.g., 0xF in the condition field. */
14025
14026 static int
14027 vfp_or_neon_is_neon (unsigned check)
14028 {
14029 /* Conditions are always legal in Thumb mode (IT blocks). */
14030 if (!thumb_mode && (check & NEON_CHECK_CC))
14031 {
14032 if (inst.cond != COND_ALWAYS)
14033 {
14034 first_error (_(BAD_COND));
14035 return FAIL;
14036 }
14037 if (inst.uncond_value != -1)
14038 inst.instruction |= inst.uncond_value << 28;
14039 }
14040
14041 if ((check & NEON_CHECK_ARCH)
14042 && !mark_feature_used (&fpu_neon_ext_v1))
14043 {
14044 first_error (_(BAD_FPU));
14045 return FAIL;
14046 }
14047
14048 if ((check & NEON_CHECK_ARCH8)
14049 && !mark_feature_used (&fpu_neon_ext_armv8))
14050 {
14051 first_error (_(BAD_FPU));
14052 return FAIL;
14053 }
14054
14055 return SUCCESS;
14056 }
14057
static void
do_neon_addsub_if_i (void)
{
  /* Prefer a VFP encoding when the operands allow one.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
14071
14072 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14073 result to be:
14074 V<op> A,B (A is operand 0, B is operand 2)
14075 to mean:
14076 V<op> A,B,A
14077 not:
14078 V<op> A,B,B
14079 so handle that case specially. */
14080
14081 static void
14082 neon_exchange_operands (void)
14083 {
14084 void *scratch = alloca (sizeof (inst.operands[0]));
14085 if (inst.operands[1].present)
14086 {
14087 /* Swap operands[1] and operands[2]. */
14088 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14089 inst.operands[1] = inst.operands[2];
14090 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14091 }
14092 else
14093 {
14094 inst.operands[1] = inst.operands[2];
14095 inst.operands[2] = inst.operands[0];
14096 }
14097 }
14098
14099 static void
14100 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
14101 {
14102 if (inst.operands[2].isreg)
14103 {
14104 if (invert)
14105 neon_exchange_operands ();
14106 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
14107 }
14108 else
14109 {
14110 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14111 struct neon_type_el et = neon_check_type (2, rs,
14112 N_EQK | N_SIZ, immtypes | N_KEY);
14113
14114 NEON_ENCODE (IMMED, inst);
14115 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14116 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14117 inst.instruction |= LOW4 (inst.operands[1].reg);
14118 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14119 inst.instruction |= neon_quad (rs) << 6;
14120 inst.instruction |= (et.type == NT_float) << 10;
14121 inst.instruction |= neon_logbits (et.size) << 18;
14122
14123 neon_dp_fixup (&inst);
14124 }
14125 }
14126
static void
do_neon_cmp (void)
{
  /* Ordered comparison: signed/unsigned/float types, register or #0 form.  */
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}

static void
do_neon_cmp_inv (void)
{
  /* Inverted comparison: same types, but the register form swaps its
     source operands (via neon_exchange_operands).  */
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}

static void
do_neon_ceq (void)
{
  /* Equality: integer or float types, no signedness distinction.  */
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
14144
14145 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14146 scalars, which are encoded in 5 bits, M : Rm.
14147 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14148 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14149 index in M. */
14150
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Any other element size, or an out-of-range register/index.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
14176
14177 /* Encode multiply / multiply-accumulate scalar instructions. */
14178
14179 static void
14180 neon_mul_mac (struct neon_type_el et, int ubit)
14181 {
14182 unsigned scalar;
14183
14184 /* Give a more helpful error message if we have an invalid type. */
14185 if (et.type == NT_invtype)
14186 return;
14187
14188 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
14189 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14190 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14191 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14192 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14193 inst.instruction |= LOW4 (scalar);
14194 inst.instruction |= HI1 (scalar) << 5;
14195 inst.instruction |= (et.type == NT_float) << 8;
14196 inst.instruction |= neon_logbits (et.size) << 20;
14197 inst.instruction |= (ubit != 0) << 24;
14198
14199 neon_dp_fixup (&inst);
14200 }
14201
static void
do_neon_mac_maybe_scalar (void)
{
  /* Prefer a VFP multiply-accumulate encoding when one applies.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      /* By-scalar form (last operand is Dm[x]).  */
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}

static void
do_neon_fmac (void)
{
  /* Fused multiply-accumulate; again prefer a VFP encoding if usable.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14238
14239 static void
14240 do_neon_tst (void)
14241 {
14242 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14243 struct neon_type_el et = neon_check_type (3, rs,
14244 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
14245 neon_three_same (neon_quad (rs), 0, et.size);
14246 }
14247
14248 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14249 same types as the MAC equivalents. The polynomial type for this instruction
14250 is encoded the same as the integer type. */
14251
static void
do_neon_mul (void)
{
  /* Prefer a VFP multiply encoding when one applies.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The by-scalar variant shares its encoding with the MAC-by-scalar
     path; the three-register variant additionally permits P8.  */
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
}
14266
14267 static void
14268 do_neon_qdmulh (void)
14269 {
14270 if (inst.operands[2].isscalar)
14271 {
14272 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14273 struct neon_type_el et = neon_check_type (3, rs,
14274 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14275 NEON_ENCODE (SCALAR, inst);
14276 neon_mul_mac (et, neon_quad (rs));
14277 }
14278 else
14279 {
14280 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14281 struct neon_type_el et = neon_check_type (3, rs,
14282 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14283 NEON_ENCODE (INTEGER, inst);
14284 /* The U bit (rounding) comes from bit mask. */
14285 neon_three_same (neon_quad (rs), 0, et.size);
14286 }
14287 }
14288
static void
do_neon_fcmp_absolute (void)
{
  /* Absolute comparison: F32 elements only; U is always set.  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, -1);
}

static void
do_neon_fcmp_absolute_inv (void)
{
  /* Inverted form: swap the source operands, then encode as above.  */
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}

static void
do_neon_step (void)
{
  /* Step operations: F32 elements only; size and U come from the
     opcode bit mask.  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, -1);
}
14312
14313 static void
14314 do_neon_abs_neg (void)
14315 {
14316 enum neon_shape rs;
14317 struct neon_type_el et;
14318
14319 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
14320 return;
14321
14322 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14323 return;
14324
14325 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14326 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
14327
14328 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14329 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14330 inst.instruction |= LOW4 (inst.operands[1].reg);
14331 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14332 inst.instruction |= neon_quad (rs) << 6;
14333 inst.instruction |= (et.type == NT_float) << 10;
14334 inst.instruction |= neon_logbits (et.size) << 18;
14335
14336 neon_dp_fixup (&inst);
14337 }
14338
static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* Shift-left insert: valid range 0..size-1, encoded directly.  */
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}

static void
do_neon_sri (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* Shift-right insert: valid range 1..size, encoded as size - imm.  */
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
14362
/* VQSHLU: saturating shift-left by immediate, signed operands with
   unsigned results.  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
14379
static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  NEON_ENCODE (INTEGER, inst);
  /* Unsigned and signed variants differ only in these opcode bits.  */
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  /* Narrowing: the encoded size is that of the (halved) destination.  */
  neon_two_same (0, 1, et.size / 2);
}

static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
14404
/* Saturating (rounding) shift-right and narrow: Q source, D destination.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
14461
static void
do_neon_movn (void)
{
  /* VMOVN: narrow from Q to D, halving the element size.  */
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}

static void
do_neon_rshift_narrow (void)
{
  /* Shift-right and narrow (Q source, D destination, immediate shift).  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
14495
/* VSHLL: widening shift-left by immediate (D source, Q destination).
   Shifting by exactly the element size uses a distinct encoding.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
14525
14526 /* Check the various types for the VCVT instruction, and return which version
14527 the current instruction is. */
14528
/* X-macro table of VCVT conversion "flavours".  Each CVT_VAR row is:
   C   - flavour name suffix, <dest-type>_<source-type>,
   X/Y - type-check bits for the destination and source operands,
   R   - extra bits OR'd into both type checks (N_VFP, or the computed
	 whole_reg/key values in scope inside get_neon_cvt_flavour),
   BSN - VFP mnemonic for the fixed-point (bitshift) form,
   CN  - VFP mnemonic for the plain conversion form,
   ZN  - VFP mnemonic for the round-towards-zero form.
   A NULL mnemonic means that form does not exist for the flavour.  */

#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

/* Expand each row to an enumerator: neon_cvt_flavour_<C>.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* Every flavour from f32_f64 onwards is a VFP (not Neon) conversion.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
14566
/* Work out which conversion flavour the current (already parsed)
   instruction is, by type-checking each CVT_FLAVOUR_VAR row in turn
   against shape RS until one matches.  A failed check leaves an error
   message in inst.error, which is cleared again on the first success.
   Returns neon_cvt_flavour_invalid if no row matches.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
  if (et.type != NT_invtype) \
    { \
      inst.error = NULL; \
      return (neon_cvt_flavour_##C); \
    }

  struct neon_type_el et;
  /* Scalar (S/D-register) shapes are VFP rather than Neon operations.  */
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width. Hack that
     here by making the size equal to the key (wider, in this case) operand. */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
14592
/* Rounding behaviours for the VCVT/VRINT families.  The letters mirror
   the mnemonic suffixes: a/n/p/m are the ARMv8 directed-rounding forms
   (mapped to the RM field by do_vfp_nsyn_cvt_fpv8), z is round towards
   zero, and x/r are the FPSCR-controlled variants.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
14603
14604 /* Neon-syntax VFP conversions. */
14605
/* Encode a Neon-syntax VCVT as the corresponding traditional VFP
   mnemonic, looked up in the BSN (fixed-point/bitshift) or CN (plain)
   column of CVT_FLAVOUR_VAR.  Does nothing if the flavour has no
   mnemonic for the selected form.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The VFP fixed-point forms use the same register as source and
	     destination, so fold operand 2 (the shift) down into slot 1
	     before handing off to the VFP encoder.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);
}
14649
/* Encode a round-towards-zero VFP conversion, using the ZN column of
   CVT_FLAVOUR_VAR.  Silently does nothing when the detected flavour has
   no round-to-zero mnemonic.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
14666
14667 static void
14668 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
14669 enum neon_cvt_mode mode)
14670 {
14671 int sz, op;
14672 int rm;
14673
14674 set_it_insn_type (OUTSIDE_IT_INSN);
14675
14676 switch (flavour)
14677 {
14678 case neon_cvt_flavour_s32_f64:
14679 sz = 1;
14680 op = 0;
14681 break;
14682 case neon_cvt_flavour_s32_f32:
14683 sz = 0;
14684 op = 1;
14685 break;
14686 case neon_cvt_flavour_u32_f64:
14687 sz = 1;
14688 op = 0;
14689 break;
14690 case neon_cvt_flavour_u32_f32:
14691 sz = 0;
14692 op = 0;
14693 break;
14694 default:
14695 first_error (_("invalid instruction shape"));
14696 return;
14697 }
14698
14699 switch (mode)
14700 {
14701 case neon_cvt_mode_a: rm = 0; break;
14702 case neon_cvt_mode_n: rm = 1; break;
14703 case neon_cvt_mode_p: rm = 2; break;
14704 case neon_cvt_mode_m: rm = 3; break;
14705 default: first_error (_("invalid rounding mode")); return;
14706 }
14707
14708 NEON_ENCODE (FPV8, inst);
14709 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
14710 encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
14711 inst.instruction |= sz << 8;
14712 inst.instruction |= op << 7;
14713 inst.instruction |= rm << 16;
14714 inst.instruction |= 0xf0000000;
14715 inst.is_neon = TRUE;
14716 }
14717
/* Worker for the whole VCVT family.  Dispatches on the operand shape:
   VFP flavours go to the traditional VFP encoders (or the ARMv8
   directed-rounding encoder), Neon shapes are encoded directly here,
   including the fixed-point, integer and half-precision variants.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
    NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	unsigned immbits;
	/* Per-flavour opcode bits (indexed by enum neon_cvt_flavour).  */
	unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion. */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	immbits = 32 - inst.operands[2].imm;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	inst.instruction |= immbits << 16;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* ARMv8 directed-rounding Neon forms (unconditional).  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Per-flavour opcode bits for the integer forms.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
14866
/* VCVTR: convert using the rounding mode currently set in the FPSCR.  */

static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
14872
/* VCVT: the default conversion, round towards zero.  */

static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
14878
/* VCVTA: ARMv8 conversion, round to nearest with ties away from zero.  */

static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
14884
/* VCVTN: ARMv8 conversion, round to nearest even.  */

static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
14890
/* VCVTP: ARMv8 conversion, round towards plus infinity.  */

static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
14896
/* VCVTM: ARMv8 conversion, round towards minus infinity.  */

static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
14902
14903 static void
14904 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
14905 {
14906 if (is_double)
14907 mark_feature_used (&fpu_vfp_ext_armv8);
14908
14909 encode_arm_vfp_reg (inst.operands[0].reg,
14910 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
14911 encode_arm_vfp_reg (inst.operands[1].reg,
14912 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
14913 inst.instruction |= to ? 0x10000 : 0;
14914 inst.instruction |= t ? 0x80 : 0;
14915 inst.instruction |= is_double ? 0x100 : 0;
14916 do_vfp_cond_or_thumb ();
14917 }
14918
/* Worker for VCVTT/VCVTB.  Tries the four legal type signatures in turn;
   each failed neon_check_type leaves a message in inst.error, which is
   cleared once a signature matches.  T distinguishes VCVTT from VCVTB.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_DF, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* f16 <- f32.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f32 <- f16.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* f16 <- f64 (ARMv8).  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f64 <- f16 (ARMv8).  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
14949
/* VCVTB: convert using the bottom half of the half-precision register.  */

static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
14955
14956
/* VCVTT: convert using the top half of the half-precision register.  */

static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
14962
/* Encode the immediate forms of VMOV/VMVN.  Computes the cmode/op/imm
   fields for the parsed immediate; if the value cannot be encoded
   directly, retries with the bitwise-inverted immediate and the
   opposite of VMOV/VMVN before giving up.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
  op = (inst.instruction & (1 << 5)) != 0;

  /* A 64-bit immediate arrives as two 32-bit halves (regisimm).  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only. */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm. */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* OP may have been flipped above, so rewrite bit 5.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
15014
15015 static void
15016 do_neon_mvn (void)
15017 {
15018 if (inst.operands[1].isreg)
15019 {
15020 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15021
15022 NEON_ENCODE (INTEGER, inst);
15023 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15024 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15025 inst.instruction |= LOW4 (inst.operands[1].reg);
15026 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15027 inst.instruction |= neon_quad (rs) << 6;
15028 }
15029 else
15030 {
15031 NEON_ENCODE (IMMED, inst);
15032 neon_move_immediate ();
15033 }
15034
15035 neon_dp_fixup (&inst);
15036 }
15037
15038 /* Encode instructions of form:
15039
15040 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15041 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15042
/* Shared encoder for the long/wide/narrow three-operand forms described
   in the diagram above: registers, the U (unsigned) bit and the size
   field.  SIZE is passed separately because narrowing ops encode half
   the element size (see do_neon_dyadic_narrow).  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
15057
/* Lengthening dyadic ops (e.g. VADDL): Qd = Dn op Dm, result elements
   twice the width of the sources.  */

static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op. */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
15066
/* VABAL: absolute difference and accumulate, long form (Qd += |Dn - Dm|).  */

static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
15074
/* Shared encoder for the long multiply(-accumulate) ops which accept
   either a scalar or a vector as the third operand.
   NOTE(review): the parameter names appear transposed -- REGTYPES feeds
   the scalar-operand check and SCALARTYPES the register-operand check;
   callers pass values consistent with this usage, so confirm against
   the callers before renaming.  */

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
15093
/* VMLAL/VMLSL and friends: long multiply-accumulate, scalar or vector
   third operand (16/32-bit scalars only).  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
15099
/* Widening dyadic ops (e.g. VADDW): Qd = Qn op Dm, the narrow operand
   is widened to match.  */

static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
15107
/* Narrowing dyadic ops (e.g. VADDHN): Dd = Qn op Qm, result elements
   half the width of the sources.  */

static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer. */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
15118
/* VQDMLAL/VQDMLSL/VQDMULL: saturating long multiply ops, signed 16/32-bit
   elements only, scalar or vector third operand.  */

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
15124
/* VMULL: long multiply.  The scalar form is shared with VMLAL; the
   vector form additionally accepts polynomial types, including the
   ARMv8 crypto extension's 64-bit polynomial multiply.  */

static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Forces the size field to 0b10 via neon_logbits in
	     neon_mixed_length.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
15156
/* VEXT: extract a contiguous run of bytes from a register pair.  The
   immediate is given in elements but encoded in bytes.  */

static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Convert the element index to a byte offset.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  /* The byte offset must lie within one source register (8 or 16 bytes).  */
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
15178
/* VREV16/VREV32/VREV64: reverse elements within regions of a vector.  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction. */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
15195
/* VDUP: duplicate either a vector scalar (Dm[x]) or an ARM core register
   into all lanes of a D or Q register.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* VDUP.<size> <Dd>/<Qd>, <Dm[x]>.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* The scalar index and size share the imm4 field: index above a
	 single set size bit.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector. */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8: inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field. */
      do_vfp_cond_or_thumb ();
    }
}
15246
15247 /* VMOV has particularly many variations. It can be one of:
15248 0. VMOV<c><q> <Qd>, <Qm>
15249 1. VMOV<c><q> <Dd>, <Dm>
15250 (Register operations, which are VORR with Rm = Rn.)
15251 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15252 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15253 (Immediate loads.)
15254 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15255 (ARM register to scalar.)
15256 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15257 (Two ARM registers to vector.)
15258 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15259 (Scalar to ARM register.)
15260 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15261 (Vector to two ARM registers.)
15262 8. VMOV.F32 <Sd>, <Sm>
15263 9. VMOV.F64 <Dd>, <Dm>
15264 (VFP register moves.)
15265 10. VMOV.F32 <Sd>, #imm
15266 11. VMOV.F64 <Dd>, #imm
15267 (VFP float immediate load.)
15268 12. VMOV <Rd>, <Sm>
15269 (VFP single to ARM reg.)
15270 13. VMOV <Sd>, <Rm>
15271 (ARM reg to VFP single.)
15272 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15273 (Two ARM regs to two VFP singles.)
15274 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15275 (Two VFP singles to two ARM regs.)
15276
15277 These cases can be disambiguated using neon_select_shape, except cases 1/9
15278 and 3/11 which depend on the operand type too.
15279
15280 All the encoded bits are hardcoded by this function.
15281
15282 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15283 Cases 5, 7 may be used with VFPv2 and above.
15284
15285 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15286 can specify a type where it doesn't make sense to, and is ignored). */
15287
/* Dispatch the many VMOV variants enumerated in the comment above,
   keyed primarily on the parsed operand shape.  */

static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* VMOV.F64 is the VFP fcpyd.  */
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR with Rn == Rm: the source occupies both register fields.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Opc1/opc2 seed value for the element size...  */
	switch (et.size)
	  {
	  case 8: bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	/* ...combined with the scalar index.  */
	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* The U bit distinguishes zero- from sign-extension for sub-word
	   transfers.  */
	switch (et.size)
	  {
	  case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      /* Cases 10 and 11 share the quarter-float immediate check.  */
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    default:
      abort ();
    }
}
15497
/* V{R}SHR: right shift by immediate.  A zero shift count degenerates to
   a plain VMOV.  */

static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR. */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  /* Right shifts are encoded as (element size - shift count).  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
15518
/* VMOVL: lengthening move, Qd = sign/zero-extended Dm.  */

static void
do_neon_movl (void)
{
  struct neon_type_el et = neon_check_type (2, NS_QD,
    N_EQK | N_DBL, N_SU_32 | N_KEY);
  unsigned sizebits = et.size >> 3;
  inst.instruction |= sizebits << 19;
  neon_two_same (0, et.type == NT_unsigned, -1);
}
15528
/* VTRN: transpose element pairs between two vectors.  */

static void
do_neon_trn (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (neon_quad (rs), 1, et.size);
}
15538
/* VZIP/VUZP: interleave or de-interleave two vectors.  */

static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
15554
/* VQABS/VQNEG: saturating absolute value / negate, signed types only.  */

static void
do_neon_sat_abs_neg (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
15563
static void
do_neon_pair_long (void)
{
  /* Encode the pairwise add-long family for 8/16/32-bit elements.  */
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}
15573
15574 static void
15575 do_neon_recip_est (void)
15576 {
15577 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15578 struct neon_type_el et = neon_check_type (2, rs,
15579 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
15580 inst.instruction |= (et.type == NT_float) << 8;
15581 neon_two_same (neon_quad (rs), 1, et.size);
15582 }
15583
15584 static void
15585 do_neon_cls (void)
15586 {
15587 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15588 struct neon_type_el et = neon_check_type (2, rs,
15589 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15590 neon_two_same (neon_quad (rs), 1, et.size);
15591 }
15592
15593 static void
15594 do_neon_clz (void)
15595 {
15596 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15597 struct neon_type_el et = neon_check_type (2, rs,
15598 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
15599 neon_two_same (neon_quad (rs), 1, et.size);
15600 }
15601
15602 static void
15603 do_neon_cnt (void)
15604 {
15605 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15606 struct neon_type_el et = neon_check_type (2, rs,
15607 N_EQK | N_INT, N_8 | N_KEY);
15608 neon_two_same (neon_quad (rs), 1, et.size);
15609 }
15610
15611 static void
15612 do_neon_swp (void)
15613 {
15614 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15615 neon_two_same (neon_quad (rs), 1, -1);
15616 }
15617
static void
do_neon_tbl_tbx (void)
{
  /* Encode VTBL/VTBX table lookups: Dd, a list of 1-4 D registers, Dm.  */
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  /* List length minus one goes into bits [9:8].  */
  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
15641
static void
do_neon_ldm_stm (void)
{
  /* Encode VLDM/VSTM for D-register lists; single-precision lists are
     handed off to the VFP encoder.  */
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the offset field.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
15671
15672 static void
15673 do_neon_ldr_str (void)
15674 {
15675 int is_ldr = (inst.instruction & (1 << 20)) != 0;
15676
15677 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
15678 And is UNPREDICTABLE in thumb mode. */
15679 if (!is_ldr
15680 && inst.operands[1].reg == REG_PC
15681 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
15682 {
15683 if (!thumb_mode && warn_on_deprecated)
15684 as_warn (_("Use of PC here is deprecated"));
15685 else
15686 inst.error = _("Use of PC here is UNPREDICTABLE");
15687 }
15688
15689 if (inst.operands[0].issingle)
15690 {
15691 if (is_ldr)
15692 do_vfp_nsyn_opcode ("flds");
15693 else
15694 do_vfp_nsyn_opcode ("fsts");
15695 }
15696 else
15697 {
15698 if (is_ldr)
15699 do_vfp_nsyn_opcode ("fldd");
15700 else
15701 do_vfp_nsyn_opcode ("fstd");
15702 }
15703 }
15704
15705 /* "interleave" version also handles non-interleaving register VLD1/VST1
15706 instructions. */
15707
static void
do_neon_ld_st_interleave (void)
{
  /* Validate the alignment specifier against the register list, then
     derive the "type" field (bits [11:8]) from the list layout via
     TYPETABLE below.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL,
                                            N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
       -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
       -1, -1, -1, -1, 0x4, 0x5, -1, -1,  /* VLD3 / VST3.  */
       -1, -1, -1, -1, -1, -1, 0x0, 0x1   /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Alignments of 128 and 256 bits are only legal for certain list
     lengths; anything unrecognized is an error.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
            && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
          goto bad_alignment;
        alignbits = 2;
        break;
      case 256:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
          goto bad_alignment;
        alignbits = 3;
        break;
      default:
      bad_alignment:
        first_error (_("bad alignment"));
        return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here,
     look up the right value for "type" in a table based on this value and
     the given list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
        | (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
15771
15772 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
15773 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
15774 otherwise. The variable arguments are a list of pairs of legal (size, align)
15775 values, terminated with -1. */
15776
15777 static int
15778 neon_alignment_bit (int size, int align, int *do_align, ...)
15779 {
15780 va_list ap;
15781 int result = FAIL, thissize, thisalign;
15782
15783 if (!inst.operands[1].immisalign)
15784 {
15785 *do_align = 0;
15786 return SUCCESS;
15787 }
15788
15789 va_start (ap, do_align);
15790
15791 do
15792 {
15793 thissize = va_arg (ap, int);
15794 if (thissize == -1)
15795 break;
15796 thisalign = va_arg (ap, int);
15797
15798 if (size == thissize && align == thisalign)
15799 result = SUCCESS;
15800 }
15801 while (result != SUCCESS);
15802
15803 va_end (ap);
15804
15805 if (result == SUCCESS)
15806 *do_align = 1;
15807 else
15808 first_error (_("unsupported alignment for instruction"));
15809
15810 return result;
15811 }
15812
static void
do_neon_ld_st_lane (void)
{
  /* Encode single-lane VLD<n>/VST<n>: load/store one element to/from a
     specific lane of each register in the list.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;	/* <n> minus one, from the bitmask.  */
  int max_el = 64 / et.size;		/* Lanes per 64-bit register.  */

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Each <n> has its own legal (size, align) pairs and alignment-bit
     placement.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
15897
15898 /* Encode single n-element structure to all lanes VLD<n> instructions. */
15899
static void
do_neon_ld_dup (void)
{
  /* Encode load-to-all-lanes VLD<n>: read one structure and replicate it
     across every lane of the listed registers.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  /* <n> minus one comes from bits [9:8] of the initial bitmask.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* A two-register list sets the T bit (bit 5).  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use a special size
	   encoding.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_align << 4;
}
15971
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
15974
static void
do_neon_ldx_stx (void)
{
  /* Top-level encoder for VLD<n>/VST<n>: dispatch on the lane specifier
     (interleave / all-lanes / single lane), then fill in the common
     fields: Dd, Rn, and the Rm slot which also encodes the addressing
     mode.  */
  if (inst.operands[1].isreg)
    constraint (inst.operands[1].reg == REG_PC, BAD_PC);

  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      NEON_ENCODE (INTERLV, inst);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      NEON_ENCODE (DUP, inst);
      do_neon_ld_dup ();
      break;

    default:
      NEON_ENCODE (LANE, inst);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;

  /* The Rm field: a real register for register post-index, 0xd for
     plain writeback, 0xf for no writeback.  */
  if (inst.operands[1].postind)
    {
      int postreg = inst.operands[1].imm & 0xf;
      constraint (!inst.operands[1].immisreg,
		  _("post-index must be a register"));
      constraint (postreg == 0xd || postreg == 0xf,
		  _("bad register for post-index"));
      inst.instruction |= postreg;
    }
  else if (inst.operands[1].writeback)
    {
      inst.instruction |= 0xd;
    }
  else
    inst.instruction |= 0xf;

  /* Select the ARM or Thumb top byte.  */
  if (thumb_mode)
    inst.instruction |= 0xf9000000;
  else
    inst.instruction |= 0xf4000000;
}
16024
16025 /* FP v8. */
16026 static void
16027 do_vfp_nsyn_fpv8 (enum neon_shape rs)
16028 {
16029 NEON_ENCODE (FPV8, inst);
16030
16031 if (rs == NS_FFF)
16032 do_vfp_sp_dyadic ();
16033 else
16034 do_vfp_dp_rd_rn_rm ();
16035
16036 if (rs == NS_DDD)
16037 inst.instruction |= 0x100;
16038
16039 inst.instruction |= 0xf0000000;
16040 }
16041
static void
do_vsel (void)
{
  /* VSEL must appear outside any IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
    first_error (_("invalid instruction shape"));
}
16050
static void
do_vmaxnm (void)
{
  /* VMAXNM/VMINNM must appear outside any IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  /* Try the VFP (scalar) encoding first; fall back to the Neon vector
     form if the operands do not fit a VFP shape.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_F32, 0);
}
16064
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  /* Shared encoder for the VRINT family.  Tries the VFP (scalar)
     encoding first; if the type does not fit, falls back to the Neon
     vector encoding.  MODE selects the rounding-mode bits.  */
  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The a/n/p/m modes are unconditional forms, not allowed in an IT
	 block.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 marks double precision.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Rounding mode goes in bits starting at 7; mode R has no Neon
	 encoding.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
16141
/* VRINT wrappers: each selects one rounding mode for the shared encoder
   do_vrint_1.  */

static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
16183
/* Crypto v1 instructions.  */

/* Encode a two-register crypto instruction.  ELTTYPE is the required
   element type; OP goes into bits [7:6], or is -1 when the instruction
   has no OP field.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
16209
/* Encode a three-register crypto instruction.  U and OP together select
   the specific operation; the size field is encoded as 8 << OP.  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
16224
/* AES and SHA wrappers: each passes the element type / U bit and the OP
   value identifying the operation to the shared crypto encoders.  */

static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}

static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}

static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}

static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}

static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}

static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}

static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}

static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}

static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}

static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}

static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}

static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}

static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}

static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
16308 \f
16309 /* Overall per-instruction processing. */
16310
16311 /* We need to be able to fix up arbitrary expressions in some statements.
16312 This is so that we can handle symbols that are an arbitrary distance from
16313 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
16314 which returns part of an address in a form which will be valid for
16315 a data instruction. We do this by pushing the expression into a symbol
16316 in the expr_section, and creating a fix for that. */
16317
static void
fix_new_arm (fragS * frag,
	     int where,
	     short int size,
	     expressionS * exp,
	     int pc_rel,
	     int reloc)
{
  /* Create a fixup for expression EXP at offset WHERE in FRAG, recording
     whether it applies to an ARM or a Thumb instruction.  */
  fixS * new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      /* Simple enough to hand straight to the generic fixup machinery.  */
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex is pushed into an expression symbol.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
16371
/* Create a frag for an instruction requiring relaxation. */
static void
output_relax_insn (void)
{
  /* Emit the current instruction into a machine-dependent variant frag
     so that it can be widened later during relaxation.  */
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* frag_var wants the target as symbol + offset; synthesize a symbol
     for anything more complex.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
16403
16404 /* Write a 32-bit thumb instruction to buf. */
16405 static void
16406 put_thumb32_insn (char * buf, unsigned long insn)
16407 {
16408 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
16409 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
16410 }
16411
static void
output_inst (const char * str)
{
  /* Emit the assembled instruction held in INST into the current frag,
     creating any fixup it needs.  STR is the source line, used only for
     diagnostics.  */
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  /* Size zero means nothing to emit (e.g. a pseudo instruction).  */
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb: two halfwords, high halfword first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-length ARM encoding: the same word is emitted twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
16458
16459 static char *
16460 output_it_inst (int cond, int mask, char * to)
16461 {
16462 unsigned long instruction = 0xbf00;
16463
16464 mask &= 0xf;
16465 instruction |= mask;
16466 instruction |= cond << 4;
16467
16468 if (to == NULL)
16469 {
16470 to = frag_more (2);
16471 #ifdef OBJ_ELF
16472 dwarf2_emit_insn (2);
16473 #endif
16474 }
16475
16476 md_number_to_chars (to, instruction, 2);
16477
16478 return to;
16479 }
16480
/* Tag values used in struct asm_opcode's tag field.  These describe how
   (and where) a conditional affix may attach to a mnemonic.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
16514
16515 /* Subroutine of md_assemble, responsible for looking up the primary
16516 opcode from the mnemonic the user wrote. STR points to the
16517 beginning of the mnemonic.
16518
16519 This is not simply a hash table lookup, because of conditional
16520 variants. Most instructions have conditional variants, which are
16521 expressed with a _conditional affix_ to the mnemonic. If we were
16522 to encode each conditional variant as a literal string in the opcode
16523 table, it would have approximately 20,000 entries.
16524
16525 Most mnemonics take this affix as a suffix, and in unified syntax,
16526 'most' is upgraded to 'all'. However, in the divided syntax, some
16527 instructions take the affix as an infix, notably the s-variants of
16528 the arithmetic instructions. Of those instructions, all but six
16529 have the infix appear after the third character of the mnemonic.
16530
16531 Accordingly, the algorithm for looking up primary opcodes given
16532 an identifier is:
16533
16534 1. Look up the identifier in the opcode table.
16535 If we find a match, go to step U.
16536
16537 2. Look up the last two characters of the identifier in the
16538 conditions table. If we find a match, look up the first N-2
16539 characters of the identifier in the opcode table. If we
16540 find a match, go to step CE.
16541
16542 3. Look up the fourth and fifth characters of the identifier in
16543 the conditions table. If we find a match, extract those
16544 characters from the identifier, and look up the remaining
16545 characters in the opcode table. If we find a match, go
16546 to step CM.
16547
16548 4. Fail.
16549
16550 U. Examine the tag field of the opcode structure, in case this is
16551 one of the six instructions with its conditional infix in an
16552 unusual place. If it is, the tag tells us where to find the
16553 infix; look it up in the conditions table and set inst.cond
16554 accordingly. Otherwise, this is an unconditional instruction.
16555 Again set inst.cond accordingly. Return the opcode structure.
16556
16557 CE. Examine the tag field to make sure this is an instruction that
16558 should receive a conditional suffix. If it is not, fail.
16559 Otherwise, set inst.cond from the suffix we already looked up,
16560 and return the opcode structure.
16561
16562 CM. Examine the tag field to make sure this is an instruction that
16563 should receive a conditional infix after the third character.
16564 If it is not, fail. Otherwise, undo the edits to the current
16565 line of input and proceed as for case CE. */
16566
/* Parse the instruction mnemonic at **STR: scan to its end, consume any
   width suffix (.w/.n) and/or Neon type suffix, and try the opcode hash
   table with the mnemonic unaffixed, condition-suffixed, and
   condition-infixed in turn.  On success return the opcode entry,
   advance *STR past the mnemonic, and set inst.cond (and possibly
   inst.size_req, inst.vectype, or a delayed inst.error); return NULL
   when nothing matches.  */
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;		/* Not a width suffix; leave it for Neon parsing.  */

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_warn (_("conditional infixes are deprecated in unified syntax"));
      /* Tags >= OT_odd_infix_0 encode the infix position within the
	 mnemonic as an offset from OT_odd_infix_0.  */
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than two
     characters.  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return 0;
	  /* else fall through */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily excise the two condition characters at BASE+3, look the
     shortened mnemonic up, then restore the string byte-for-byte.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_warn (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
16723
16724 /* This function generates an initial IT instruction, leaving its block
16725 virtually open for the new instructions. Eventually,
16726 the mask will be updated by now_it_add_mask () each time
16727 a new instruction needs to be included in the IT block.
16728 Finally, the block is closed with close_automatic_it_block ().
16729 The block closure can be requested either from md_assemble (),
16730 a tencode (), or due to a label hook. */
16731
static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  /* Initial mask for a one-instruction block; now_it_add_mask ()
     rewrites it as further instructions join the block.  */
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  /* The IT instruction itself is Thumb code.  */
  mapping_state (MAP_THUMB);
  /* Remember the emitted IT instruction so its mask can be patched
     later by now_it_add_mask ().  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
16744
16745 /* Close an automatic IT block.
16746 See comments in new_automatic_it_block (). */
16747
static void
close_automatic_it_block (void)
{
  /* 0x10 is the mask value that it_fsm_post_encode () recognizes as
     the end of a block.  */
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
16754
16755 /* Update the mask of the current automatically-generated IT
16756 instruction. See comments in new_automatic_it_block (). */
16757
16758 static void
16759 now_it_add_mask (int cond)
16760 {
16761 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
16762 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
16763 | ((bitvalue) << (nbit)))
16764 const int resulting_bit = (cond & 1);
16765
16766 now_it.mask &= 0xf;
16767 now_it.mask = SET_BIT_VALUE (now_it.mask,
16768 resulting_bit,
16769 (5 - now_it.block_length));
16770 now_it.mask = SET_BIT_VALUE (now_it.mask,
16771 1,
16772 ((5 - now_it.block_length) - 1) );
16773 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
16774
16775 #undef CLEAR_BIT
16776 #undef SET_BIT_VALUE
16777 }
16778
16779 /* The IT blocks handling machinery is accessed through the these functions:
16780 it_fsm_pre_encode () from md_assemble ()
16781 set_it_insn_type () optional, from the tencode functions
16782 set_it_insn_type_last () ditto
16783 in_it_block () ditto
16784 it_fsm_post_encode () from md_assemble ()
   force_automatic_it_block_close () from label handling functions
16786
16787 Rationale:
16788 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
16789 initializing the IT insn type with a generic initial value depending
16790 on the inst.condition.
16791 2) During the tencode function, two things may happen:
16792 a) The tencode function overrides the IT insn type by
16793 calling either set_it_insn_type (type) or set_it_insn_type_last ().
16794 b) The tencode function queries the IT block state by
16795 calling in_it_block () (i.e. to determine narrow/not narrow mode).
16796
16797 Both set_it_insn_type and in_it_block run the internal FSM state
16798 handling function (handle_it_state), because: a) setting the IT insn
   type may result in an invalid state (exiting the function),
16800 and b) querying the state requires the FSM to be updated.
16801 Specifically we want to avoid creating an IT block for conditional
16802 branches, so it_fsm_pre_encode is actually a guess and we can't
16803 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
16805 Because of this, if set_it_insn_type and in_it_block have to be used,
16806 set_it_insn_type has to be called first.
16807
16808 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
16809 determines the insn IT type depending on the inst.cond code.
16810 When a tencode () routine encodes an instruction that can be
16811 either outside an IT block, or, in the case of being inside, has to be
16812 the last one, set_it_insn_type_last () will determine the proper
16813 IT instruction type based on the inst.cond code. Otherwise,
16814 set_it_insn_type can be called for overriding that logic or
16815 for covering other cases.
16816
16817 Calling handle_it_state () may not transition the IT block state to
   OUTSIDE_IT_BLOCK immediately, since the (current) state could be
16819 still queried. Instead, if the FSM determines that the state should
16820 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
16821 after the tencode () function: that's what it_fsm_post_encode () does.
16822
16823 Since in_it_block () calls the state handling function to get an
16824 updated state, an error may occur (due to invalid insns combination).
16825 In that case, inst.error is set.
16826 Therefore, inst.error has to be checked after the execution of
16827 the tencode () routine.
16828
16829 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
16830 any pending state change (if any) that didn't take place in
16831 handle_it_state () as explained above. */
16832
16833 static void
16834 it_fsm_pre_encode (void)
16835 {
16836 if (inst.cond != COND_ALWAYS)
16837 inst.it_insn_type = INSIDE_IT_INSN;
16838 else
16839 inst.it_insn_type = OUTSIDE_IT_INSN;
16840
16841 now_it.state_handled = 0;
16842 }
16843
/* IT state FSM handling function.  Advance now_it according to
   inst.it_insn_type for the instruction being assembled.  Returns
   SUCCESS, or FAIL with inst.error set.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  Bit 4 of the mask says whether
	   this slot takes the block condition or its inverse ('t' vs
	   'e'); shifting the mask consumes the slot.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
17007
/* Pattern/mask pair describing a class of Thumb encodings, with a
   translatable description for diagnostics.  An instruction matches
   when (insn & mask) == pattern.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Required value of the tested bits.  */
  unsigned long mask;		/* Which bits of the encoding to test.  */
  const char* description;	/* Human-readable class name.  */
};
17014
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Matched via (insn & mask) == pattern by it_fsm_post_encode ();
   the list is terminated by an entry with a zero mask.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  { 0, 0, NULL }
};
17026
17027 static void
17028 it_fsm_post_encode (void)
17029 {
17030 int is_last;
17031
17032 if (!now_it.state_handled)
17033 handle_it_state ();
17034
17035 if (now_it.insn_cond
17036 && !now_it.warn_deprecated
17037 && warn_on_deprecated
17038 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
17039 {
17040 if (inst.instruction >= 0x10000)
17041 {
17042 as_warn (_("it blocks containing wide Thumb instructions are "
17043 "deprecated in ARMv8"));
17044 now_it.warn_deprecated = TRUE;
17045 }
17046 else
17047 {
17048 const struct depr_insn_mask *p = depr_it_insns;
17049
17050 while (p->mask != 0)
17051 {
17052 if ((inst.instruction & p->mask) == p->pattern)
17053 {
17054 as_warn (_("it blocks containing 16-bit Thumb intsructions "
17055 "of the following class are deprecated in ARMv8: "
17056 "%s"), p->description);
17057 now_it.warn_deprecated = TRUE;
17058 break;
17059 }
17060
17061 ++p;
17062 }
17063 }
17064
17065 if (now_it.block_length > 1)
17066 {
17067 as_warn (_("it blocks of more than one conditional instruction are "
17068 "deprecated in ARMv8"));
17069 now_it.warn_deprecated = TRUE;
17070 }
17071 }
17072
17073 is_last = (now_it.mask == 0x10);
17074 if (is_last)
17075 {
17076 now_it.state = OUTSIDE_IT_BLOCK;
17077 now_it.mask = 0;
17078 }
17079 }
17080
17081 static void
17082 force_automatic_it_block_close (void)
17083 {
17084 if (now_it.state == AUTOMATIC_IT_BLOCK)
17085 {
17086 close_automatic_it_block ();
17087 now_it.state = OUTSIDE_IT_BLOCK;
17088 now_it.mask = 0;
17089 }
17090 }
17091
/* Return non-zero if the instruction being assembled falls inside an
   IT block, first running the IT FSM if it has not yet been run for
   this instruction (which may set inst.error -- see the commentary
   above).  */
static int
in_it_block (void)
{
  if (!now_it.state_handled)
    handle_it_state ();

  return now_it.state != OUTSIDE_IT_BLOCK;
}
17100
/* Assemble a single instruction from STR: re-anchor any label seen on
   this line, look up the mnemonic, check it is supported by the
   selected CPU in the current (ARM or Thumb) mode, parse the operands,
   run the encoder inside the IT-block FSM, and emit the result via
   output_inst ().  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_warn (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support Thumb mode `%s'"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
	{
	  if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
	      && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
		   || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
	    {
	      /* Two things are addressed here.
		 1) Implicit require narrow instructions on Thumb-1.
		    This avoids relaxation accidentally introducing Thumb-2
		    instructions.
		 2) Reject wide instructions in non Thumb-2 cores.  */
	      if (inst.size_req == 0)
		inst.size_req = 2;
	      else if (inst.size_req == 4)
		{
		  as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str);
		  return;
		}
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* Encoders must not leave a value in the gap between the
	     16-bit and 32-bit Thumb encoding spaces.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  ie.
	 anything other than bl/blx and v6-M instructions.
	 This is overly pessimistic for relaxable instructions.  */
      if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
	   || inst.relax)
	  && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
	       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      /* Statement-like macro, defined elsewhere in this file.  */
      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support ARM mode `%s'"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xF << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      /* Statement-like macro, defined elsewhere in this file.  */
      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
17285
/* Warn if assembly finishes while a manually-written IT block is still
   open -- per section for ELF (each section carries its own IT state),
   per file otherwise.  */
static void
check_it_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_it.state
	== MANUAL_IT_BLOCK)
      {
	as_warn (_("section '%s' finished with an open IT block."),
		 sect->name);
      }
#else
  if (now_it.state == MANUAL_IT_BLOCK)
    as_warn (_("file finished with an open IT block."));
#endif
}
17304
17305 /* Various frobbings of labels and their addresses. */
17306
/* Per-line hook: forget the label recorded by arm_frob_label () so
   that md_assemble () only re-anchors a label seen on the current
   line.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
17312
/* Hook run when label SYM is defined: remember it for md_assemble ()'s
   re-anchoring, tag it with the current ARM/Thumb state, close any
   automatically-generated IT block, possibly mark it as a Thumb
   function, and emit DWARF line information for it.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
17371
17372 bfd_boolean
17373 arm_data_in_code (void)
17374 {
17375 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
17376 {
17377 *input_line_pointer = '/';
17378 input_line_pointer += 5;
17379 *input_line_pointer = 0;
17380 return TRUE;
17381 }
17382
17383 return FALSE;
17384 }
17385
17386 char *
17387 arm_canonicalize_symbol_name (char * name)
17388 {
17389 int len;
17390
17391 if (thumb_mode && (len = strlen (name)) > 5
17392 && streq (name + len - 5, "/data"))
17393 *(name + len - 5) = 0;
17394
17395 return name;
17396 }
17397 \f
17398 /* Table of all register names defined by default. The user can
17399 define additional names with .req. Note that all register names
17400 should appear in both upper and lowercase variants. Some registers
17401 also have mixed-case names. */
17402
17403 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
17404 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
17405 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
17406 #define REGSET(p,t) \
17407 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
17408 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
17409 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
17410 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
17411 #define REGSETH(p,t) \
17412 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
17413 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
17414 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
17415 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
17416 #define REGSET2(p,t) \
17417 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
17418 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
17419 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
17420 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
17421 #define SPLRBANK(base,bank,t) \
17422 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
17423 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
17424 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
17425 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
17426 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
17427 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
17428
17429 static const struct reg_entry reg_names[] =
17430 {
17431 /* ARM integer registers. */
17432 REGSET(r, RN), REGSET(R, RN),
17433
17434 /* ATPCS synonyms. */
17435 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
17436 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
17437 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
17438
17439 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
17440 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
17441 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
17442
17443 /* Well-known aliases. */
17444 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
17445 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
17446
17447 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
17448 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
17449
17450 /* Coprocessor numbers. */
17451 REGSET(p, CP), REGSET(P, CP),
17452
17453 /* Coprocessor register numbers. The "cr" variants are for backward
17454 compatibility. */
17455 REGSET(c, CN), REGSET(C, CN),
17456 REGSET(cr, CN), REGSET(CR, CN),
17457
17458 /* ARM banked registers. */
17459 REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
17460 REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
17461 REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
17462 REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
17463 REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
17464 REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
17465 REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
17466
17467 REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
17468 REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
17469 REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
17470 REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
17471 REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
17472 REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(SP_fiq,512|(13<<16),RNB),
17473 REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
17474 REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
17475
17476 SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
17477 SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
17478 SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
17479 SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
17480 SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
17481 REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
17482 REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
17483 REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
17484 REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
17485
17486 /* FPA registers. */
17487 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
17488 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
17489
17490 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
17491 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
17492
17493 /* VFP SP registers. */
17494 REGSET(s,VFS), REGSET(S,VFS),
17495 REGSETH(s,VFS), REGSETH(S,VFS),
17496
17497 /* VFP DP Registers. */
17498 REGSET(d,VFD), REGSET(D,VFD),
17499 /* Extra Neon DP registers. */
17500 REGSETH(d,VFD), REGSETH(D,VFD),
17501
17502 /* Neon QP registers. */
17503 REGSET2(q,NQ), REGSET2(Q,NQ),
17504
17505 /* VFP control registers. */
17506 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
17507 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
17508 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
17509 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
17510 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
17511 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
17512
17513 /* Maverick DSP coprocessor registers. */
17514 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
17515 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
17516
17517 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
17518 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
17519 REGDEF(dspsc,0,DSPSC),
17520
17521 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
17522 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
17523 REGDEF(DSPSC,0,DSPSC),
17524
17525 /* iWMMXt data registers - p0, c0-15. */
17526 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
17527
17528 /* iWMMXt control registers - p1, c0-3. */
17529 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
17530 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
17531 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
17532 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
17533
17534 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
17535 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
17536 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
17537 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
17538 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
17539
17540 /* XScale accumulator registers. */
17541 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
17542 };
17543 #undef REGDEF
17544 #undef REGNUM
17545 #undef REGSET
17546
17547 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
17548 within psr_required_here. */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  Every ordering of each combination is
     listed so the user may write the suffix letters in any order.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
17625
/* Table of V7M psr names.  The numeric value is the special-register
   number placed in the MRS/MSR encoding (presumably the SYSm field --
   see the ARMv7-M ARM to confirm).  Both lowercase and uppercase
   spellings are listed since lookup is case-sensitive.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0 }, {"APSR",		0 },
  {"iapsr",	  1 }, {"IAPSR",	1 },
  {"eapsr",	  2 }, {"EAPSR",	2 },
  {"psr",	  3 }, {"PSR",		3 },
  {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
  {"ipsr",	  5 }, {"IPSR",		5 },
  {"epsr",	  6 }, {"EPSR",		6 },
  {"iepsr",	  7 }, {"IEPSR",	7 },
  {"msp",	  8 }, {"MSP",		8 },
  {"psp",	  9 }, {"PSP",		9 },
  {"primask",	  16}, {"PRIMASK",	16},
  {"basepri",	  17}, {"BASEPRI",	17},
  {"basepri_max", 18}, {"BASEPRI_MAX",	18},
  {"basepri_max", 18}, {"BASEPRI_MASK",	18}, /* Typo, preserved for backwards compatibility.  */
  {"faultmask",	  19}, {"FAULTMASK",	19},
  {"control",	  20}, {"CONTROL",	20}
};
17646
/* Table of all shift-in-operand names.  "asl" is accepted as a
   synonym for "lsl"; both map to SHIFT_LSL.  Lower- and uppercase
   spellings are listed separately for case-sensitive lookup.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
17657
/* Table of all explicit relocation names, i.e. the "(reloc)" suffixes
   a user may write on an operand (e.g. "ldr r0, =sym(GOT)").  Each
   name maps to the BFD relocation code that will be emitted.  ELF
   only; other object formats have no explicit relocation syntax.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
#endif
17682
/* Table of all conditional affixes.  0xF is not defined as a condition code.
   The value is the 4-bit ARM condition field.  "hs" (higher-or-same)
   aliases "cs", and both "ul" and "lo" (unsigned-lower) alias "cc".  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
17702
/* Emit one barrier-option entry in both lowercase and uppercase
   spellings.  CODE is the option encoding for the barrier
   instruction's option field; FEAT is the architecture extension that
   introduces the name (some aliases require ARMv8).  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE (FEAT, 0) }, \
  { U, CODE, ARM_FEATURE (FEAT, 0) }

/* Table of barrier option names for DMB/DSB-style instructions.
   Several names share an encoding (e.g. "sh" is an alias for "ish",
   "un" for "nsh"); the LD variants are ARMv8-only.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
17728
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  Naming convention for the glyphs in
   the macro names: a leading T means there is a Thumb encoding, a
   leading lowercase t means the Thumb field is a T_MNEM_xyz enumerator
   rather than a literal opcode; U means unconditional, C/c means it
   takes a condition affix.  ARM_VARIANT / THUMB_VARIANT are
   redefined by #define throughout the table to set the architecture
   feature sets for subsequent entries.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* As TxC3, but the infix form is deprecated (a warning is given).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  Note that CE takes a quoted
   string mnemonic while C3 stringizes a bare token; callers differ
   accordingly.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* One entry for a mnemonic with the condition m2 glued between prefix
   m1 and suffix m3.  The sizeof tricks compute, at compile time,
   whether m2 is empty (unconditional form) and where the infix sits.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to one xCM_ entry per condition affix (plus the bare form),
   mirroring the conds[] table above, including the hs/ul/lo aliases.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only unconditional forms (no Thumb encoding); UF additionally
   bears 0xF in the condition field.  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Allows 0 to be passed where an encoder function name is expected:
   do_##ae with ae == 0 pastes to do_0, which expands to 0 (no encoder).  */
#define do_0 0
17898
17899 static const struct asm_opcode insns[] =
17900 {
17901 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
17902 #define THUMB_VARIANT &arm_ext_v4t
17903 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
17904 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
17905 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
17906 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
17907 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
17908 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
17909 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
17910 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
17911 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
17912 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
17913 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
17914 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
17915 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
17916 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
17917 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
17918 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
17919
17920 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
17921 for setting PSR flag bits. They are obsolete in V6 and do not
17922 have Thumb equivalents. */
17923 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
17924 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
17925 CL("tstp", 110f000, 2, (RR, SH), cmp),
17926 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
17927 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
17928 CL("cmpp", 150f000, 2, (RR, SH), cmp),
17929 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
17930 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
17931 CL("cmnp", 170f000, 2, (RR, SH), cmp),
17932
17933 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
17934 tC3("movs", 1b00000, _movs, 2, (RR, SH), mov, t_mov_cmp),
17935 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
17936 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
17937
17938 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
17939 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
17940 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
17941 OP_RRnpc),
17942 OP_ADDRGLDR),ldst, t_ldst),
17943 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
17944
17945 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17946 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17947 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17948 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17949 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17950 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17951
17952 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
17953 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
17954 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
17955 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
17956
17957 /* Pseudo ops. */
17958 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
17959 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
17960 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
17961
17962 /* Thumb-compatibility pseudo ops. */
17963 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
17964 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
17965 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
17966 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
17967 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
17968 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
17969 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
17970 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
17971 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
17972 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
17973 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
17974 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
17975
17976 /* These may simplify to neg. */
17977 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
17978 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
17979
17980 #undef THUMB_VARIANT
17981 #define THUMB_VARIANT & arm_ext_v6
17982
17983 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
17984
17985 /* V1 instructions with no Thumb analogue prior to V6T2. */
17986 #undef THUMB_VARIANT
17987 #define THUMB_VARIANT & arm_ext_v6t2
17988
17989 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
17990 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
17991 CL("teqp", 130f000, 2, (RR, SH), cmp),
17992
17993 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17994 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17995 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
17996 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17997
17998 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17999 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18000
18001 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18002 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18003
18004 /* V1 instructions with no Thumb analogue at all. */
18005 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
18006 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
18007
18008 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
18009 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
18010 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
18011 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
18012 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
18013 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
18014 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
18015 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
18016
18017 #undef ARM_VARIANT
18018 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18019 #undef THUMB_VARIANT
18020 #define THUMB_VARIANT & arm_ext_v4t
18021
18022 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18023 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18024
18025 #undef THUMB_VARIANT
18026 #define THUMB_VARIANT & arm_ext_v6t2
18027
18028 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18029 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
18030
18031 /* Generic coprocessor instructions. */
18032 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18033 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18034 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18035 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18036 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18037 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18038 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
18039
18040 #undef ARM_VARIANT
18041 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18042
18043 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18044 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18045
18046 #undef ARM_VARIANT
18047 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18048 #undef THUMB_VARIANT
18049 #define THUMB_VARIANT & arm_ext_msr
18050
18051 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
18052 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
18053
18054 #undef ARM_VARIANT
18055 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18056 #undef THUMB_VARIANT
18057 #define THUMB_VARIANT & arm_ext_v6t2
18058
18059 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18060 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18061 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18062 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18063 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18064 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18065 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18066 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18067
18068 #undef ARM_VARIANT
18069 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18070 #undef THUMB_VARIANT
18071 #define THUMB_VARIANT & arm_ext_v4t
18072
18073 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18074 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18075 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18076 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18077 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18078 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18079
18080 #undef ARM_VARIANT
18081 #define ARM_VARIANT & arm_ext_v4t_5
18082
18083 /* ARM Architecture 4T. */
18084 /* Note: bx (and blx) are required on V5, even if the processor does
18085 not support Thumb. */
18086 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
18087
18088 #undef ARM_VARIANT
18089 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18090 #undef THUMB_VARIANT
18091 #define THUMB_VARIANT & arm_ext_v5t
18092
18093 /* Note: blx has 2 variants; the .value coded here is for
18094 BLX(2). Only this variant has conditional execution. */
18095 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
18096 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
18097
18098 #undef THUMB_VARIANT
18099 #define THUMB_VARIANT & arm_ext_v6t2
18100
18101 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
18102 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18103 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18104 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18105 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18106 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18107 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18108 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18109
18110 #undef ARM_VARIANT
18111 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18112 #undef THUMB_VARIANT
18113 #define THUMB_VARIANT &arm_ext_v5exp
18114
18115 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18116 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18117 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18118 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18119
18120 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18121 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18122
18123 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18124 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18125 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18126 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18127
18128 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18129 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18130 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18131 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18132
18133 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18134 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18135
18136 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18137 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18138 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18139 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18140
18141 #undef ARM_VARIANT
18142 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18143 #undef THUMB_VARIANT
18144 #define THUMB_VARIANT &arm_ext_v6t2
18145
18146 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
18147 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
18148 ldrd, t_ldstd),
18149 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
18150 ADDRGLDRS), ldrd, t_ldstd),
18151
18152 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18153 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18154
18155 #undef ARM_VARIANT
18156 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
18157
18158 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
18159
18160 #undef ARM_VARIANT
18161 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
18162 #undef THUMB_VARIANT
18163 #define THUMB_VARIANT & arm_ext_v6
18164
18165 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
18166 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
18167 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18168 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18169 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18170 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18171 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18172 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18173 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18174 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
18175
18176 #undef THUMB_VARIANT
18177 #define THUMB_VARIANT & arm_ext_v6t2
18178
18179 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
18180 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18181 strex, t_strex),
18182 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18183 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18184
18185 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
18186 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
18187
18188 /* ARM V6 not included in V7M. */
18189 #undef THUMB_VARIANT
18190 #define THUMB_VARIANT & arm_ext_v6_notm
18191 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18192 UF(rfeib, 9900a00, 1, (RRw), rfe),
18193 UF(rfeda, 8100a00, 1, (RRw), rfe),
18194 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18195 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18196 UF(rfefa, 9900a00, 1, (RRw), rfe),
18197 UF(rfeea, 8100a00, 1, (RRw), rfe),
18198 TUF("rfeed", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18199 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18200 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
18201 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
18202 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
18203
18204 /* ARM V6 not included in V7M (eg. integer SIMD). */
18205 #undef THUMB_VARIANT
18206 #define THUMB_VARIANT & arm_ext_v6_dsp
18207 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
18208 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
18209 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
18210 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18211 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18212 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18213 /* Old name for QASX. */
18214 TCE("qaddsubx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18215 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18216 /* Old name for QSAX. */
18217 TCE("qsubaddx", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18218 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18219 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18220 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18221 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18222 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18223 /* Old name for SASX. */
18224 TCE("saddsubx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18225 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18226 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18227 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18228 /* Old name for SHASX. */
18229 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18230 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18231 /* Old name for SHSAX. */
18232 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18233 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18234 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18235 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18236 /* Old name for SSAX. */
18237 TCE("ssubaddx", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18238 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18239 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18240 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18241 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18242 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18243 /* Old name for UASX. */
18244 TCE("uaddsubx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18245 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18246 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18247 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18248 /* Old name for UHASX. */
18249 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18250 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18251 /* Old name for UHSAX. */
18252 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18253 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18254 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18255 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18256 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18257 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18258 /* Old name for UQASX. */
18259 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18260 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18261 /* Old name for UQSAX. */
18262 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18263 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18264 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18265 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18266 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18267 /* Old name for USAX. */
18268 TCE("usubaddx", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18269 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18270 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18271 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18272 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18273 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18274 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18275 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18276 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18277 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18278 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18279 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18280 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18281 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18282 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18283 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18284 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18285 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18286 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18287 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18288 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18289 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18290 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18291 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18292 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18293 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18294 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18295 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18296 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18297 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
18298 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
18299 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18300 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18301 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
18302
18303 #undef ARM_VARIANT
18304 #define ARM_VARIANT & arm_ext_v6k
18305 #undef THUMB_VARIANT
18306 #define THUMB_VARIANT & arm_ext_v6k
18307
18308 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
18309 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
18310 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
18311 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
18312
18313 #undef THUMB_VARIANT
18314 #define THUMB_VARIANT & arm_ext_v6_notm
18315 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
18316 ldrexd, t_ldrexd),
18317 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
18318 RRnpcb), strexd, t_strexd),
18319
18320 #undef THUMB_VARIANT
18321 #define THUMB_VARIANT & arm_ext_v6t2
18322 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
18323 rd_rn, rd_rn),
18324 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
18325 rd_rn, rd_rn),
18326 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18327 strex, t_strexbh),
18328 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18329 strex, t_strexbh),
18330 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
18331
18332 #undef ARM_VARIANT
18333 #define ARM_VARIANT & arm_ext_sec
18334 #undef THUMB_VARIANT
18335 #define THUMB_VARIANT & arm_ext_sec
18336
18337 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
18338
18339 #undef ARM_VARIANT
18340 #define ARM_VARIANT & arm_ext_virt
18341 #undef THUMB_VARIANT
18342 #define THUMB_VARIANT & arm_ext_virt
18343
18344 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
18345 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
18346
18347 #undef ARM_VARIANT
18348 #define ARM_VARIANT & arm_ext_v6t2
18349 #undef THUMB_VARIANT
18350 #define THUMB_VARIANT & arm_ext_v6t2
18351
18352 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
18353 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
18354 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
18355 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
18356
18357 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18358 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
18359 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
18360 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
18361
18362 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18363 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18364 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18365 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18366
18367 /* Thumb-only instructions. */
18368 #undef ARM_VARIANT
18369 #define ARM_VARIANT NULL
18370 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
18371 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
18372
18373 /* ARM does not really have an IT instruction, so always allow it.
18374 The opcode is copied from Thumb in order to allow warnings in
18375 -mimplicit-it=[never | arm] modes. */
18376 #undef ARM_VARIANT
18377 #define ARM_VARIANT & arm_ext_v1
18378
18379 TUE("it", bf08, bf08, 1, (COND), it, t_it),
18380 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
18381 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
18382 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
18383 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
18384 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
18385 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
18386 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
18387 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
18388 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
18389 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
18390 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
18391 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
18392 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
18393 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
18394 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
18395 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
18396 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
18397
18398 /* Thumb2 only instructions. */
18399 #undef ARM_VARIANT
18400 #define ARM_VARIANT NULL
18401
18402 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18403 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18404 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
18405 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
18406 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
18407 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
18408
18409 /* Hardware division instructions. */
18410 #undef ARM_VARIANT
18411 #define ARM_VARIANT & arm_ext_adiv
18412 #undef THUMB_VARIANT
18413 #define THUMB_VARIANT & arm_ext_div
18414
18415 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
18416 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
18417
18418 /* ARM V6M/V7 instructions. */
18419 #undef ARM_VARIANT
18420 #define ARM_VARIANT & arm_ext_barrier
18421 #undef THUMB_VARIANT
18422 #define THUMB_VARIANT & arm_ext_barrier
18423
18424 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, t_barrier),
18425 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, t_barrier),
18426 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, t_barrier),
18427
18428 /* ARM V7 instructions. */
18429 #undef ARM_VARIANT
18430 #define ARM_VARIANT & arm_ext_v7
18431 #undef THUMB_VARIANT
18432 #define THUMB_VARIANT & arm_ext_v7
18433
18434 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
18435 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
18436
18437 #undef ARM_VARIANT
18438 #define ARM_VARIANT & arm_ext_mp
18439 #undef THUMB_VARIANT
18440 #define THUMB_VARIANT & arm_ext_mp
18441
18442 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
18443
18444 /* AArchv8 instructions. */
18445 #undef ARM_VARIANT
18446 #define ARM_VARIANT & arm_ext_v8
18447 #undef THUMB_VARIANT
18448 #define THUMB_VARIANT & arm_ext_v8
18449
18450 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
18451 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
18452 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18453 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
18454 ldrexd, t_ldrexd),
18455 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
18456 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18457 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
18458 stlex, t_stlex),
18459 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
18460 strexd, t_strexd),
18461 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
18462 stlex, t_stlex),
18463 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
18464 stlex, t_stlex),
18465 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18466 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18467 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18468 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18469 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18470 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18471
18472 /* ARMv8 T32 only. */
18473 #undef ARM_VARIANT
18474 #define ARM_VARIANT NULL
18475 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
18476 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
18477 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
18478
18479 /* FP for ARMv8. */
18480 #undef ARM_VARIANT
18481 #define ARM_VARIANT & fpu_vfp_ext_armv8
18482 #undef THUMB_VARIANT
18483 #define THUMB_VARIANT & fpu_vfp_ext_armv8
18484
18485 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
18486 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
18487 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
18488 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
18489 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
18490 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
18491 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
18492 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
18493 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
18494 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
18495 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
18496 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
18497 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
18498 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
18499 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
18500 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
18501 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
18502
18503 /* Crypto v1 extensions. */
18504 #undef ARM_VARIANT
18505 #define ARM_VARIANT & fpu_crypto_ext_armv8
18506 #undef THUMB_VARIANT
18507 #define THUMB_VARIANT & fpu_crypto_ext_armv8
18508
18509 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
18510 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
18511 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
18512 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
18513 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
18514 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
18515 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
18516 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
18517 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
18518 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
18519 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
18520 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
18521 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
18522 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
18523
18524 #undef ARM_VARIANT
18525 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
18526 #undef THUMB_VARIANT
18527 #define THUMB_VARIANT NULL
18528
18529 cCE("wfs", e200110, 1, (RR), rd),
18530 cCE("rfs", e300110, 1, (RR), rd),
18531 cCE("wfc", e400110, 1, (RR), rd),
18532 cCE("rfc", e500110, 1, (RR), rd),
18533
18534 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
18535 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
18536 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
18537 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
18538
18539 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
18540 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
18541 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
18542 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
18543
18544 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
18545 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
18546 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
18547 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
18548 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
18549 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
18550 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
18551 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
18552 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
18553 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
18554 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
18555 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
18556
18557 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
18558 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
18559 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
18560 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
18561 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
18562 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
18563 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
18564 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
18565 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
18566 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
18567 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
18568 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
18569
18570 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
18571 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
18572 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
18573 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
18574 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
18575 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
18576 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
18577 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
18578 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
18579 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
18580 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
18581 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
18582
18583 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
18584 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
18585 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
18586 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
18587 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
18588 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
18589 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
18590 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
18591 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
18592 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
18593 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
18594 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
18595
18596 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
18597 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
18598 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
18599 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
18600 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
18601 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
18602 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
18603 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
18604 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
18605 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
18606 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
18607 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
18608
18609 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
18610 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
18611 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
18612 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
18613 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
18614 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
18615 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
18616 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
18617 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
18618 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
18619 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
18620 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
18621
18622 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
18623 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
18624 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
18625 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
18626 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
18627 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
18628 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
18629 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
18630 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
18631 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
18632 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
18633 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
18634
18635 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
18636 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
18637 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
18638 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
18639 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
18640 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
18641 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
18642 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
18643 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
18644 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
18645 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
18646 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
18647
18648 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
18649 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
18650 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
18651 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
18652 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
18653 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
18654 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
18655 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
18656 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
18657 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
18658 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
18659 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
18660
18661 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
18662 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
18663 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
18664 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
18665 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
18666 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
18667 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
18668 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
18669 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
18670 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
18671 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
18672 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
18673
18674 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
18675 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
18676 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
18677 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
18678 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
18679 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
18680 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
18681 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
18682 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
18683 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
18684 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
18685 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
18686
18687 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
18688 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
18689 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
18690 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
18691 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
18692 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
18693 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
18694 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
18695 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
18696 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
18697 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
18698 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
18699
18700 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
18701 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
18702 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
18703 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
18704 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
18705 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
18706 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
18707 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
18708 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
18709 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
18710 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
18711 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
18712
18713 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
18714 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
18715 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
18716 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
18717 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
18718 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
18719 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
18720 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
18721 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
18722 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
18723 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
18724 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
18725
18726 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
18727 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
18728 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
18729 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
18730 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
18731 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
18732 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
18733 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
18734 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
18735 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
18736 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
18737 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
18738
18739 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
18740 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
18741 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
18742 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
18743 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
18744 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
18745 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
18746 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
18747 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
18748 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
18749 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
18750 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
18751
18752 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
18753 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
18754 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
18755 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
18756 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
18757 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18758 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18759 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18760 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
18761 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
18762 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
18763 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
18764
18765 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
18766 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
18767 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
18768 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
18769 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
18770 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18771 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18772 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18773 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
18774 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
18775 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
18776 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
18777
18778 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
18779 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
18780 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
18781 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
18782 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
18783 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18784 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18785 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18786 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
18787 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
18788 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
18789 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
18790
18791 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
18792 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
18793 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
18794 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
18795 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
18796 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18797 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18798 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18799 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
18800 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
18801 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
18802 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
18803
18804 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
18805 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
18806 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
18807 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
18808 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
18809 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18810 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18811 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18812 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
18813 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
18814 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
18815 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
18816
18817 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
18818 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
18819 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
18820 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
18821 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
18822 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18823 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18824 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18825 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
18826 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
18827 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
18828 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
18829
18830 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
18831 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
18832 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
18833 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
18834 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
18835 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18836 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18837 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18838 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
18839 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
18840 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
18841 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
18842
18843 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
18844 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
18845 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
18846 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
18847 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
18848 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18849 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18850 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18851 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
18852 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
18853 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
18854 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
18855
18856 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
18857 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
18858 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
18859 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
18860 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
18861 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18862 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18863 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18864 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
18865 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
18866 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
18867 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
18868
18869 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
18870 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
18871 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
18872 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
18873 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
18874 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18875 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18876 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18877 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
18878 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
18879 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
18880 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
18881
18882 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18883 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18884 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18885 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18886 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18887 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18888 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18889 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18890 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18891 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18892 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18893 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18894
18895 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18896 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18897 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18898 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18899 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18900 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18901 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18902 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18903 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18904 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18905 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18906 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18907
18908 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18909 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18910 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18911 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18912 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18913 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18914 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18915 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18916 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18917 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18918 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18919 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18920
18921 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
18922 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
18923 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
18924 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
18925
18926 cCL("flts", e000110, 2, (RF, RR), rn_rd),
18927 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
18928 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
18929 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
18930 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
18931 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
18932 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
18933 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
18934 cCL("flte", e080110, 2, (RF, RR), rn_rd),
18935 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
18936 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
18937 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
18938
18939 /* The implementation of the FIX instruction is broken on some
18940 assemblers, in that it accepts a precision specifier as well as a
18941 rounding specifier, despite the fact that this is meaningless.
18942 To be more compatible, we accept it as well, though of course it
18943 does not set any bits. */
18944 cCE("fix", e100110, 2, (RR, RF), rd_rm),
18945 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
18946 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
18947 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
18948 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
18949 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
18950 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
18951 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
18952 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
18953 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
18954 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
18955 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
18956 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
18957
18958 /* Instructions that were new with the real FPA, call them V2. */
18959 #undef ARM_VARIANT
18960 #define ARM_VARIANT & fpu_fpa_ext_v2
18961
18962 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18963 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18964 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18965 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18966 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18967 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18968
18969 #undef ARM_VARIANT
18970 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
18971
18972 /* Moves and type conversions. */
18973 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
18974 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
18975 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
18976 cCE("fmstat", ef1fa10, 0, (), noargs),
18977 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
18978 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
18979 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
18980 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
18981 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
18982 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
18983 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
18984 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
18985 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
18986 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
18987
18988 /* Memory operations. */
18989 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
18990 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
18991 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
18992 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
18993 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
18994 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
18995 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
18996 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
18997 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
18998 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
18999 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19000 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19001 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19002 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19003 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19004 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19005 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19006 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19007
19008 /* Monadic operations. */
19009 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
19010 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
19011 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
19012
19013 /* Dyadic operations. */
19014 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19015 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19016 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19017 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19018 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19019 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19020 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19021 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19022 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19023
19024 /* Comparisons. */
19025 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
19026 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
19027 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
19028 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
19029
19030 /* Double precision load/store are still present on single precision
19031 implementations. */
19032 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19033 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19034 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19035 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19036 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19037 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19038 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19039 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19040 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19041 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19042
19043 #undef ARM_VARIANT
19044 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19045
19046 /* Moves and type conversions. */
19047 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19048 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19049 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19050 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
19051 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
19052 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
19053 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
19054 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19055 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
19056 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19057 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19058 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19059 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19060
19061 /* Monadic operations. */
19062 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19063 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19064 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19065
19066 /* Dyadic operations. */
19067 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19068 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19069 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19070 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19071 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19072 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19073 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19074 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19075 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19076
19077 /* Comparisons. */
19078 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19079 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
19080 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19081 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
19082
19083 #undef ARM_VARIANT
19084 #define ARM_VARIANT & fpu_vfp_ext_v2
19085
19086 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
19087 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
19088 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
19089 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
19090
19091 /* Instructions which may belong to either the Neon or VFP instruction sets.
19092 Individual encoder functions perform additional architecture checks. */
19093 #undef ARM_VARIANT
19094 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19095 #undef THUMB_VARIANT
19096 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19097
19098 /* These mnemonics are unique to VFP. */
19099 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
19100 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
19101 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19102 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19103 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19104 nCE(vcmp, _vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
19105 nCE(vcmpe, _vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
19106 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
19107 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
19108 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
19109
19110 /* Mnemonics shared by Neon and VFP. */
19111 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
19112 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19113 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19114
19115 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19116 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19117
19118 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19119 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19120
19121 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19122 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19123 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19124 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19125 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19126 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19127 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19128 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19129
19130 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
19131 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
19132 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
19133 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
19134
19135
19136 /* NOTE: All VMOV encoding is special-cased! */
19137 NCE(vmov, 0, 1, (VMOV), neon_mov),
19138 NCE(vmovq, 0, 1, (VMOV), neon_mov),
19139
19140 #undef THUMB_VARIANT
19141 #define THUMB_VARIANT & fpu_neon_ext_v1
19142 #undef ARM_VARIANT
19143 #define ARM_VARIANT & fpu_neon_ext_v1
19144
19145 /* Data processing with three registers of the same length. */
19146 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
19147 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
19148 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
19149 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19150 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19151 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19152 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19153 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19154 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19155 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
19156 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19157 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19158 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19159 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19160 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19161 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19162 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19163 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19164 /* If not immediate, fall back to neon_dyadic_i64_su.
19165 shl_imm should accept I8 I16 I32 I64,
19166 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
19167 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
19168 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
19169 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
19170 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
19171 /* Logic ops, types optional & ignored. */
19172 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19173 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19174 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19175 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19176 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19177 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19178 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19179 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19180 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
19181 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
19182 /* Bitfield ops, untyped. */
19183 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19184 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19185 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19186 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19187 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19188 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19189 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
19190 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19191 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19192 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19193 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19194 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19195 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19196 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
19197 back to neon_dyadic_if_su. */
19198 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19199 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
19200 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19201 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
19202 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19203 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
19204 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19205 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
19206 /* Comparison. Type I8 I16 I32 F32. */
19207 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
19208 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
19209 /* As above, D registers only. */
19210 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
19211 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
19212 /* Int and float variants, signedness unimportant. */
19213 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
19214 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
19215 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
19216 /* Add/sub take types I8 I16 I32 I64 F32. */
19217 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
19218 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
19219 /* vtst takes sizes 8, 16, 32. */
19220 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
19221 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
19222 /* VMUL takes I8 I16 I32 F32 P8. */
19223 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
19224 /* VQD{R}MULH takes S16 S32. */
19225 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19226 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
19227 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19228 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
19229 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19230 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
19231 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19232 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
19233 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19234 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
19235 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19236 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
19237 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
19238 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
19239 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
19240 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
19241
19242 /* Two address, int/float. Types S8 S16 S32 F32. */
19243 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
19244 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
19245
19246 /* Data processing with two registers and a shift amount. */
19247 /* Right shifts, and variants with rounding.
19248 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
19249 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19250 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
19251 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19252 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
19253 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
19254 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
19255 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
19256 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
19257 /* Shift and insert. Sizes accepted 8 16 32 64. */
19258 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
19259 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
19260 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
19261 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
19262 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
19263 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
19264 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
19265 /* Right shift immediate, saturating & narrowing, with rounding variants.
19266 Types accepted S16 S32 S64 U16 U32 U64. */
19267 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19268 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19269 /* As above, unsigned. Types accepted S16 S32 S64. */
19270 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19271 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19272 /* Right shift narrowing. Types accepted I16 I32 I64. */
19273 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19274 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19275 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
19276 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
19277 /* CVT with optional immediate for fixed-point variant. */
19278 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
19279
19280 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
19281 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
19282
19283 /* Data processing, three registers of different lengths. */
19284 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
19285 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
19286 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
19287 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
19288 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
19289 /* If not scalar, fall back to neon_dyadic_long.
19290 Vector types as above, scalar types S16 S32 U16 U32. */
19291 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19292 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19293 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
19294 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19295 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19296 /* Dyadic, narrowing insns. Types I16 I32 I64. */
19297 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19298 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19299 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19300 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19301 /* Saturating doubling multiplies. Types S16 S32. */
19302 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19303 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19304 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19305 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
19306 S16 S32 U16 U32. */
19307 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
19308
19309 /* Extract. Size 8. */
19310 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
19311 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
19312
19313 /* Two registers, miscellaneous. */
19314 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
19315 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
19316 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
19317 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
19318 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
19319 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
19320 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
19321 /* Vector replicate. Sizes 8 16 32. */
19322 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
19323 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
19324 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
19325 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
19326 /* VMOVN. Types I16 I32 I64. */
19327 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
19328 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
19329 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
19330 /* VQMOVUN. Types S16 S32 S64. */
19331 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
19332 /* VZIP / VUZP. Sizes 8 16 32. */
19333 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
19334 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
19335 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
19336 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
19337 /* VQABS / VQNEG. Types S8 S16 S32. */
19338 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
19339 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
19340 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
19341 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
19342 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
19343 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
19344 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
19345 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
19346 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
19347 /* Reciprocal estimates. Types U32 F32. */
19348 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
19349 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
19350 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
19351 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
19352 /* VCLS. Types S8 S16 S32. */
19353 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
19354 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
19355 /* VCLZ. Types I8 I16 I32. */
19356 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
19357 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
19358 /* VCNT. Size 8. */
19359 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
19360 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
19361 /* Two address, untyped. */
19362 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
19363 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
19364 /* VTRN. Sizes 8 16 32. */
19365 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
19366 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
19367
19368 /* Table lookup. Size 8. */
19369 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19370 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19371
19372 #undef THUMB_VARIANT
19373 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
19374 #undef ARM_VARIANT
19375 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
19376
19377 /* Neon element/structure load/store. */
19378 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
19379 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
19380 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
19381 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
19382 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
19383 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
19384 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
19385 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
19386
19387 #undef THUMB_VARIANT
19388 #define THUMB_VARIANT &fpu_vfp_ext_v3xd
19389 #undef ARM_VARIANT
19390 #define ARM_VARIANT &fpu_vfp_ext_v3xd
19391 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
19392 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19393 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19394 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19395 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19396 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19397 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19398 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19399 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19400
19401 #undef THUMB_VARIANT
19402 #define THUMB_VARIANT & fpu_vfp_ext_v3
19403 #undef ARM_VARIANT
19404 #define ARM_VARIANT & fpu_vfp_ext_v3
19405
19406 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
19407 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19408 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19409 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19410 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19411 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19412 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19413 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19414 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19415
19416 #undef ARM_VARIANT
19417 #define ARM_VARIANT &fpu_vfp_ext_fma
19418 #undef THUMB_VARIANT
19419 #define THUMB_VARIANT &fpu_vfp_ext_fma
19420 /* Mnemonics shared by Neon and VFP. These are included in the
19421 VFP FMA variant; NEON and VFP FMA always includes the NEON
19422 FMA instructions. */
19423 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19424 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19425  /* ffmas/ffnmas/ffmad/ffnmad are dummy mnemonics to satisfy gas;
19426     the v form should always be used.  */
19427 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19428 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19429 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19430 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19431 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19432 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19433
19434 #undef THUMB_VARIANT
19435 #undef ARM_VARIANT
19436 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
19437
19438 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19439 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19440 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19441 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19442 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19443 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19444 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
19445 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
19446
19447 #undef ARM_VARIANT
19448 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
19449
19450 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
19451 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
19452 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
19453 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
19454 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
19455 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
19456 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
19457 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
19458 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
19459 cCE("textrmub", e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19460 cCE("textrmuh", e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19461 cCE("textrmuw", e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19462 cCE("textrmsb", e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19463 cCE("textrmsh", e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19464 cCE("textrmsw", e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19465 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19466 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19467 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19468 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
19469 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
19470 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19471 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19472 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19473 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19474 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19475 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19476 cCE("tmovmskb", e100030, 2, (RR, RIWR), rd_rn),
19477 cCE("tmovmskh", e500030, 2, (RR, RIWR), rd_rn),
19478 cCE("tmovmskw", e900030, 2, (RR, RIWR), rd_rn),
19479 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
19480 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
19481 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
19482 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
19483 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
19484 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
19485 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
19486 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
19487 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19488 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19489 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19490 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19491 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19492 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19493 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19494 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19495 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19496 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
19497 cCE("walignr0", e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19498 cCE("walignr1", e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19499 cCE("walignr2", ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19500 cCE("walignr3", eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19501 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19502 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19503 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19504 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19505 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19506 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19507 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19508 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19509 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19510 cCE("wcmpgtub", e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19511 cCE("wcmpgtuh", e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19512 cCE("wcmpgtuw", e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19513 cCE("wcmpgtsb", e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19514 cCE("wcmpgtsh", e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19515 cCE("wcmpgtsw", eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19516 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
19517 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
19518 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
19519 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
19520 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19521 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19522 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19523 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19524 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19525 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19526 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19527 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19528 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19529 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19530 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19531 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19532 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19533 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19534 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19535 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19536 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19537 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19538 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
19539 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19540 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19541 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19542 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19543 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19544 cCE("wpackhss", e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19545 cCE("wpackhus", e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19546 cCE("wpackwss", eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19547 cCE("wpackwus", e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19548 cCE("wpackdss", ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19549 cCE("wpackdus", ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19550 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19551 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19552 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19553 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19554 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19555 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19556 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19557 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19558 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19559 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19560 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
19561 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19562 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19563 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19564 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19565 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19566 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19567 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19568 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19569 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19570 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19571 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19572 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19573 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19574 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19575 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19576 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19577 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19578 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19579 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
19580 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
19581 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
19582 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
19583 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19584 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19585 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19586 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19587 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19588 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19589 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19590 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19591 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19592 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
19593 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
19594 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
19595 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
19596 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
19597 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
19598 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19599 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19600 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19601 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
19602 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
19603 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
19604 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
19605 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
19606 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
19607 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19608 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19609 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19610 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19611 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
19612
19613 #undef ARM_VARIANT
19614 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
19615
19616 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
19617 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
19618 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
19619 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
19620 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
19621 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
19622 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19623 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19624 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19625 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19626 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19627 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19628 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19629 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19630 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19631 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19632 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19633 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19634 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19635 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19636 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
19637 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19638 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19639 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19640 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19641 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19642 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19643 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19644 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19645 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19646 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19647 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19648 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19649 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19650 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19651 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19652 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19653 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19654 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19655 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19656 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19657 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19658 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19659 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19660 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19661 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19662 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19663 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19664 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19665 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19666 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19667 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19668 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19669 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19670 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19671 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19672 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19673
19674 #undef ARM_VARIANT
19675 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
19676
19677 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
19678 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
19679 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
19680 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
19681 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
19682 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
19683 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
19684 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
19685 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
19686 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
19687 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
19688 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
19689 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
19690 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
19691 cCE("cfmv64lr", e000510, 2, (RMDX, RR), rn_rd),
19692 cCE("cfmvr64l", e100510, 2, (RR, RMDX), rd_rn),
19693 cCE("cfmv64hr", e000530, 2, (RMDX, RR), rn_rd),
19694 cCE("cfmvr64h", e100530, 2, (RR, RMDX), rd_rn),
19695 cCE("cfmval32", e200440, 2, (RMAX, RMFX), rd_rn),
19696 cCE("cfmv32al", e100440, 2, (RMFX, RMAX), rd_rn),
19697 cCE("cfmvam32", e200460, 2, (RMAX, RMFX), rd_rn),
19698 cCE("cfmv32am", e100460, 2, (RMFX, RMAX), rd_rn),
19699 cCE("cfmvah32", e200480, 2, (RMAX, RMFX), rd_rn),
19700 cCE("cfmv32ah", e100480, 2, (RMFX, RMAX), rd_rn),
19701 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
19702 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
19703 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
19704 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
19705 cCE("cfmvsc32", e2004e0, 2, (RMDS, RMDX), mav_dspsc),
19706 cCE("cfmv32sc", e1004e0, 2, (RMDX, RMDS), rd),
19707 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
19708 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
19709 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
19710 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
19711 cCE("cfcvt32s", e000480, 2, (RMF, RMFX), rd_rn),
19712 cCE("cfcvt32d", e0004a0, 2, (RMD, RMFX), rd_rn),
19713 cCE("cfcvt64s", e0004c0, 2, (RMF, RMDX), rd_rn),
19714 cCE("cfcvt64d", e0004e0, 2, (RMD, RMDX), rd_rn),
19715 cCE("cfcvts32", e100580, 2, (RMFX, RMF), rd_rn),
19716 cCE("cfcvtd32", e1005a0, 2, (RMFX, RMD), rd_rn),
19717 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
19718 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
19719 cCE("cfrshl32", e000550, 3, (RMFX, RMFX, RR), mav_triple),
19720 cCE("cfrshl64", e000570, 3, (RMDX, RMDX, RR), mav_triple),
19721 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
19722 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
19723 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
19724 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
19725 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
19726 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
19727 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
19728 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
19729 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
19730 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
19731 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
19732 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
19733 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
19734 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
19735 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
19736 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
19737 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
19738 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
19739 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
19740 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
19741 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
19742 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
19743 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
19744 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
19745 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
19746 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
19747 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
19748 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
19749 cCE("cfmadd32", e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
19750 cCE("cfmsub32", e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
19751 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
19752 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
19753 };
19754 #undef ARM_VARIANT
19755 #undef THUMB_VARIANT
19756 #undef TCE
19757 #undef TUE
19758 #undef TUF
19759 #undef TCC
19760 #undef cCE
19761 #undef cCL
19762 #undef C3E
19763 #undef CE
19764 #undef CM
19765 #undef UE
19766 #undef UF
19767 #undef UT
19768 #undef NUF
19769 #undef nUF
19770 #undef NCE
19771 #undef nCE
19772 #undef OPS0
19773 #undef OPS1
19774 #undef OPS2
19775 #undef OPS3
19776 #undef OPS4
19777 #undef OPS5
19778 #undef OPS6
19779 #undef do_0
19780 \f
19781 /* MD interface: bits in the object file. */
19782
19783 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
19784 for use in the a.out file, and stores them in the array pointed to by buf.
19785 This knows about the endian-ness of the target machine and does
19786 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
19787 2 (short) and 4 (long) Floating numbers are put out as a series of
19788 LITTLENUMS (shorts, here at least). */
19789
19790 void
19791 md_number_to_chars (char * buf, valueT val, int n)
19792 {
19793 if (target_big_endian)
19794 number_to_chars_bigendian (buf, val, n);
19795 else
19796 number_to_chars_littleendian (buf, val, n);
19797 }
19798
19799 static valueT
19800 md_chars_to_number (char * buf, int n)
19801 {
19802 valueT result = 0;
19803 unsigned char * where = (unsigned char *) buf;
19804
19805 if (target_big_endian)
19806 {
19807 while (n--)
19808 {
19809 result <<= 8;
19810 result |= (*where++ & 255);
19811 }
19812 }
19813 else
19814 {
19815 while (n--)
19816 {
19817 result <<= 8;
19818 result |= (where[n] & 255);
19819 }
19820 }
19821
19822 return result;
19823 }
19824
19825 /* MD interface: Sections. */
19826
19827 /* Calculate the maximum variable size (i.e., excluding fr_fix)
19828 that an rs_machine_dependent frag may reach. */
19829
19830 unsigned int
19831 arm_frag_max_var (fragS *fragp)
19832 {
19833 /* We only use rs_machine_dependent for variable-size Thumb instructions,
19834 which are either THUMB_SIZE (2) or INSN_SIZE (4).
19835
19836 Note that we generate relaxable instructions even for cases that don't
19837 really need it, like an immediate that's a trivial constant. So we're
19838 overestimating the instruction size for some of those cases. Rather
19839 than putting more intelligence here, it would probably be better to
19840 avoid generating a relaxation frag in the first place when it can be
19841 determined up front that a short instruction will suffice. */
19842
19843 gas_assert (fragp->fr_type == rs_machine_dependent);
19844 return INSN_SIZE;
19845 }
19846
19847 /* Estimate the size of a frag before relaxing. Assume everything fits in
19848 2 bytes. */
19849
19850 int
19851 md_estimate_size_before_relax (fragS * fragp,
19852 segT segtype ATTRIBUTE_UNUSED)
19853 {
19854 fragp->fr_var = 2;
19855 return 2;
19856 }
19857
/* Convert a machine dependent frag.  Relaxation has settled: the frag's
   final size is in fr_var (2 for the narrow Thumb encoding, 4 for the
   wide Thumb-2 one).  For the 4-byte case, rewrite the 16-bit
   instruction originally emitted at the end of the frag into its 32-bit
   equivalent, carrying the register/condition fields across; in both
   cases emit the fixup that will later resolve the immediate, offset or
   branch target.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;		/* The original narrow (16-bit) encoding.  */
  char *buf;
  expressionS exp;		/* Operand expression for the fixup.  */
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The relaxable instruction lives in the variable part of the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  old_op = bfd_get_16(abfd, buf);
  /* Rebuild the operand expression from the frag's symbol/offset.  */
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;	/* Mnemonic id recorded at assembly time.  */
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* The 0x4xxx (PC-relative LDR) and 0x9xxx (SP-relative LDR/STR)
	     narrow forms keep Rd in bits 8-10; the remaining forms keep
	     Rd in bits 0-2 and Rn in bits 3-5.  Move those fields into
	     the corresponding wide-encoding positions.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;	/* Select the immediate-offset form.  */
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the relaxed PC-relative load is a PC-relative fixup.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;	/* Carry Rd across.  */
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Narrow ADR is relative to the aligned PC (address + 4);
	     fold that bias into the expression.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* MOV/MOVS place the register in the Rd field (bits 8-10 of the
	     narrow form map straight across); CMP/CMN place it in Rn
	     (shifted a further 8 bits up).  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  /* Switch the wide encoding to its immediate form.  */
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  insn |= (old_op & 0xf00) << 14;	/* Carry the condition across.  */
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;	/* Carry Rd across.  */
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Carry both register fields (Rd and Rn) across.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 is the S bit: flag-setting add/sub takes the
	     ADD_IMM reloc, the plain form the generic T32 one.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  /* Create the fixup over the variable part, attributing any later
     diagnostics to the original source location.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;
}
20026
20027 /* Return the size of a relaxable immediate operand instruction.
20028 SHIFT and SIZE specify the form of the allowable immediate. */
20029 static int
20030 relax_immediate (fragS *fragp, int size, int shift)
20031 {
20032 offsetT offset;
20033 offsetT mask;
20034 offsetT low;
20035
20036 /* ??? Should be able to do better than this. */
20037 if (fragp->fr_symbol)
20038 return 4;
20039
20040 low = (1 << shift) - 1;
20041 mask = (1 << (shift + size)) - (1 << shift);
20042 offset = fragp->fr_offset;
20043 /* Force misaligned offsets to 32-bit variant. */
20044 if (offset & low)
20045 return 4;
20046 if (offset & ~mask)
20047 return 4;
20048 return 2;
20049 }
20050
/* Get the address of a symbol during relaxation, compensating for frags
   that have not yet been processed on the current relaxation pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  /* An absolute symbol is only acceptable if it lives in the
     zero-address frag.  */
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  /* Matching relax_markers mean SYM_FRAG was already visited this pass,
     so its address is current and no adjustment is needed.  */
  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* An intervening alignment frag absorbs stretch up to its
		 alignment boundary; round stretch towards zero to the
		 2**fr_offset boundary.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Fully absorbed: nothing left to propagate.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the remaining stretch if we actually reached
	 SYM_FRAG (f == sym_frag) or scanned to the chain's end without
	 hitting an absorbing break.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
20100
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
   load.  2 if the narrow encoding reaches the target, else 4.  */
static int
relax_adr (fragS *fragp, asection *sec, long stretch)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (fragp->fr_symbol == NULL
      || !S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  /* The base is the instruction's PC value rounded down to a word
     boundary: Align(addr + 4, 4).  */
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val & 3)
    return 4;
  val -= addr;
  /* The narrow form takes an unsigned word-scaled 8-bit offset,
     i.e. 0..1020 bytes.  */
  if (val < 0 || val > 1020)
    return 4;
  return 2;
}
20127
20128 /* Return the size of a relaxable add/sub immediate instruction. */
20129 static int
20130 relax_addsub (fragS *fragp, asection *sec)
20131 {
20132 char *buf;
20133 int op;
20134
20135 buf = fragp->fr_literal + fragp->fr_fix;
20136 op = bfd_get_16(sec->owner, buf);
20137 if ((op & 0xf) == ((op >> 4) & 0xf))
20138 return relax_immediate (fragp, 8, 0);
20139 else
20140 return relax_immediate (fragp, 3, 0);
20141 }
20142
20143
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.  Returns 2 when
   the narrow encoding reaches the target, 4 otherwise.  */

static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

#ifdef OBJ_ELF
  /* Never relax a branch to a function symbol; presumably the linker
     may need the wide form (e.g. for interworking/veneers) — see the
     ARM_IS_FUNC check.  */
  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))
    return 4;

  /* PR 12532.  Global symbols with default visibility might
     be preempted, so do not relax relocations to them.  */
  if ((ELF_ST_VISIBILITY (S_GET_OTHER (fragp->fr_symbol)) == STV_DEFAULT)
      && (! S_IS_LOCAL (fragp->fr_symbol)))
    return 4;
#endif

  val = relaxed_symbol_addr (fragp, stretch);
  /* Branch offsets are relative to the instruction address + 4.  */
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2 */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;
  return 2;
}
20182
20183
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* Dispatch on the mnemonic id stashed in fr_subtype.  Each helper
     returns the size (2 or 4) the instruction needs on this pass; for
     relax_immediate the arguments are the narrow encoding's immediate
     width in bits and its implicit scale (2**shift).  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit immediate, halfword-scaled.  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit immediate, byte-scaled.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      /* 8-bit unscaled immediate.  */
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      /* Unconditional branch: 11-bit offset field.  */
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      /* Conditional branch: 8-bit offset field.  */
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      /* add/sub sp, sp, #imm: 7-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
20262
20263 /* Round up a section size to the appropriate boundary. */
20264
20265 valueT
20266 md_section_align (segT segment ATTRIBUTE_UNUSED,
20267 valueT size)
20268 {
20269 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
20270 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
20271 {
20272 /* For a.out, force the section size to be aligned. If we don't do
20273 this, BFD will align it for us, but it will not write out the
20274 final bytes of the section. This may be a bug in BFD, but it is
20275 easier to fix it here since that is how the other a.out targets
20276 work. */
20277 int align;
20278
20279 align = bfd_get_section_alignment (stdoutput, segment);
20280 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
20281 }
20282 #endif
20283
20284 return size;
20285 }
20286
20287 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
20288 of an rs_align_code fragment. */
20289
void
arm_handle_align (fragS * fragP)
{
  /* NOP encodings, indexed first by architecture variant and then by
     endianness (0 = little, 1 = big).  The bytes are stored in target
     memory order.  */
  static char const arm_noop[2][2][4] =
    {
      { /* ARMv1: MOV r0, r0.  */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      { /* ARMv6k: architected NOP.  */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static char const thumb_noop[2][2][2] =
    {
      { /* Thumb-1: MOV r8, r8.  */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      { /* Thumb-2: architected 16-bit NOP.  */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static char const wide_thumb_noop[2][4] =
    { /* Wide Thumb-2 NOP (NOP.W).  */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const char * noop;
  const char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  /* Only alignment-in-code frags are handled here.  */
  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this frag must supply, and where they go.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* NOTE(review): this assumes MAX_MEM_FOR_RS_ALIGN_CODE has the form
     2^n - 1, so the AND acts as a cheap modulus — confirm in tc-arm.h.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  /* arm_init_frag must have recorded the ARM/Thumb mode for this frag.  */
  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb code.  On Thumb-2 capable cores we may also use a single
	 narrow NOP followed by wide NOPs to reach 4-byte multiples.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      /* ARM code: pick the v6k NOP when available, else MOV r0, r0.  */
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  if (bytes & (noop_size - 1))
    {
      /* Pad with zero bytes up to instruction alignment; mark them as
	 data so disassemblers do not decode them.  */
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  /* Fill the rest of the gap with full-size NOPs.  */
  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
20403
20404 /* Called from md_do_align. Used to create an alignment
20405 frag in a code section. */
20406
20407 void
20408 arm_frag_align_code (int n, int max)
20409 {
20410 char * p;
20411
20412 /* We assume that there will never be a requirement
20413 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
20414 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
20415 {
20416 char err_msg[128];
20417
20418 sprintf (err_msg,
20419 _("alignments greater than %d bytes not supported in .text sections."),
20420 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
20421 as_fatal ("%s", err_msg);
20422 }
20423
20424 p = frag_var (rs_align_code,
20425 MAX_MEM_FOR_RS_ALIGN_CODE,
20426 1,
20427 (relax_substateT) max,
20428 (symbolS *) NULL,
20429 (offsetT) n,
20430 (char *) NULL);
20431 *p = 0;
20432 }
20433
20434 /* Perform target specific initialisation of a frag.
20435 Note - despite the name this initialisation is not done when the frag
20436 is created, but only when its type is assigned. A frag can be created
20437 and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */
20439
20440 #ifndef OBJ_ELF
20441 void
20442 arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
20443 {
20444 /* Record whether this frag is in an ARM or a THUMB area. */
20445 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
20446 }
20447
20448 #else /* OBJ_ELF is defined. */
20449 void
20450 arm_init_frag (fragS * fragP, int max_chars)
20451 {
20452 /* If the current ARM vs THUMB mode has not already
20453 been recorded into this frag then do so now. */
20454 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
20455 {
20456 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
20457
20458 /* Record a mapping symbol for alignment frags. We will delete this
20459 later if the alignment ends up empty. */
20460 switch (fragP->fr_type)
20461 {
20462 case rs_align:
20463 case rs_align_test:
20464 case rs_fill:
20465 mapping_state_2 (MAP_DATA, max_chars);
20466 break;
20467 case rs_align_code:
20468 mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
20469 break;
20470 default:
20471 break;
20472 }
20473 }
20474 }
20475
20476 /* When we change sections we need to issue a new mapping symbol. */
20477
20478 void
20479 arm_elf_change_section (void)
20480 {
20481 /* Link an unlinked unwind index table section to the .text section. */
20482 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
20483 && elf_linked_to_section (now_seg) == NULL)
20484 elf_linked_to_section (now_seg) = text_section;
20485 }
20486
20487 int
20488 arm_elf_section_type (const char * str, size_t len)
20489 {
20490 if (len == 5 && strncmp (str, "exidx", 5) == 0)
20491 return SHT_ARM_EXIDX;
20492
20493 return -1;
20494 }
20495 \f
20496 /* Code to deal with unwinding tables. */
20497
20498 static void add_unwind_adjustsp (offsetT);
20499
20500 /* Generate any deferred unwind frame offset. */
20501
20502 static void
20503 flush_pending_unwind (void)
20504 {
20505 offsetT offset;
20506
20507 offset = unwind.pending_offset;
20508 unwind.pending_offset = 0;
20509 if (offset != 0)
20510 add_unwind_adjustsp (offset);
20511 }
20512
20513 /* Add an opcode to this list for this function. Two-byte opcodes should
20514 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
20515 order. */
20516
20517 static void
20518 add_unwind_opcode (valueT op, int length)
20519 {
20520 /* Add any deferred stack adjustment. */
20521 if (unwind.pending_offset)
20522 flush_pending_unwind ();
20523
20524 unwind.sp_restored = 0;
20525
20526 if (unwind.opcode_count + length > unwind.opcode_alloc)
20527 {
20528 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
20529 if (unwind.opcodes)
20530 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
20531 unwind.opcode_alloc);
20532 else
20533 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
20534 }
20535 while (length > 0)
20536 {
20537 length--;
20538 unwind.opcodes[unwind.opcode_count] = op & 0xff;
20539 op >>= 8;
20540 unwind.opcode_count++;
20541 }
20542 }
20543
20544 /* Add unwind opcodes to adjust the stack pointer. */
20545
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  The encoded value is
	 (offset - 0x204) >> 2, i.e. the opcode itself accounts for a
	 0x204-byte adjustment and the operand counts words.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	/* A zero operand still needs one explicit uleb128 byte.  */
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    /* More groups follow: set the continuation bit.  */
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  The uleb128 bytes are pushed most-significant
	 first because the opcode list is reversed when emitted.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes: a maximal 0x3f adjustment followed by the
	 remainder.  Each short opcode encodes (imm << 2) + 4 bytes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode: 0x00-0x3f adds (imm << 2) + 4 to sp.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: opcodes 0x40-0x7f subtract (imm << 2) + 4,
	 so 0x7f removes exactly 0x100 bytes per opcode.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
20605
20606 /* Finish the list of unwind opcodes for this function. */
20607 static void
20608 finish_unwind_opcodes (void)
20609 {
20610 valueT op;
20611
20612 if (unwind.fp_used)
20613 {
20614 /* Adjust sp as necessary. */
20615 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
20616 flush_pending_unwind ();
20617
20618 /* After restoring sp from the frame pointer. */
20619 op = 0x90 | unwind.fp_reg;
20620 add_unwind_opcode (op, 1);
20621 }
20622 else
20623 flush_pending_unwind ();
20624 }
20625
20626
20627 /* Start an exception table entry. If idx is nonzero this is an index table
20628 entry. */
20629
20630 static void
20631 start_unwind_section (const segT text_seg, int idx)
20632 {
20633 const char * text_name;
20634 const char * prefix;
20635 const char * prefix_once;
20636 const char * group_name;
20637 size_t prefix_len;
20638 size_t text_len;
20639 char * sec_name;
20640 size_t sec_name_len;
20641 int type;
20642 int flags;
20643 int linkonce;
20644
20645 if (idx)
20646 {
20647 prefix = ELF_STRING_ARM_unwind;
20648 prefix_once = ELF_STRING_ARM_unwind_once;
20649 type = SHT_ARM_EXIDX;
20650 }
20651 else
20652 {
20653 prefix = ELF_STRING_ARM_unwind_info;
20654 prefix_once = ELF_STRING_ARM_unwind_info_once;
20655 type = SHT_PROGBITS;
20656 }
20657
20658 text_name = segment_name (text_seg);
20659 if (streq (text_name, ".text"))
20660 text_name = "";
20661
20662 if (strncmp (text_name, ".gnu.linkonce.t.",
20663 strlen (".gnu.linkonce.t.")) == 0)
20664 {
20665 prefix = prefix_once;
20666 text_name += strlen (".gnu.linkonce.t.");
20667 }
20668
20669 prefix_len = strlen (prefix);
20670 text_len = strlen (text_name);
20671 sec_name_len = prefix_len + text_len;
20672 sec_name = (char *) xmalloc (sec_name_len + 1);
20673 memcpy (sec_name, prefix, prefix_len);
20674 memcpy (sec_name + prefix_len, text_name, text_len);
20675 sec_name[prefix_len + text_len] = '\0';
20676
20677 flags = SHF_ALLOC;
20678 linkonce = 0;
20679 group_name = 0;
20680
20681 /* Handle COMDAT group. */
20682 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
20683 {
20684 group_name = elf_group_name (text_seg);
20685 if (group_name == NULL)
20686 {
20687 as_bad (_("Group section `%s' has no group signature"),
20688 segment_name (text_seg));
20689 ignore_rest_of_line ();
20690 return;
20691 }
20692 flags |= SHF_GROUP;
20693 linkonce = 1;
20694 }
20695
20696 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
20697
20698 /* Set the section link for index tables. */
20699 if (idx)
20700 elf_linked_to_section (now_seg) = text_seg;
20701 }
20702
20703
20704 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
20705 personality routine data. Returns zero, or the index table value for
   an inline entry.  */
20707
static valueT
create_unwind_entry (int have_data)
{
  /* Size of the table entry payload, first in bytes, later in words.  */
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.	*/
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.	 */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index -2 means .cantunwind was given.  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.
	 Routine 0 can hold at most three opcode bytes inline.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table: the 0x80
		 marker byte followed by up to three opcode bytes,
		 consumed from the (reversed) opcode list.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.	 */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* Custom personality routine.  */
      gas_assert (unwind.personality_index == -1);

      /* An extra byte is required for the opcode count.	*/
      size = unwind.opcode_count + 1;
    }

  /* Round up to a whole number of 32-bit words; the byte count field
     is 8 bits wide.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* The table entry must be 4-byte aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.	*/
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine: emit a PREL31 reference to it.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.	*/
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.	*/
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
20867
20868
20869 /* Initialize the DWARF-2 unwind information for this procedure. */
20870
void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
20876 #endif /* OBJ_ELF */
20877
20878 /* Convert REGNAME to a DWARF-2 register number. */
20879
20880 int
20881 tc_arm_regname_to_dw2regnum (char *regname)
20882 {
20883 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
20884
20885 if (reg == FAIL)
20886 return -1;
20887
20888 return reg;
20889 }
20890
20891 #ifdef TE_PE
20892 void
20893 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
20894 {
20895 expressionS exp;
20896
20897 exp.X_op = O_secrel;
20898 exp.X_add_symbol = symbol;
20899 exp.X_add_number = 0;
20900 emit_expr (&exp, size);
20901 }
20902 #endif
20903
20904 /* MD interface: Symbol and relocation handling. */
20905
20906 /* Return the address within the segment that a PC-relative fixup is
20907 relative to. For ARM, PC-relative fixups applied to instructions
20908 are generally relative to the location of the fixup plus 8 bytes.
20909 Thumb branches are offset by 4, and Thumb loads relative to PC
20910 require special handling. */
20911
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
    /* PC relative addressing on the Thumb is slightly odd as the
       bottom two bits of the PC are forced to zero for the
       calculation.  This happens *after* application of the
       pipeline offset.  However, Thumb adrl already adjusts for
       this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

    /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* NOTE(review): when the target is a defined ARM function in this
	 section on a v5t+ core, the zeroed base is overridden with the
	 real address — presumably because the BL may be converted to a
	 mode-changing BLX; confirm against the fixup code.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

    /* BLX is like branches above, but forces the low two bits of PC to
       zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

    /* ARM mode branches are offset by +8.  However, the Windows CE
       loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


    /* ARM mode loads relative to PC are also offset by +8.  Unlike
       branches, the Windows CE loader *does* expect the relocation
       to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


    /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
21033
21034 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
21035 Otherwise we have no need to default values of symbols. */
21036
21037 symbolS *
21038 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
21039 {
21040 #ifdef OBJ_ELF
21041 if (name[0] == '_' && name[1] == 'G'
21042 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
21043 {
21044 if (!GOT_symbol)
21045 {
21046 if (symbol_find (name))
21047 as_bad (_("GOT already in the symbol table"));
21048
21049 GOT_symbol = symbol_new (name, undefined_section,
21050 (valueT) 0, & zero_address_frag);
21051 }
21052
21053 return GOT_symbol;
21054 }
21055 #endif
21056
21057 return NULL;
21058 }
21059
21060 /* Subroutine of md_apply_fix. Check to see if an immediate can be
21061 computed as two separate immediate values, added together. We
21062 already know that this value cannot be computed by just one ARM
21063 instruction. */
21064
21065 static unsigned int
21066 validate_immediate_twopart (unsigned int val,
21067 unsigned int * highpart)
21068 {
21069 unsigned int a;
21070 unsigned int i;
21071
21072 for (i = 0; i < 32; i += 2)
21073 if (((a = rotate_left (val, i)) & 0xff) != 0)
21074 {
21075 if (a & 0xff00)
21076 {
21077 if (a & ~ 0xffff)
21078 continue;
21079 * highpart = (a >> 8) | ((i + 24) << 7);
21080 }
21081 else if (a & 0xff0000)
21082 {
21083 if (a & 0xff000000)
21084 continue;
21085 * highpart = (a >> 16) | ((i + 16) << 7);
21086 }
21087 else
21088 {
21089 gas_assert (a & 0xff000000);
21090 * highpart = (a >> 24) | ((i + 8) << 7);
21091 }
21092
21093 return (a & 0xff) | (i << 7);
21094 }
21095
21096 return FAIL;
21097 }
21098
21099 static int
21100 validate_offset_imm (unsigned int val, int hwse)
21101 {
21102 if ((hwse && val > 255) || val > 4095)
21103 return FAIL;
21104 return val;
21105 }
21106
21107 /* Subroutine of md_apply_fix. Do those data_ops which can take a
21108 negative immediate constant by altering the instruction. A bit of
21109 a hack really.
21110 MOV <-> MVN
21111 AND <-> BIC
21112 ADC <-> SBC
21113 by inverting the second operand, and
21114 ADD <-> SUB
21115 CMP <-> CMN
21116 by negating the second operand. */
21117
21118 static int
21119 negate_data_op (unsigned long * instruction,
21120 unsigned long value)
21121 {
21122 int op, new_inst;
21123 unsigned long negated, inverted;
21124
21125 negated = encode_arm_immediate (-value);
21126 inverted = encode_arm_immediate (~value);
21127
21128 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
21129 switch (op)
21130 {
21131 /* First negates. */
21132 case OPCODE_SUB: /* ADD <-> SUB */
21133 new_inst = OPCODE_ADD;
21134 value = negated;
21135 break;
21136
21137 case OPCODE_ADD:
21138 new_inst = OPCODE_SUB;
21139 value = negated;
21140 break;
21141
21142 case OPCODE_CMP: /* CMP <-> CMN */
21143 new_inst = OPCODE_CMN;
21144 value = negated;
21145 break;
21146
21147 case OPCODE_CMN:
21148 new_inst = OPCODE_CMP;
21149 value = negated;
21150 break;
21151
21152 /* Now Inverted ops. */
21153 case OPCODE_MOV: /* MOV <-> MVN */
21154 new_inst = OPCODE_MVN;
21155 value = inverted;
21156 break;
21157
21158 case OPCODE_MVN:
21159 new_inst = OPCODE_MOV;
21160 value = inverted;
21161 break;
21162
21163 case OPCODE_AND: /* AND <-> BIC */
21164 new_inst = OPCODE_BIC;
21165 value = inverted;
21166 break;
21167
21168 case OPCODE_BIC:
21169 new_inst = OPCODE_AND;
21170 value = inverted;
21171 break;
21172
21173 case OPCODE_ADC: /* ADC <-> SBC */
21174 new_inst = OPCODE_SBC;
21175 value = inverted;
21176 break;
21177
21178 case OPCODE_SBC:
21179 new_inst = OPCODE_ADC;
21180 value = inverted;
21181 break;
21182
21183 /* We cannot do anything. */
21184 default:
21185 return FAIL;
21186 }
21187
21188 if (value == (unsigned) FAIL)
21189 return FAIL;
21190
21191 *instruction &= OPCODE_MASK;
21192 *instruction |= new_inst << DATA_OP_SHIFT;
21193 return value;
21194 }
21195
21196 /* Like negate_data_op, but for Thumb-2. */
21197
static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Candidate replacement immediates: arithmetic negation for ADD/SUB
     style ops, bitwise inversion for the logical ops.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
    /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

    /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

    /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* Rd == 15 means this AND is really TST, which cannot be
	 rewritten as BIC, so refuse the transformation.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

    /* ADC <-> SBC */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

    /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* Fail if the replacement immediate is itself unencodable.  */
  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Patch the opcode field in place and return the new immediate.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
21271
21272 /* Read a 32-bit thumb instruction from buf. */
21273 static unsigned long
21274 get_thumb32_insn (char * buf)
21275 {
21276 unsigned long insn;
21277 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
21278 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21279
21280 return insn;
21281 }
21282
21283
21284 /* We usually want to set the low bit on the address of thumb function
21285 symbols. In particular .word foo - . should have the low bit set.
21286 Generic code tries to fold the difference of two symbols to
21287 a constant. Prevent this and force a relocation when the first symbols
21288 is a thumb function. */
21289
21290 bfd_boolean
21291 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
21292 {
21293 if (op == O_subtract
21294 && l->X_op == O_symbol
21295 && r->X_op == O_symbol
21296 && THUMB_IS_FUNC (l->X_add_symbol))
21297 {
21298 l->X_op = O_subtract;
21299 l->X_op_symbol = r->X_add_symbol;
21300 l->X_add_number -= r->X_add_number;
21301 return TRUE;
21302 }
21303
21304 /* Process as normal. */
21305 return FALSE;
21306 }
21307
21308 /* Encode Thumb2 unconditional branches and calls. The encoding
21309 for the 2 are identical for the immediate values. */
21310
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
  /* J1/J2 bit positions in the second halfword.  */
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the 25-bit branch offset into the fields of the T2 encoding:
     sign bit S, the two intermediate bits I1/I2, the high 10 bits and
     the low 11 bits (bit 0 is implicit).  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* J1 = I1 ^ S inverted, J2 = I2 ^ S inverted — the final XOR with the
     mask performs the inversion of both bits.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
21332
21333 void
21334 md_apply_fix (fixS * fixP,
21335 valueT * valP,
21336 segT seg)
21337 {
21338 offsetT value = * valP;
21339 offsetT newval;
21340 unsigned int newimm;
21341 unsigned long temp;
21342 int sign;
21343 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
21344
21345 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
21346
21347 /* Note whether this will delete the relocation. */
21348
21349 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
21350 fixP->fx_done = 1;
21351
21352 /* On a 64-bit host, silently truncate 'value' to 32 bits for
21353 consistency with the behaviour on 32-bit hosts. Remember value
21354 for emit_reloc. */
21355 value &= 0xffffffff;
21356 value ^= 0x80000000;
21357 value -= 0x80000000;
21358
21359 *valP = value;
21360 fixP->fx_addnumber = value;
21361
21362 /* Same treatment for fixP->fx_offset. */
21363 fixP->fx_offset &= 0xffffffff;
21364 fixP->fx_offset ^= 0x80000000;
21365 fixP->fx_offset -= 0x80000000;
21366
21367 switch (fixP->fx_r_type)
21368 {
21369 case BFD_RELOC_NONE:
21370 /* This will need to go in the object file. */
21371 fixP->fx_done = 0;
21372 break;
21373
21374 case BFD_RELOC_ARM_IMMEDIATE:
21375 /* We claim that this fixup has been processed here,
21376 even if in fact we generate an error because we do
21377 not have a reloc for it, so tc_gen_reloc will reject it. */
21378 fixP->fx_done = 1;
21379
21380 if (fixP->fx_addsy)
21381 {
21382 const char *msg = 0;
21383
21384 if (! S_IS_DEFINED (fixP->fx_addsy))
21385 msg = _("undefined symbol %s used as an immediate value");
21386 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
21387 msg = _("symbol %s is in a different section");
21388 else if (S_IS_WEAK (fixP->fx_addsy))
21389 msg = _("symbol %s is weak and may be overridden later");
21390
21391 if (msg)
21392 {
21393 as_bad_where (fixP->fx_file, fixP->fx_line,
21394 msg, S_GET_NAME (fixP->fx_addsy));
21395 break;
21396 }
21397 }
21398
21399 temp = md_chars_to_number (buf, INSN_SIZE);
21400
21401 /* If the offset is negative, we should use encoding A2 for ADR. */
21402 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
21403 newimm = negate_data_op (&temp, value);
21404 else
21405 {
21406 newimm = encode_arm_immediate (value);
21407
21408 /* If the instruction will fail, see if we can fix things up by
21409 changing the opcode. */
21410 if (newimm == (unsigned int) FAIL)
21411 newimm = negate_data_op (&temp, value);
21412 }
21413
21414 if (newimm == (unsigned int) FAIL)
21415 {
21416 as_bad_where (fixP->fx_file, fixP->fx_line,
21417 _("invalid constant (%lx) after fixup"),
21418 (unsigned long) value);
21419 break;
21420 }
21421
21422 newimm |= (temp & 0xfffff000);
21423 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
21424 break;
21425
21426 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
21427 {
21428 unsigned int highpart = 0;
21429 unsigned int newinsn = 0xe1a00000; /* nop. */
21430
21431 if (fixP->fx_addsy)
21432 {
21433 const char *msg = 0;
21434
21435 if (! S_IS_DEFINED (fixP->fx_addsy))
21436 msg = _("undefined symbol %s used as an immediate value");
21437 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
21438 msg = _("symbol %s is in a different section");
21439 else if (S_IS_WEAK (fixP->fx_addsy))
21440 msg = _("symbol %s is weak and may be overridden later");
21441
21442 if (msg)
21443 {
21444 as_bad_where (fixP->fx_file, fixP->fx_line,
21445 msg, S_GET_NAME (fixP->fx_addsy));
21446 break;
21447 }
21448 }
21449
21450 newimm = encode_arm_immediate (value);
21451 temp = md_chars_to_number (buf, INSN_SIZE);
21452
21453 /* If the instruction will fail, see if we can fix things up by
21454 changing the opcode. */
21455 if (newimm == (unsigned int) FAIL
21456 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
21457 {
21458 /* No ? OK - try using two ADD instructions to generate
21459 the value. */
21460 newimm = validate_immediate_twopart (value, & highpart);
21461
21462 /* Yes - then make sure that the second instruction is
21463 also an add. */
21464 if (newimm != (unsigned int) FAIL)
21465 newinsn = temp;
21466 /* Still No ? Try using a negated value. */
21467 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
21468 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
21469 /* Otherwise - give up. */
21470 else
21471 {
21472 as_bad_where (fixP->fx_file, fixP->fx_line,
21473 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
21474 (long) value);
21475 break;
21476 }
21477
21478 /* Replace the first operand in the 2nd instruction (which
21479 is the PC) with the destination register. We have
21480 already added in the PC in the first instruction and we
21481 do not want to do it again. */
21482 newinsn &= ~ 0xf0000;
21483 newinsn |= ((newinsn & 0x0f000) << 4);
21484 }
21485
21486 newimm |= (temp & 0xfffff000);
21487 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
21488
21489 highpart |= (newinsn & 0xfffff000);
21490 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
21491 }
21492 break;
21493
21494 case BFD_RELOC_ARM_OFFSET_IMM:
21495 if (!fixP->fx_done && seg->use_rela_p)
21496 value = 0;
21497
21498 case BFD_RELOC_ARM_LITERAL:
21499 sign = value > 0;
21500
21501 if (value < 0)
21502 value = - value;
21503
21504 if (validate_offset_imm (value, 0) == FAIL)
21505 {
21506 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
21507 as_bad_where (fixP->fx_file, fixP->fx_line,
21508 _("invalid literal constant: pool needs to be closer"));
21509 else
21510 as_bad_where (fixP->fx_file, fixP->fx_line,
21511 _("bad immediate value for offset (%ld)"),
21512 (long) value);
21513 break;
21514 }
21515
21516 newval = md_chars_to_number (buf, INSN_SIZE);
21517 if (value == 0)
21518 newval &= 0xfffff000;
21519 else
21520 {
21521 newval &= 0xff7ff000;
21522 newval |= value | (sign ? INDEX_UP : 0);
21523 }
21524 md_number_to_chars (buf, newval, INSN_SIZE);
21525 break;
21526
21527 case BFD_RELOC_ARM_OFFSET_IMM8:
21528 case BFD_RELOC_ARM_HWLITERAL:
21529 sign = value > 0;
21530
21531 if (value < 0)
21532 value = - value;
21533
21534 if (validate_offset_imm (value, 1) == FAIL)
21535 {
21536 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
21537 as_bad_where (fixP->fx_file, fixP->fx_line,
21538 _("invalid literal constant: pool needs to be closer"));
21539 else
21540 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
21541 (long) value);
21542 break;
21543 }
21544
21545 newval = md_chars_to_number (buf, INSN_SIZE);
21546 if (value == 0)
21547 newval &= 0xfffff0f0;
21548 else
21549 {
21550 newval &= 0xff7ff0f0;
21551 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
21552 }
21553 md_number_to_chars (buf, newval, INSN_SIZE);
21554 break;
21555
21556 case BFD_RELOC_ARM_T32_OFFSET_U8:
21557 if (value < 0 || value > 1020 || value % 4 != 0)
21558 as_bad_where (fixP->fx_file, fixP->fx_line,
21559 _("bad immediate value for offset (%ld)"), (long) value);
21560 value /= 4;
21561
21562 newval = md_chars_to_number (buf+2, THUMB_SIZE);
21563 newval |= value;
21564 md_number_to_chars (buf+2, newval, THUMB_SIZE);
21565 break;
21566
21567 case BFD_RELOC_ARM_T32_OFFSET_IMM:
21568 /* This is a complicated relocation used for all varieties of Thumb32
21569 load/store instruction with immediate offset:
21570
21571 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
21572 *4, optional writeback(W)
21573 (doubleword load/store)
21574
21575 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
21576 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
21577 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
21578 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
21579 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
21580
21581 Uppercase letters indicate bits that are already encoded at
21582 this point. Lowercase letters are our problem. For the
21583 second block of instructions, the secondary opcode nybble
21584 (bits 8..11) is present, and bit 23 is zero, even if this is
21585 a PC-relative operation. */
21586 newval = md_chars_to_number (buf, THUMB_SIZE);
21587 newval <<= 16;
21588 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
21589
21590 if ((newval & 0xf0000000) == 0xe0000000)
21591 {
21592 /* Doubleword load/store: 8-bit offset, scaled by 4. */
21593 if (value >= 0)
21594 newval |= (1 << 23);
21595 else
21596 value = -value;
21597 if (value % 4 != 0)
21598 {
21599 as_bad_where (fixP->fx_file, fixP->fx_line,
21600 _("offset not a multiple of 4"));
21601 break;
21602 }
21603 value /= 4;
21604 if (value > 0xff)
21605 {
21606 as_bad_where (fixP->fx_file, fixP->fx_line,
21607 _("offset out of range"));
21608 break;
21609 }
21610 newval &= ~0xff;
21611 }
21612 else if ((newval & 0x000f0000) == 0x000f0000)
21613 {
21614 /* PC-relative, 12-bit offset. */
21615 if (value >= 0)
21616 newval |= (1 << 23);
21617 else
21618 value = -value;
21619 if (value > 0xfff)
21620 {
21621 as_bad_where (fixP->fx_file, fixP->fx_line,
21622 _("offset out of range"));
21623 break;
21624 }
21625 newval &= ~0xfff;
21626 }
21627 else if ((newval & 0x00000100) == 0x00000100)
21628 {
21629 /* Writeback: 8-bit, +/- offset. */
21630 if (value >= 0)
21631 newval |= (1 << 9);
21632 else
21633 value = -value;
21634 if (value > 0xff)
21635 {
21636 as_bad_where (fixP->fx_file, fixP->fx_line,
21637 _("offset out of range"));
21638 break;
21639 }
21640 newval &= ~0xff;
21641 }
21642 else if ((newval & 0x00000f00) == 0x00000e00)
21643 {
21644 /* T-instruction: positive 8-bit offset. */
21645 if (value < 0 || value > 0xff)
21646 {
21647 as_bad_where (fixP->fx_file, fixP->fx_line,
21648 _("offset out of range"));
21649 break;
21650 }
21651 newval &= ~0xff;
21652 newval |= value;
21653 }
21654 else
21655 {
21656 /* Positive 12-bit or negative 8-bit offset. */
21657 int limit;
21658 if (value >= 0)
21659 {
21660 newval |= (1 << 23);
21661 limit = 0xfff;
21662 }
21663 else
21664 {
21665 value = -value;
21666 limit = 0xff;
21667 }
21668 if (value > limit)
21669 {
21670 as_bad_where (fixP->fx_file, fixP->fx_line,
21671 _("offset out of range"));
21672 break;
21673 }
21674 newval &= ~limit;
21675 }
21676
21677 newval |= value;
21678 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
21679 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
21680 break;
21681
21682 case BFD_RELOC_ARM_SHIFT_IMM:
21683 newval = md_chars_to_number (buf, INSN_SIZE);
21684 if (((unsigned long) value) > 32
21685 || (value == 32
21686 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
21687 {
21688 as_bad_where (fixP->fx_file, fixP->fx_line,
21689 _("shift expression is too large"));
21690 break;
21691 }
21692
21693 if (value == 0)
21694 /* Shifts of zero must be done as lsl. */
21695 newval &= ~0x60;
21696 else if (value == 32)
21697 value = 0;
21698 newval &= 0xfffff07f;
21699 newval |= (value & 0x1f) << 7;
21700 md_number_to_chars (buf, newval, INSN_SIZE);
21701 break;
21702
21703 case BFD_RELOC_ARM_T32_IMMEDIATE:
21704 case BFD_RELOC_ARM_T32_ADD_IMM:
21705 case BFD_RELOC_ARM_T32_IMM12:
21706 case BFD_RELOC_ARM_T32_ADD_PC12:
21707 /* We claim that this fixup has been processed here,
21708 even if in fact we generate an error because we do
21709 not have a reloc for it, so tc_gen_reloc will reject it. */
21710 fixP->fx_done = 1;
21711
21712 if (fixP->fx_addsy
21713 && ! S_IS_DEFINED (fixP->fx_addsy))
21714 {
21715 as_bad_where (fixP->fx_file, fixP->fx_line,
21716 _("undefined symbol %s used as an immediate value"),
21717 S_GET_NAME (fixP->fx_addsy));
21718 break;
21719 }
21720
21721 newval = md_chars_to_number (buf, THUMB_SIZE);
21722 newval <<= 16;
21723 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
21724
21725 newimm = FAIL;
21726 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
21727 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
21728 {
21729 newimm = encode_thumb32_immediate (value);
21730 if (newimm == (unsigned int) FAIL)
21731 newimm = thumb32_negate_data_op (&newval, value);
21732 }
21733 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
21734 && newimm == (unsigned int) FAIL)
21735 {
21736 /* Turn add/sum into addw/subw. */
21737 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
21738 newval = (newval & 0xfeffffff) | 0x02000000;
21739 /* No flat 12-bit imm encoding for addsw/subsw. */
21740 if ((newval & 0x00100000) == 0)
21741 {
21742 /* 12 bit immediate for addw/subw. */
21743 if (value < 0)
21744 {
21745 value = -value;
21746 newval ^= 0x00a00000;
21747 }
21748 if (value > 0xfff)
21749 newimm = (unsigned int) FAIL;
21750 else
21751 newimm = value;
21752 }
21753 }
21754
21755 if (newimm == (unsigned int)FAIL)
21756 {
21757 as_bad_where (fixP->fx_file, fixP->fx_line,
21758 _("invalid constant (%lx) after fixup"),
21759 (unsigned long) value);
21760 break;
21761 }
21762
21763 newval |= (newimm & 0x800) << 15;
21764 newval |= (newimm & 0x700) << 4;
21765 newval |= (newimm & 0x0ff);
21766
21767 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
21768 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
21769 break;
21770
21771 case BFD_RELOC_ARM_SMC:
21772 if (((unsigned long) value) > 0xffff)
21773 as_bad_where (fixP->fx_file, fixP->fx_line,
21774 _("invalid smc expression"));
21775 newval = md_chars_to_number (buf, INSN_SIZE);
21776 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
21777 md_number_to_chars (buf, newval, INSN_SIZE);
21778 break;
21779
21780 case BFD_RELOC_ARM_HVC:
21781 if (((unsigned long) value) > 0xffff)
21782 as_bad_where (fixP->fx_file, fixP->fx_line,
21783 _("invalid hvc expression"));
21784 newval = md_chars_to_number (buf, INSN_SIZE);
21785 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
21786 md_number_to_chars (buf, newval, INSN_SIZE);
21787 break;
21788
21789 case BFD_RELOC_ARM_SWI:
21790 if (fixP->tc_fix_data != 0)
21791 {
21792 if (((unsigned long) value) > 0xff)
21793 as_bad_where (fixP->fx_file, fixP->fx_line,
21794 _("invalid swi expression"));
21795 newval = md_chars_to_number (buf, THUMB_SIZE);
21796 newval |= value;
21797 md_number_to_chars (buf, newval, THUMB_SIZE);
21798 }
21799 else
21800 {
21801 if (((unsigned long) value) > 0x00ffffff)
21802 as_bad_where (fixP->fx_file, fixP->fx_line,
21803 _("invalid swi expression"));
21804 newval = md_chars_to_number (buf, INSN_SIZE);
21805 newval |= value;
21806 md_number_to_chars (buf, newval, INSN_SIZE);
21807 }
21808 break;
21809
21810 case BFD_RELOC_ARM_MULTI:
21811 if (((unsigned long) value) > 0xffff)
21812 as_bad_where (fixP->fx_file, fixP->fx_line,
21813 _("invalid expression in load/store multiple"));
21814 newval = value | md_chars_to_number (buf, INSN_SIZE);
21815 md_number_to_chars (buf, newval, INSN_SIZE);
21816 break;
21817
21818 #ifdef OBJ_ELF
21819 case BFD_RELOC_ARM_PCREL_CALL:
21820
21821 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
21822 && fixP->fx_addsy
21823 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21824 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21825 && THUMB_IS_FUNC (fixP->fx_addsy))
21826 /* Flip the bl to blx. This is a simple flip
21827 bit here because we generate PCREL_CALL for
21828 unconditional bls. */
21829 {
21830 newval = md_chars_to_number (buf, INSN_SIZE);
21831 newval = newval | 0x10000000;
21832 md_number_to_chars (buf, newval, INSN_SIZE);
21833 temp = 1;
21834 fixP->fx_done = 1;
21835 }
21836 else
21837 temp = 3;
21838 goto arm_branch_common;
21839
21840 case BFD_RELOC_ARM_PCREL_JUMP:
21841 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
21842 && fixP->fx_addsy
21843 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21844 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21845 && THUMB_IS_FUNC (fixP->fx_addsy))
21846 {
21847 /* This would map to a bl<cond>, b<cond>,
21848 b<always> to a Thumb function. We
21849 need to force a relocation for this particular
21850 case. */
21851 newval = md_chars_to_number (buf, INSN_SIZE);
21852 fixP->fx_done = 0;
21853 }
21854
21855 case BFD_RELOC_ARM_PLT32:
21856 #endif
21857 case BFD_RELOC_ARM_PCREL_BRANCH:
21858 temp = 3;
21859 goto arm_branch_common;
21860
21861 case BFD_RELOC_ARM_PCREL_BLX:
21862
21863 temp = 1;
21864 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
21865 && fixP->fx_addsy
21866 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21867 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21868 && ARM_IS_FUNC (fixP->fx_addsy))
21869 {
21870 /* Flip the blx to a bl and warn. */
21871 const char *name = S_GET_NAME (fixP->fx_addsy);
21872 newval = 0xeb000000;
21873 as_warn_where (fixP->fx_file, fixP->fx_line,
21874 _("blx to '%s' an ARM ISA state function changed to bl"),
21875 name);
21876 md_number_to_chars (buf, newval, INSN_SIZE);
21877 temp = 3;
21878 fixP->fx_done = 1;
21879 }
21880
21881 #ifdef OBJ_ELF
21882 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
21883 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
21884 #endif
21885
21886 arm_branch_common:
21887 /* We are going to store value (shifted right by two) in the
21888 instruction, in a 24 bit, signed field. Bits 26 through 32 either
21889 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
21890 also be be clear. */
21891 if (value & temp)
21892 as_bad_where (fixP->fx_file, fixP->fx_line,
21893 _("misaligned branch destination"));
21894 if ((value & (offsetT)0xfe000000) != (offsetT)0
21895 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
21896 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21897
21898 if (fixP->fx_done || !seg->use_rela_p)
21899 {
21900 newval = md_chars_to_number (buf, INSN_SIZE);
21901 newval |= (value >> 2) & 0x00ffffff;
21902 /* Set the H bit on BLX instructions. */
21903 if (temp == 1)
21904 {
21905 if (value & 2)
21906 newval |= 0x01000000;
21907 else
21908 newval &= ~0x01000000;
21909 }
21910 md_number_to_chars (buf, newval, INSN_SIZE);
21911 }
21912 break;
21913
21914 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
21915 /* CBZ can only branch forward. */
21916
21917 /* Attempts to use CBZ to branch to the next instruction
21918 (which, strictly speaking, are prohibited) will be turned into
21919 no-ops.
21920
21921 FIXME: It may be better to remove the instruction completely and
21922 perform relaxation. */
21923 if (value == -2)
21924 {
21925 newval = md_chars_to_number (buf, THUMB_SIZE);
21926 newval = 0xbf00; /* NOP encoding T1 */
21927 md_number_to_chars (buf, newval, THUMB_SIZE);
21928 }
21929 else
21930 {
21931 if (value & ~0x7e)
21932 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21933
21934 if (fixP->fx_done || !seg->use_rela_p)
21935 {
21936 newval = md_chars_to_number (buf, THUMB_SIZE);
21937 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
21938 md_number_to_chars (buf, newval, THUMB_SIZE);
21939 }
21940 }
21941 break;
21942
21943 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
21944 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
21945 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21946
21947 if (fixP->fx_done || !seg->use_rela_p)
21948 {
21949 newval = md_chars_to_number (buf, THUMB_SIZE);
21950 newval |= (value & 0x1ff) >> 1;
21951 md_number_to_chars (buf, newval, THUMB_SIZE);
21952 }
21953 break;
21954
21955 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
21956 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
21957 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21958
21959 if (fixP->fx_done || !seg->use_rela_p)
21960 {
21961 newval = md_chars_to_number (buf, THUMB_SIZE);
21962 newval |= (value & 0xfff) >> 1;
21963 md_number_to_chars (buf, newval, THUMB_SIZE);
21964 }
21965 break;
21966
21967 case BFD_RELOC_THUMB_PCREL_BRANCH20:
21968 if (fixP->fx_addsy
21969 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21970 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21971 && ARM_IS_FUNC (fixP->fx_addsy)
21972 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21973 {
21974 /* Force a relocation for a branch 20 bits wide. */
21975 fixP->fx_done = 0;
21976 }
21977 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
21978 as_bad_where (fixP->fx_file, fixP->fx_line,
21979 _("conditional branch out of range"));
21980
21981 if (fixP->fx_done || !seg->use_rela_p)
21982 {
21983 offsetT newval2;
21984 addressT S, J1, J2, lo, hi;
21985
21986 S = (value & 0x00100000) >> 20;
21987 J2 = (value & 0x00080000) >> 19;
21988 J1 = (value & 0x00040000) >> 18;
21989 hi = (value & 0x0003f000) >> 12;
21990 lo = (value & 0x00000ffe) >> 1;
21991
21992 newval = md_chars_to_number (buf, THUMB_SIZE);
21993 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21994 newval |= (S << 10) | hi;
21995 newval2 |= (J1 << 13) | (J2 << 11) | lo;
21996 md_number_to_chars (buf, newval, THUMB_SIZE);
21997 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
21998 }
21999 break;
22000
22001 case BFD_RELOC_THUMB_PCREL_BLX:
22002 /* If there is a blx from a thumb state function to
22003 another thumb function flip this to a bl and warn
22004 about it. */
22005
22006 if (fixP->fx_addsy
22007 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22008 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22009 && THUMB_IS_FUNC (fixP->fx_addsy))
22010 {
22011 const char *name = S_GET_NAME (fixP->fx_addsy);
22012 as_warn_where (fixP->fx_file, fixP->fx_line,
22013 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
22014 name);
22015 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22016 newval = newval | 0x1000;
22017 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22018 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22019 fixP->fx_done = 1;
22020 }
22021
22022
22023 goto thumb_bl_common;
22024
22025 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22026 /* A bl from Thumb state ISA to an internal ARM state function
22027 is converted to a blx. */
22028 if (fixP->fx_addsy
22029 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22030 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22031 && ARM_IS_FUNC (fixP->fx_addsy)
22032 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22033 {
22034 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22035 newval = newval & ~0x1000;
22036 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22037 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
22038 fixP->fx_done = 1;
22039 }
22040
22041 thumb_bl_common:
22042
22043 #ifdef OBJ_ELF
22044 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
22045 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22046 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22047 #endif
22048
22049 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22050 /* For a BLX instruction, make sure that the relocation is rounded up
22051 to a word boundary. This follows the semantics of the instruction
22052 which specifies that bit 1 of the target address will come from bit
22053 1 of the base address. */
22054 value = (value + 1) & ~ 1;
22055
22056 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
22057 {
22058 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
22059 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22060 else if ((value & ~0x1ffffff)
22061 && ((value & ~0x1ffffff) != ~0x1ffffff))
22062 as_bad_where (fixP->fx_file, fixP->fx_line,
22063 _("Thumb2 branch out of range"));
22064 }
22065
22066 if (fixP->fx_done || !seg->use_rela_p)
22067 encode_thumb2_b_bl_offset (buf, value);
22068
22069 break;
22070
22071 case BFD_RELOC_THUMB_PCREL_BRANCH25:
22072 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
22073 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22074
22075 if (fixP->fx_done || !seg->use_rela_p)
22076 encode_thumb2_b_bl_offset (buf, value);
22077
22078 break;
22079
22080 case BFD_RELOC_8:
22081 if (fixP->fx_done || !seg->use_rela_p)
22082 md_number_to_chars (buf, value, 1);
22083 break;
22084
22085 case BFD_RELOC_16:
22086 if (fixP->fx_done || !seg->use_rela_p)
22087 md_number_to_chars (buf, value, 2);
22088 break;
22089
22090 #ifdef OBJ_ELF
22091 case BFD_RELOC_ARM_TLS_CALL:
22092 case BFD_RELOC_ARM_THM_TLS_CALL:
22093 case BFD_RELOC_ARM_TLS_DESCSEQ:
22094 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
22095 S_SET_THREAD_LOCAL (fixP->fx_addsy);
22096 break;
22097
22098 case BFD_RELOC_ARM_TLS_GOTDESC:
22099 case BFD_RELOC_ARM_TLS_GD32:
22100 case BFD_RELOC_ARM_TLS_LE32:
22101 case BFD_RELOC_ARM_TLS_IE32:
22102 case BFD_RELOC_ARM_TLS_LDM32:
22103 case BFD_RELOC_ARM_TLS_LDO32:
22104 S_SET_THREAD_LOCAL (fixP->fx_addsy);
22105 /* fall through */
22106
22107 case BFD_RELOC_ARM_GOT32:
22108 case BFD_RELOC_ARM_GOTOFF:
22109 if (fixP->fx_done || !seg->use_rela_p)
22110 md_number_to_chars (buf, 0, 4);
22111 break;
22112
22113 case BFD_RELOC_ARM_GOT_PREL:
22114 if (fixP->fx_done || !seg->use_rela_p)
22115 md_number_to_chars (buf, value, 4);
22116 break;
22117
22118 case BFD_RELOC_ARM_TARGET2:
22119 /* TARGET2 is not partial-inplace, so we need to write the
22120 addend here for REL targets, because it won't be written out
22121 during reloc processing later. */
22122 if (fixP->fx_done || !seg->use_rela_p)
22123 md_number_to_chars (buf, fixP->fx_offset, 4);
22124 break;
22125 #endif
22126
22127 case BFD_RELOC_RVA:
22128 case BFD_RELOC_32:
22129 case BFD_RELOC_ARM_TARGET1:
22130 case BFD_RELOC_ARM_ROSEGREL32:
22131 case BFD_RELOC_ARM_SBREL32:
22132 case BFD_RELOC_32_PCREL:
22133 #ifdef TE_PE
22134 case BFD_RELOC_32_SECREL:
22135 #endif
22136 if (fixP->fx_done || !seg->use_rela_p)
22137 #ifdef TE_WINCE
22138 /* For WinCE we only do this for pcrel fixups. */
22139 if (fixP->fx_done || fixP->fx_pcrel)
22140 #endif
22141 md_number_to_chars (buf, value, 4);
22142 break;
22143
22144 #ifdef OBJ_ELF
22145 case BFD_RELOC_ARM_PREL31:
22146 if (fixP->fx_done || !seg->use_rela_p)
22147 {
22148 newval = md_chars_to_number (buf, 4) & 0x80000000;
22149 if ((value ^ (value >> 1)) & 0x40000000)
22150 {
22151 as_bad_where (fixP->fx_file, fixP->fx_line,
22152 _("rel31 relocation overflow"));
22153 }
22154 newval |= value & 0x7fffffff;
22155 md_number_to_chars (buf, newval, 4);
22156 }
22157 break;
22158 #endif
22159
22160 case BFD_RELOC_ARM_CP_OFF_IMM:
22161 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
22162 if (value < -1023 || value > 1023 || (value & 3))
22163 as_bad_where (fixP->fx_file, fixP->fx_line,
22164 _("co-processor offset out of range"));
22165 cp_off_common:
22166 sign = value > 0;
22167 if (value < 0)
22168 value = -value;
22169 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22170 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22171 newval = md_chars_to_number (buf, INSN_SIZE);
22172 else
22173 newval = get_thumb32_insn (buf);
22174 if (value == 0)
22175 newval &= 0xffffff00;
22176 else
22177 {
22178 newval &= 0xff7fff00;
22179 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
22180 }
22181 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22182 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22183 md_number_to_chars (buf, newval, INSN_SIZE);
22184 else
22185 put_thumb32_insn (buf, newval);
22186 break;
22187
22188 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
22189 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
22190 if (value < -255 || value > 255)
22191 as_bad_where (fixP->fx_file, fixP->fx_line,
22192 _("co-processor offset out of range"));
22193 value *= 4;
22194 goto cp_off_common;
22195
22196 case BFD_RELOC_ARM_THUMB_OFFSET:
22197 newval = md_chars_to_number (buf, THUMB_SIZE);
22198 /* Exactly what ranges, and where the offset is inserted depends
22199 on the type of instruction, we can establish this from the
22200 top 4 bits. */
22201 switch (newval >> 12)
22202 {
22203 case 4: /* PC load. */
22204 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
22205 forced to zero for these loads; md_pcrel_from has already
22206 compensated for this. */
22207 if (value & 3)
22208 as_bad_where (fixP->fx_file, fixP->fx_line,
22209 _("invalid offset, target not word aligned (0x%08lX)"),
22210 (((unsigned long) fixP->fx_frag->fr_address
22211 + (unsigned long) fixP->fx_where) & ~3)
22212 + (unsigned long) value);
22213
22214 if (value & ~0x3fc)
22215 as_bad_where (fixP->fx_file, fixP->fx_line,
22216 _("invalid offset, value too big (0x%08lX)"),
22217 (long) value);
22218
22219 newval |= value >> 2;
22220 break;
22221
22222 case 9: /* SP load/store. */
22223 if (value & ~0x3fc)
22224 as_bad_where (fixP->fx_file, fixP->fx_line,
22225 _("invalid offset, value too big (0x%08lX)"),
22226 (long) value);
22227 newval |= value >> 2;
22228 break;
22229
22230 case 6: /* Word load/store. */
22231 if (value & ~0x7c)
22232 as_bad_where (fixP->fx_file, fixP->fx_line,
22233 _("invalid offset, value too big (0x%08lX)"),
22234 (long) value);
22235 newval |= value << 4; /* 6 - 2. */
22236 break;
22237
22238 case 7: /* Byte load/store. */
22239 if (value & ~0x1f)
22240 as_bad_where (fixP->fx_file, fixP->fx_line,
22241 _("invalid offset, value too big (0x%08lX)"),
22242 (long) value);
22243 newval |= value << 6;
22244 break;
22245
22246 case 8: /* Halfword load/store. */
22247 if (value & ~0x3e)
22248 as_bad_where (fixP->fx_file, fixP->fx_line,
22249 _("invalid offset, value too big (0x%08lX)"),
22250 (long) value);
22251 newval |= value << 5; /* 6 - 1. */
22252 break;
22253
22254 default:
22255 as_bad_where (fixP->fx_file, fixP->fx_line,
22256 "Unable to process relocation for thumb opcode: %lx",
22257 (unsigned long) newval);
22258 break;
22259 }
22260 md_number_to_chars (buf, newval, THUMB_SIZE);
22261 break;
22262
22263 case BFD_RELOC_ARM_THUMB_ADD:
22264 /* This is a complicated relocation, since we use it for all of
22265 the following immediate relocations:
22266
22267 3bit ADD/SUB
22268 8bit ADD/SUB
22269 9bit ADD/SUB SP word-aligned
22270 10bit ADD PC/SP word-aligned
22271
22272 The type of instruction being processed is encoded in the
22273 instruction field:
22274
22275 0x8000 SUB
22276 0x00F0 Rd
22277 0x000F Rs
22278 */
22279 newval = md_chars_to_number (buf, THUMB_SIZE);
22280 {
22281 int rd = (newval >> 4) & 0xf;
22282 int rs = newval & 0xf;
22283 int subtract = !!(newval & 0x8000);
22284
22285 /* Check for HI regs, only very restricted cases allowed:
22286 Adjusting SP, and using PC or SP to get an address. */
22287 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
22288 || (rs > 7 && rs != REG_SP && rs != REG_PC))
22289 as_bad_where (fixP->fx_file, fixP->fx_line,
22290 _("invalid Hi register with immediate"));
22291
22292 /* If value is negative, choose the opposite instruction. */
22293 if (value < 0)
22294 {
22295 value = -value;
22296 subtract = !subtract;
22297 if (value < 0)
22298 as_bad_where (fixP->fx_file, fixP->fx_line,
22299 _("immediate value out of range"));
22300 }
22301
22302 if (rd == REG_SP)
22303 {
22304 if (value & ~0x1fc)
22305 as_bad_where (fixP->fx_file, fixP->fx_line,
22306 _("invalid immediate for stack address calculation"));
22307 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
22308 newval |= value >> 2;
22309 }
22310 else if (rs == REG_PC || rs == REG_SP)
22311 {
22312 if (subtract || value & ~0x3fc)
22313 as_bad_where (fixP->fx_file, fixP->fx_line,
22314 _("invalid immediate for address calculation (value = 0x%08lX)"),
22315 (unsigned long) value);
22316 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
22317 newval |= rd << 8;
22318 newval |= value >> 2;
22319 }
22320 else if (rs == rd)
22321 {
22322 if (value & ~0xff)
22323 as_bad_where (fixP->fx_file, fixP->fx_line,
22324 _("immediate value out of range"));
22325 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
22326 newval |= (rd << 8) | value;
22327 }
22328 else
22329 {
22330 if (value & ~0x7)
22331 as_bad_where (fixP->fx_file, fixP->fx_line,
22332 _("immediate value out of range"));
22333 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
22334 newval |= rd | (rs << 3) | (value << 6);
22335 }
22336 }
22337 md_number_to_chars (buf, newval, THUMB_SIZE);
22338 break;
22339
22340 case BFD_RELOC_ARM_THUMB_IMM:
22341 newval = md_chars_to_number (buf, THUMB_SIZE);
22342 if (value < 0 || value > 255)
22343 as_bad_where (fixP->fx_file, fixP->fx_line,
22344 _("invalid immediate: %ld is out of range"),
22345 (long) value);
22346 newval |= value;
22347 md_number_to_chars (buf, newval, THUMB_SIZE);
22348 break;
22349
22350 case BFD_RELOC_ARM_THUMB_SHIFT:
22351 /* 5bit shift value (0..32). LSL cannot take 32. */
22352 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
22353 temp = newval & 0xf800;
22354 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
22355 as_bad_where (fixP->fx_file, fixP->fx_line,
22356 _("invalid shift value: %ld"), (long) value);
22357 /* Shifts of zero must be encoded as LSL. */
22358 if (value == 0)
22359 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
22360 /* Shifts of 32 are encoded as zero. */
22361 else if (value == 32)
22362 value = 0;
22363 newval |= value << 6;
22364 md_number_to_chars (buf, newval, THUMB_SIZE);
22365 break;
22366
22367 case BFD_RELOC_VTABLE_INHERIT:
22368 case BFD_RELOC_VTABLE_ENTRY:
22369 fixP->fx_done = 0;
22370 return;
22371
22372 case BFD_RELOC_ARM_MOVW:
22373 case BFD_RELOC_ARM_MOVT:
22374 case BFD_RELOC_ARM_THUMB_MOVW:
22375 case BFD_RELOC_ARM_THUMB_MOVT:
22376 if (fixP->fx_done || !seg->use_rela_p)
22377 {
22378 /* REL format relocations are limited to a 16-bit addend. */
22379 if (!fixP->fx_done)
22380 {
22381 if (value < -0x8000 || value > 0x7fff)
22382 as_bad_where (fixP->fx_file, fixP->fx_line,
22383 _("offset out of range"));
22384 }
22385 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
22386 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22387 {
22388 value >>= 16;
22389 }
22390
22391 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
22392 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22393 {
22394 newval = get_thumb32_insn (buf);
22395 newval &= 0xfbf08f00;
22396 newval |= (value & 0xf000) << 4;
22397 newval |= (value & 0x0800) << 15;
22398 newval |= (value & 0x0700) << 4;
22399 newval |= (value & 0x00ff);
22400 put_thumb32_insn (buf, newval);
22401 }
22402 else
22403 {
22404 newval = md_chars_to_number (buf, 4);
22405 newval &= 0xfff0f000;
22406 newval |= value & 0x0fff;
22407 newval |= (value & 0xf000) << 4;
22408 md_number_to_chars (buf, newval, 4);
22409 }
22410 }
22411 return;
22412
22413 case BFD_RELOC_ARM_ALU_PC_G0_NC:
22414 case BFD_RELOC_ARM_ALU_PC_G0:
22415 case BFD_RELOC_ARM_ALU_PC_G1_NC:
22416 case BFD_RELOC_ARM_ALU_PC_G1:
22417 case BFD_RELOC_ARM_ALU_PC_G2:
22418 case BFD_RELOC_ARM_ALU_SB_G0_NC:
22419 case BFD_RELOC_ARM_ALU_SB_G0:
22420 case BFD_RELOC_ARM_ALU_SB_G1_NC:
22421 case BFD_RELOC_ARM_ALU_SB_G1:
22422 case BFD_RELOC_ARM_ALU_SB_G2:
22423 gas_assert (!fixP->fx_done);
22424 if (!seg->use_rela_p)
22425 {
22426 bfd_vma insn;
22427 bfd_vma encoded_addend;
22428 bfd_vma addend_abs = abs (value);
22429
22430 /* Check that the absolute value of the addend can be
22431 expressed as an 8-bit constant plus a rotation. */
22432 encoded_addend = encode_arm_immediate (addend_abs);
22433 if (encoded_addend == (unsigned int) FAIL)
22434 as_bad_where (fixP->fx_file, fixP->fx_line,
22435 _("the offset 0x%08lX is not representable"),
22436 (unsigned long) addend_abs);
22437
22438 /* Extract the instruction. */
22439 insn = md_chars_to_number (buf, INSN_SIZE);
22440
22441 /* If the addend is positive, use an ADD instruction.
22442 Otherwise use a SUB. Take care not to destroy the S bit. */
22443 insn &= 0xff1fffff;
22444 if (value < 0)
22445 insn |= 1 << 22;
22446 else
22447 insn |= 1 << 23;
22448
22449 /* Place the encoded addend into the first 12 bits of the
22450 instruction. */
22451 insn &= 0xfffff000;
22452 insn |= encoded_addend;
22453
22454 /* Update the instruction. */
22455 md_number_to_chars (buf, insn, INSN_SIZE);
22456 }
22457 break;
22458
22459 case BFD_RELOC_ARM_LDR_PC_G0:
22460 case BFD_RELOC_ARM_LDR_PC_G1:
22461 case BFD_RELOC_ARM_LDR_PC_G2:
22462 case BFD_RELOC_ARM_LDR_SB_G0:
22463 case BFD_RELOC_ARM_LDR_SB_G1:
22464 case BFD_RELOC_ARM_LDR_SB_G2:
22465 gas_assert (!fixP->fx_done);
22466 if (!seg->use_rela_p)
22467 {
22468 bfd_vma insn;
22469 bfd_vma addend_abs = abs (value);
22470
22471 /* Check that the absolute value of the addend can be
22472 encoded in 12 bits. */
22473 if (addend_abs >= 0x1000)
22474 as_bad_where (fixP->fx_file, fixP->fx_line,
22475 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
22476 (unsigned long) addend_abs);
22477
22478 /* Extract the instruction. */
22479 insn = md_chars_to_number (buf, INSN_SIZE);
22480
22481 /* If the addend is negative, clear bit 23 of the instruction.
22482 Otherwise set it. */
22483 if (value < 0)
22484 insn &= ~(1 << 23);
22485 else
22486 insn |= 1 << 23;
22487
22488 /* Place the absolute value of the addend into the first 12 bits
22489 of the instruction. */
22490 insn &= 0xfffff000;
22491 insn |= addend_abs;
22492
22493 /* Update the instruction. */
22494 md_number_to_chars (buf, insn, INSN_SIZE);
22495 }
22496 break;
22497
22498 case BFD_RELOC_ARM_LDRS_PC_G0:
22499 case BFD_RELOC_ARM_LDRS_PC_G1:
22500 case BFD_RELOC_ARM_LDRS_PC_G2:
22501 case BFD_RELOC_ARM_LDRS_SB_G0:
22502 case BFD_RELOC_ARM_LDRS_SB_G1:
22503 case BFD_RELOC_ARM_LDRS_SB_G2:
22504 gas_assert (!fixP->fx_done);
22505 if (!seg->use_rela_p)
22506 {
22507 bfd_vma insn;
22508 bfd_vma addend_abs = abs (value);
22509
22510 /* Check that the absolute value of the addend can be
22511 encoded in 8 bits. */
22512 if (addend_abs >= 0x100)
22513 as_bad_where (fixP->fx_file, fixP->fx_line,
22514 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
22515 (unsigned long) addend_abs);
22516
22517 /* Extract the instruction. */
22518 insn = md_chars_to_number (buf, INSN_SIZE);
22519
22520 /* If the addend is negative, clear bit 23 of the instruction.
22521 Otherwise set it. */
22522 if (value < 0)
22523 insn &= ~(1 << 23);
22524 else
22525 insn |= 1 << 23;
22526
22527 /* Place the first four bits of the absolute value of the addend
22528 into the first 4 bits of the instruction, and the remaining
22529 four into bits 8 .. 11. */
22530 insn &= 0xfffff0f0;
22531 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
22532
22533 /* Update the instruction. */
22534 md_number_to_chars (buf, insn, INSN_SIZE);
22535 }
22536 break;
22537
22538 case BFD_RELOC_ARM_LDC_PC_G0:
22539 case BFD_RELOC_ARM_LDC_PC_G1:
22540 case BFD_RELOC_ARM_LDC_PC_G2:
22541 case BFD_RELOC_ARM_LDC_SB_G0:
22542 case BFD_RELOC_ARM_LDC_SB_G1:
22543 case BFD_RELOC_ARM_LDC_SB_G2:
22544 gas_assert (!fixP->fx_done);
22545 if (!seg->use_rela_p)
22546 {
22547 bfd_vma insn;
22548 bfd_vma addend_abs = abs (value);
22549
22550 /* Check that the absolute value of the addend is a multiple of
22551 four and, when divided by four, fits in 8 bits. */
22552 if (addend_abs & 0x3)
22553 as_bad_where (fixP->fx_file, fixP->fx_line,
22554 _("bad offset 0x%08lX (must be word-aligned)"),
22555 (unsigned long) addend_abs);
22556
22557 if ((addend_abs >> 2) > 0xff)
22558 as_bad_where (fixP->fx_file, fixP->fx_line,
22559 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
22560 (unsigned long) addend_abs);
22561
22562 /* Extract the instruction. */
22563 insn = md_chars_to_number (buf, INSN_SIZE);
22564
22565 /* If the addend is negative, clear bit 23 of the instruction.
22566 Otherwise set it. */
22567 if (value < 0)
22568 insn &= ~(1 << 23);
22569 else
22570 insn |= 1 << 23;
22571
22572 /* Place the addend (divided by four) into the first eight
22573 bits of the instruction. */
22574 insn &= 0xfffffff0;
22575 insn |= addend_abs >> 2;
22576
22577 /* Update the instruction. */
22578 md_number_to_chars (buf, insn, INSN_SIZE);
22579 }
22580 break;
22581
22582 case BFD_RELOC_ARM_V4BX:
22583 /* This will need to go in the object file. */
22584 fixP->fx_done = 0;
22585 break;
22586
22587 case BFD_RELOC_UNUSED:
22588 default:
22589 as_bad_where (fixP->fx_file, fixP->fx_line,
22590 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
22591 }
22592 }
22593
22594 /* Translate internal representation of relocation info to BFD target
22595 format. */
22596
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  /* xmalloc aborts on allocation failure, so no NULL checks needed.  */
  reloc = (arelent *) xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type onto a BFD relocation code.  The first
     seven cases deliberately fall through: when the fixup is not
     PC-relative they drop into the common list below which passes the
     type through unchanged.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These relocation types are passed to BFD unchanged.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI v4 and later replaces Thumb BLX relocs with BRANCH23.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      /* Internal-only fixup; should have been resolved in md_apply_fix.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      /* Give a more helpful diagnostic when the cause is an undefined
	 local label.  */
      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Internal-only fixup types that cannot be emitted as object-file
	   relocations; name the type in the diagnostic.  */
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A word reference to _GLOBAL_OFFSET_TABLE_ becomes a GOTPC reloc.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
22852
22853 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
22854
22855 void
22856 cons_fix_new_arm (fragS * frag,
22857 int where,
22858 int size,
22859 expressionS * exp)
22860 {
22861 bfd_reloc_code_real_type type;
22862 int pcrel = 0;
22863
22864 /* Pick a reloc.
22865 FIXME: @@ Should look at CPU word size. */
22866 switch (size)
22867 {
22868 case 1:
22869 type = BFD_RELOC_8;
22870 break;
22871 case 2:
22872 type = BFD_RELOC_16;
22873 break;
22874 case 4:
22875 default:
22876 type = BFD_RELOC_32;
22877 break;
22878 case 8:
22879 type = BFD_RELOC_64;
22880 break;
22881 }
22882
22883 #ifdef TE_PE
22884 if (exp->X_op == O_secrel)
22885 {
22886 exp->X_op = O_symbol;
22887 type = BFD_RELOC_32_SECREL;
22888 }
22889 #endif
22890
22891 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
22892 }
22893
22894 #if defined (OBJ_COFF)
22895 void
22896 arm_validate_fix (fixS * fixP)
22897 {
22898 /* If the destination of the branch is a defined symbol which does not have
22899 the THUMB_FUNC attribute, then we must be calling a function which has
22900 the (interfacearm) attribute. We look for the Thumb entry point to that
22901 function and change the branch to refer to that function instead. */
22902 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
22903 && fixP->fx_addsy != NULL
22904 && S_IS_DEFINED (fixP->fx_addsy)
22905 && ! THUMB_IS_FUNC (fixP->fx_addsy))
22906 {
22907 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
22908 }
22909 }
22910 #endif
22911
22912
22913 int
22914 arm_force_relocation (struct fix * fixp)
22915 {
22916 #if defined (OBJ_COFF) && defined (TE_PE)
22917 if (fixp->fx_r_type == BFD_RELOC_RVA)
22918 return 1;
22919 #endif
22920
22921 /* In case we have a call or a branch to a function in ARM ISA mode from
22922 a thumb function or vice-versa force the relocation. These relocations
22923 are cleared off for some cores that might have blx and simple transformations
22924 are possible. */
22925
22926 #ifdef OBJ_ELF
22927 switch (fixp->fx_r_type)
22928 {
22929 case BFD_RELOC_ARM_PCREL_JUMP:
22930 case BFD_RELOC_ARM_PCREL_CALL:
22931 case BFD_RELOC_THUMB_PCREL_BLX:
22932 if (THUMB_IS_FUNC (fixp->fx_addsy))
22933 return 1;
22934 break;
22935
22936 case BFD_RELOC_ARM_PCREL_BLX:
22937 case BFD_RELOC_THUMB_PCREL_BRANCH25:
22938 case BFD_RELOC_THUMB_PCREL_BRANCH20:
22939 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22940 if (ARM_IS_FUNC (fixp->fx_addsy))
22941 return 1;
22942 break;
22943
22944 default:
22945 break;
22946 }
22947 #endif
22948
22949 /* Resolve these relocations even if the symbol is extern or weak.
22950 Technically this is probably wrong due to symbol preemption.
22951 In practice these relocations do not have enough range to be useful
22952 at dynamic link time, and some code (e.g. in the Linux kernel)
22953 expects these references to be resolved. */
22954 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
22955 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
22956 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
22957 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
22958 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22959 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
22960 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
22961 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
22962 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
22963 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
22964 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
22965 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
22966 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
22967 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
22968 return 0;
22969
22970 /* Always leave these relocations for the linker. */
22971 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
22972 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
22973 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
22974 return 1;
22975
22976 /* Always generate relocations against function symbols. */
22977 if (fixp->fx_r_type == BFD_RELOC_32
22978 && fixp->fx_addsy
22979 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
22980 return 1;
22981
22982 return generic_force_reloc (fixp);
22983 }
22984
22985 #if defined (OBJ_ELF) || defined (OBJ_COFF)
22986 /* Relocations against function names must be left unadjusted,
22987 so that the linker can use this information to generate interworking
22988 stubs. The MIPS version of this function
22989 also prevents relocations that are mips-16 specific, but I do not
22990 know why it does this.
22991
22992 FIXME:
22993 There is one other problem that ought to be addressed here, but
22994 which currently is not: Taking the address of a label (rather
22995 than a function) and then later jumping to that address. Such
22996 addresses also ought to have their bottom bit set (assuming that
22997 they reside in Thumb code), but at the moment they will not. */
22998
22999 bfd_boolean
23000 arm_fix_adjustable (fixS * fixP)
23001 {
23002 if (fixP->fx_addsy == NULL)
23003 return 1;
23004
23005 /* Preserve relocations against symbols with function type. */
23006 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
23007 return FALSE;
23008
23009 if (THUMB_IS_FUNC (fixP->fx_addsy)
23010 && fixP->fx_subsy == NULL)
23011 return FALSE;
23012
23013 /* We need the symbol name for the VTABLE entries. */
23014 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
23015 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
23016 return FALSE;
23017
23018 /* Don't allow symbols to be discarded on GOT related relocs. */
23019 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
23020 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
23021 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
23022 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
23023 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
23024 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
23025 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
23026 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
23027 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
23028 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
23029 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
23030 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
23031 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
23032 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
23033 return FALSE;
23034
23035 /* Similarly for group relocations. */
23036 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23037 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23038 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23039 return FALSE;
23040
23041 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
23042 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
23043 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23044 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
23045 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
23046 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23047 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
23048 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
23049 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
23050 return FALSE;
23051
23052 return TRUE;
23053 }
23054 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
23055
23056 #ifdef OBJ_ELF
23057
23058 const char *
23059 elf32_arm_target_format (void)
23060 {
23061 #ifdef TE_SYMBIAN
23062 return (target_big_endian
23063 ? "elf32-bigarm-symbian"
23064 : "elf32-littlearm-symbian");
23065 #elif defined (TE_VXWORKS)
23066 return (target_big_endian
23067 ? "elf32-bigarm-vxworks"
23068 : "elf32-littlearm-vxworks");
23069 #elif defined (TE_NACL)
23070 return (target_big_endian
23071 ? "elf32-bigarm-nacl"
23072 : "elf32-littlearm-nacl");
23073 #else
23074 if (target_big_endian)
23075 return "elf32-bigarm";
23076 else
23077 return "elf32-littlearm";
23078 #endif
23079 }
23080
/* Thin wrapper: ARM needs no target-specific symbol frobbing beyond the
   generic ELF processing.  */
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
23087 #endif
23088
23089 /* MD interface: Finalization. */
23090
23091 void
23092 arm_cleanup (void)
23093 {
23094 literal_pool * pool;
23095
23096 /* Ensure that all the IT blocks are properly closed. */
23097 check_it_blocks_finished ();
23098
23099 for (pool = list_of_pools; pool; pool = pool->next)
23100 {
23101 /* Put it at the end of the relevant section. */
23102 subseg_set (pool->section, pool->sub_section);
23103 #ifdef OBJ_ELF
23104 arm_elf_change_section ();
23105 #endif
23106 s_ltorg (0);
23107 }
23108 }
23109
23110 #ifdef OBJ_ELF
23111 /* Remove any excess mapping symbols generated for alignment frags in
23112 SEC. We may have created a mapping symbol before a zero byte
23113 alignment; remove it if there's a mapping symbol after the
23114 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections without frag chains.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag in the section, looking at the last mapping symbol
     recorded for each frag.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* The mapping symbol sits exactly at the start of the next frag;
	 scan forward over empty frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
23175 #endif
23176
23177 /* Adjust the symbol table. This marks Thumb symbols as distinct from
23178 ARM ones. */
23179
void
arm_adjust_symtab (void)
{
  /* NOTE(review): the two #ifdef bodies below each declare their own
     `sym'; this presumes OBJ_COFF and OBJ_ELF are never both defined
     in one build — confirm against the build configuration.  */
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking-capable symbols get all flag bits set.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d, ...) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_target_internal
		  = ST_BRANCH_TO_THUMB;
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
23258
23259 /* MD interface: Initialization. */
23260
23261 static void
23262 set_constant_flonums (void)
23263 {
23264 int i;
23265
23266 for (i = 0; i < NUM_FLOAT_VALS; i++)
23267 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
23268 abort ();
23269 }
23270
23271 /* Auto-select Thumb mode if it's the only available instruction set for the
23272 given architecture. */
23273
23274 static void
23275 autoselect_thumb_from_cpu_variant (void)
23276 {
23277 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
23278 opcode_select (16);
23279 }
23280
/* Target initialization hook: build the parser hash tables, resolve the
   CPU/FPU selection from the command-line options, record the object
   file flags and set the BFD machine type.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate the lookup tables used while parsing instructions.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  /* Still no FPU selected: pick a fallback based on what is known about
     the CPU.  */
  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Most specific feature first.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
23505
23506 /* Command line processing. */
23507
23508 /* md_parse_option
23509 Invocation line includes a switch not recognized by the base assembler.
23510 See if it's a processor-specific option.
23511
23512 This routine is somewhat complicated by the need for backwards
23513 compatibility (since older releases of gcc can't be changed).
23514 The new options try to make the interface as compatible as
23515 possible with GCC.
23516
23517 New options (supported) are:
23518
23519 -mcpu=<cpu name> Assemble for selected processor
23520 -march=<architecture name> Assemble for selected architecture
23521 -mfpu=<fpu architecture> Assemble for selected FPU.
23522 -EB/-mbig-endian Big-endian
23523 -EL/-mlittle-endian Little-endian
23524 -k Generate PIC code
23525 -mthumb Start in Thumb mode
23526 -mthumb-interwork Code supports ARM/Thumb interworking
23527
23528 -m[no-]warn-deprecated Warn about deprecated features
23529
23530 For now we will also provide support for:
23531
23532 -mapcs-32 32-bit Program counter
23533 -mapcs-26 26-bit Program counter
   -mapcs-float		Floats passed in FP registers
23535 -mapcs-reentrant Reentrant code
23536 -matpcs
23537 (sometime these will probably be replaced with -mapcs=<list of options>
23538 and -matpcs=<list of options>)
23539
   The remaining options are only supported for backwards compatibility.
23541 Cpu variants, the arm part is optional:
23542 -m[arm]1 Currently not supported.
23543 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
23544 -m[arm]3 Arm 3 processor
23545 -m[arm]6[xx], Arm 6 processors
23546 -m[arm]7[xx][t][[d]m] Arm 7 processors
23547 -m[arm]8[10] Arm 8 processors
23548 -m[arm]9[20][tdmi] Arm 9 processors
23549 -mstrongarm[110[0]] StrongARM processors
23550 -mxscale XScale processors
23551 -m[arm]v[2345[t[e]]] Arm architectures
23552 -mall All (except the ARM1)
23553 FP variants:
23554 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
23555 -mfpe-old (No float load/store multiples)
23556 -mvfpxd VFP Single precision
23557 -mvfp All VFP
23558 -mno-fpu Disable all floating point instructions
23559
23560 The following CPU names are recognized:
23561 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
23562 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
23563 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
23564 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
23565 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
23566 arm10t arm10e, arm1020t, arm1020e, arm10200e,
23567 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
23568
23569 */
23570
/* Short options accepted by the ARM target: "-m<arg>" (CPU/arch/FPU
   selection and legacy switches) and "-k" (generate PIC code).  */
const char * md_shortopts = "m:k";

/* getopt_long return values for the target-specific long options.
   OPTION_EB / OPTION_EL are only defined when the corresponding
   endianness can actually be selected for this configuration.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

/* Target-specific long options; terminated by an all-zero entry.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
23598
/* A simple boolean command-line option: when OPTION matches, *VAR is set
   to VALUE (and DEPRECATED, if non-null, is printed as a warning).  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int *var;		/* Variable to change.  */
  int value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};

struct arm_option_table arm_opts[] =
{
  {"k",	     N_("generate PIC code"), &pic_code,    1, NULL},
  {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.  */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},

  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
   &warn_on_deprecated, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
23633
/* A legacy "-m<cpu>" style option: when OPTION matches, *VAR is pointed
   at VALUE (a feature set) and the DEPRECATED message suggesting the
   modern -mcpu=/-march= spelling is printed.  */
struct arm_legacy_option_table
{
  char *option;			/* Option name to match.  */
  const arm_feature_set **var;	/* Variable to change.  */
  const arm_feature_set value;	/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",  &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
23754
/* An entry in the -mcpu= table: the CPU NAME (with NAME_LEN cached so a
   length-limited match can be done against "cpu+ext" strings), the
   feature set it enables, the FPU assumed when -mfpu= is not given, and
   an optional canonical spelling of the CPU name.  */
struct arm_cpu_option_table
{
  char *name;
  size_t name_len;
  const arm_feature_set value;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
#define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm250",	ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm600",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm610",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm620",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dm",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7di",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dmi",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700i",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm740t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710c",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7100",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500fe",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm810",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm920",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"),
  ARM_CPU_OPT ("arm920t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm922t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm940t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm926ej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"),
  ARM_CPU_OPT ("arm946e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"),
  ARM_CPU_OPT ("arm966e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm968e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm10t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10tdmi",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1020",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"),
  ARM_CPU_OPT ("arm1020t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm1020e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1022e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2,
	       "ARM1026EJ-S"),
  ARM_CPU_OPT ("arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa606te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa616te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa626te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fmp626",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa726te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1136js",	ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"),
  ARM_CPU_OPT ("arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2,
	       "ARM1136JF-S"),
  ARM_CPU_OPT ("arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("mpcore",	ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"),
  ARM_CPU_OPT ("mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"),
  ARM_CPU_OPT ("arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1176jz-s",	ARM_ARCH_V6ZK,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1176jzf-s",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("cortex-a5",	ARM_ARCH_V7A_MP_SEC,
					 FPU_NONE,	  "Cortex-A5"),
  ARM_CPU_OPT ("cortex-a7",	ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
					 FPU_ARCH_NEON_VFP_V4,
						          "Cortex-A7"),
  ARM_CPU_OPT ("cortex-a8",	ARM_ARCH_V7A_SEC,
					 ARM_FEATURE (0, FPU_VFP_V3
							 | FPU_NEON_EXT_V1),
						          "Cortex-A8"),
  ARM_CPU_OPT ("cortex-a9",	ARM_ARCH_V7A_MP_SEC,
					 ARM_FEATURE (0, FPU_VFP_V3
							 | FPU_NEON_EXT_V1),
						          "Cortex-A9"),
  ARM_CPU_OPT ("cortex-a15",	ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
					 FPU_ARCH_NEON_VFP_V4,
						          "Cortex-A15"),
  ARM_CPU_OPT ("cortex-r4",	ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"),
  ARM_CPU_OPT ("cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
						          "Cortex-R4F"),
  ARM_CPU_OPT ("cortex-r5",	ARM_ARCH_V7R_IDIV,
					 FPU_NONE,	  "Cortex-R5"),
  ARM_CPU_OPT ("cortex-m4",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"),
  ARM_CPU_OPT ("cortex-m3",	ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"),
  ARM_CPU_OPT ("cortex-m1",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"),
  ARM_CPU_OPT ("cortex-m0",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"),
  ARM_CPU_OPT ("cortex-m0plus",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0+"),
  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("i80200",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* Maverick */
  ARM_CPU_OPT ("ep9312",	ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
					 FPU_ARCH_MAVERICK,
						          "ARM920T"),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
23898
/* An entry in the -march= table: the architecture NAME (with NAME_LEN
   cached for length-limited matching against "arch+ext" strings), its
   feature set, and the FPU assumed when -mfpu= is not given.  */
struct arm_arch_option_table
{
  char *name;
  size_t name_len;
  const arm_feature_set value;
  const arm_feature_set default_fpu;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6ZKT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
23957
23958 /* ISA extensions in the co-processor and main instruction set space. */
23959 struct arm_option_extension_value_table
23960 {
23961 char *name;
23962 size_t name_len;
23963 const arm_feature_set value;
23964 const arm_feature_set allowed_archs;
23965 };
23966
23967 /* The following table must be in alphabetical order with a NULL last entry.
23968 */
23969 #define ARM_EXT_OPT(N, V, AA) { N, sizeof (N) - 1, V, AA }
23970 static const struct arm_option_extension_value_table arm_extensions[] =
23971 {
23972 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
23973 ARM_FEATURE (ARM_EXT_V8, 0)),
23974 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8,
23975 ARM_FEATURE (ARM_EXT_V8, 0)),
23976 ARM_EXT_OPT ("idiv", ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0),
23977 ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
23978 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE (0, ARM_CEXT_IWMMXT), ARM_ANY),
23979 ARM_EXT_OPT ("iwmmxt2",
23980 ARM_FEATURE (0, ARM_CEXT_IWMMXT2), ARM_ANY),
23981 ARM_EXT_OPT ("maverick",
23982 ARM_FEATURE (0, ARM_CEXT_MAVERICK), ARM_ANY),
23983 ARM_EXT_OPT ("mp", ARM_FEATURE (ARM_EXT_MP, 0),
23984 ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
23985 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
23986 ARM_FEATURE (ARM_EXT_V8, 0)),
23987 ARM_EXT_OPT ("os", ARM_FEATURE (ARM_EXT_OS, 0),
23988 ARM_FEATURE (ARM_EXT_V6M, 0)),
23989 ARM_EXT_OPT ("sec", ARM_FEATURE (ARM_EXT_SEC, 0),
23990 ARM_FEATURE (ARM_EXT_V6K | ARM_EXT_V7A, 0)),
23991 ARM_EXT_OPT ("virt", ARM_FEATURE (ARM_EXT_VIRT | ARM_EXT_ADIV
23992 | ARM_EXT_DIV, 0),
23993 ARM_FEATURE (ARM_EXT_V7A, 0)),
23994 ARM_EXT_OPT ("xscale",ARM_FEATURE (0, ARM_CEXT_XSCALE), ARM_ANY),
23995 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
23996 };
23997 #undef ARM_EXT_OPT
23998
/* ISA floating-point and Advanced SIMD extensions.  An entry maps an
   -mfpu= name to the FPU feature set it selects.  */
struct arm_option_fpu_value_table
{
  char *name;
  const arm_feature_set value;
};

/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {NULL,		ARM_ARCH_NONE}
};
24050
/* A generic name -> integer-value mapping used for the remaining
   enumerated command-line options.  */
struct arm_option_value_table
{
  char *name;
  long value;
};

/* Accepted arguments to -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};

#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
24075
/* A long option with a sub-argument (e.g. -mcpu=...): OPTION is matched
   as a prefix and FUNC is called to parse the remainder of the string.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
24083
24084 static bfd_boolean
24085 arm_parse_extension (char *str, const arm_feature_set **opt_p)
24086 {
24087 arm_feature_set *ext_set = (arm_feature_set *)
24088 xmalloc (sizeof (arm_feature_set));
24089
24090 /* We insist on extensions being specified in alphabetical order, and with
24091 extensions being added before being removed. We achieve this by having
24092 the global ARM_EXTENSIONS table in alphabetical order, and using the
24093 ADDING_VALUE variable to indicate whether we are adding an extension (1)
24094 or removing it (0) and only allowing it to change in the order
24095 -1 -> 1 -> 0. */
24096 const struct arm_option_extension_value_table * opt = NULL;
24097 int adding_value = -1;
24098
24099 /* Copy the feature set, so that we can modify it. */
24100 *ext_set = **opt_p;
24101 *opt_p = ext_set;
24102
24103 while (str != NULL && *str != 0)
24104 {
24105 char *ext;
24106 size_t len;
24107
24108 if (*str != '+')
24109 {
24110 as_bad (_("invalid architectural extension"));
24111 return FALSE;
24112 }
24113
24114 str++;
24115 ext = strchr (str, '+');
24116
24117 if (ext != NULL)
24118 len = ext - str;
24119 else
24120 len = strlen (str);
24121
24122 if (len >= 2 && strncmp (str, "no", 2) == 0)
24123 {
24124 if (adding_value != 0)
24125 {
24126 adding_value = 0;
24127 opt = arm_extensions;
24128 }
24129
24130 len -= 2;
24131 str += 2;
24132 }
24133 else if (len > 0)
24134 {
24135 if (adding_value == -1)
24136 {
24137 adding_value = 1;
24138 opt = arm_extensions;
24139 }
24140 else if (adding_value != 1)
24141 {
24142 as_bad (_("must specify extensions to add before specifying "
24143 "those to remove"));
24144 return FALSE;
24145 }
24146 }
24147
24148 if (len == 0)
24149 {
24150 as_bad (_("missing architectural extension"));
24151 return FALSE;
24152 }
24153
24154 gas_assert (adding_value != -1);
24155 gas_assert (opt != NULL);
24156
24157 /* Scan over the options table trying to find an exact match. */
24158 for (; opt->name != NULL; opt++)
24159 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24160 {
24161 /* Check we can apply the extension to this architecture. */
24162 if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
24163 {
24164 as_bad (_("extension does not apply to the base architecture"));
24165 return FALSE;
24166 }
24167
24168 /* Add or remove the extension. */
24169 if (adding_value)
24170 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
24171 else
24172 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
24173
24174 break;
24175 }
24176
24177 if (opt->name == NULL)
24178 {
24179 /* Did we fail to find an extension because it wasn't specified in
24180 alphabetical order, or because it does not exist? */
24181
24182 for (opt = arm_extensions; opt->name != NULL; opt++)
24183 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24184 break;
24185
24186 if (opt->name == NULL)
24187 as_bad (_("unknown architectural extension `%s'"), str);
24188 else
24189 as_bad (_("architectural extensions must be specified in "
24190 "alphabetical order"));
24191
24192 return FALSE;
24193 }
24194 else
24195 {
24196 /* We should skip the extension we've just matched the next time
24197 round. */
24198 opt++;
24199 }
24200
24201 str = ext;
24202 };
24203
24204 return TRUE;
24205 }
24206
/* Handle the -mcpu=<name>[+ext[+ext...]] command line option.  Look the
   CPU name up in arm_cpus, record its feature set and default FPU in
   mcpu_cpu_opt / mcpu_fpu_opt, copy a display name into
   selected_cpu_name, and hand any "+extension" suffix on to
   arm_parse_extension.  Returns FALSE (after issuing a diagnostic)
   on any failure.  */
static bfd_boolean
arm_parse_cpu (char *str)
{
  const struct arm_cpu_option_table *opt;
  char *ext = strchr (str, '+');	/* Start of "+ext" suffix, if any.  */
  size_t len;				/* Length of the bare CPU name.  */

  if (ext != NULL)
    len = ext - str;
  else
    len = strlen (str);

  if (len == 0)
    {
      as_bad (_("missing cpu name `%s'"), str);
      return FALSE;
    }

  for (opt = arm_cpus; opt->name != NULL; opt++)
    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	mcpu_fpu_opt = &opt->default_fpu;
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    size_t i;

	    /* No canonical name in the table: report the matched name
	       upper-cased.  NOTE(review): assumes selected_cpu_name is
	       large enough for every table entry -- confirm against its
	       declaration.  */
	    for (i = 0; i < len; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }

	if (ext != NULL)
	  return arm_parse_extension (ext, &mcpu_cpu_opt);

	return TRUE;
      }

  as_bad (_("unknown cpu `%s'"), str);
  return FALSE;
}
24250
/* Handle the -march=<name>[+ext[+ext...]] command line option.  Look the
   architecture name up in arm_archs, record its feature set and default
   FPU in march_cpu_opt / march_fpu_opt, and hand any "+extension"
   suffix on to arm_parse_extension.  Returns FALSE (after issuing a
   diagnostic) on any failure.  */
static bfd_boolean
arm_parse_arch (char *str)
{
  const struct arm_arch_option_table *opt;
  char *ext = strchr (str, '+');	/* Start of "+ext" suffix, if any.  */
  size_t len;				/* Length of the bare arch name.  */

  if (ext != NULL)
    len = ext - str;
  else
    len = strlen (str);

  if (len == 0)
    {
      as_bad (_("missing architecture name `%s'"), str);
      return FALSE;
    }

  for (opt = arm_archs; opt->name != NULL; opt++)
    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
      {
	march_cpu_opt = &opt->value;
	march_fpu_opt = &opt->default_fpu;
	/* Unlike arm_parse_cpu, the name is recorded as-is (no
	   upper-casing); aeabi_set_public_attributes upper-cases
	   "armv" prefixed names later.  */
	strcpy (selected_cpu_name, opt->name);

	if (ext != NULL)
	  return arm_parse_extension (ext, &march_cpu_opt);

	return TRUE;
      }

  as_bad (_("unknown architecture `%s'\n"), str);
  return FALSE;
}
24285
24286 static bfd_boolean
24287 arm_parse_fpu (char * str)
24288 {
24289 const struct arm_option_fpu_value_table * opt;
24290
24291 for (opt = arm_fpus; opt->name != NULL; opt++)
24292 if (streq (opt->name, str))
24293 {
24294 mfpu_opt = &opt->value;
24295 return TRUE;
24296 }
24297
24298 as_bad (_("unknown floating point format `%s'\n"), str);
24299 return FALSE;
24300 }
24301
24302 static bfd_boolean
24303 arm_parse_float_abi (char * str)
24304 {
24305 const struct arm_option_value_table * opt;
24306
24307 for (opt = arm_float_abis; opt->name != NULL; opt++)
24308 if (streq (opt->name, str))
24309 {
24310 mfloat_abi_opt = opt->value;
24311 return TRUE;
24312 }
24313
24314 as_bad (_("unknown floating point abi `%s'\n"), str);
24315 return FALSE;
24316 }
24317
24318 #ifdef OBJ_ELF
24319 static bfd_boolean
24320 arm_parse_eabi (char * str)
24321 {
24322 const struct arm_option_value_table *opt;
24323
24324 for (opt = arm_eabis; opt->name != NULL; opt++)
24325 if (streq (opt->name, str))
24326 {
24327 meabi_flags = opt->value;
24328 return TRUE;
24329 }
24330 as_bad (_("unknown EABI `%s'\n"), str);
24331 return FALSE;
24332 }
24333 #endif
24334
24335 static bfd_boolean
24336 arm_parse_it_mode (char * str)
24337 {
24338 bfd_boolean ret = TRUE;
24339
24340 if (streq ("arm", str))
24341 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
24342 else if (streq ("thumb", str))
24343 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
24344 else if (streq ("always", str))
24345 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
24346 else if (streq ("never", str))
24347 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
24348 else
24349 {
24350 as_bad (_("unknown implicit IT mode `%s', should be "\
24351 "arm, thumb, always, or never."), str);
24352 ret = FALSE;
24353 }
24354
24355 return ret;
24356 }
24357
/* Long ("-mfoo=<arg>") command line options, each paired with the
   parser function that consumes its argument.  Scanned by
   md_parse_option and listed by md_show_usage.  The help strings embed
   tabs to align in the usage output.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {NULL, NULL, 0, NULL}
};
24376
/* Handle command line option C with argument ARG.  Returns non-zero if
   the option was recognized, zero otherwise.  Dispatches in order to:
   the fixed cases below, the arm_opts table, the arm_legacy_opts table,
   and finally the arm_long_opts table of "-mfoo=<arg>" options.  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Simple table-driven options: C is the first character of the
	 option name, ARG (if any) must match the rest exactly.
	 NOTE(review): when ARG is NULL but opt->option has more than one
	 character, streq is reached with a NULL first argument -- the
	 tables presumably never trigger this; confirm.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options: same matching rule, but the variable stores a
	 pointer to the table's value rather than the value itself.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sup-option parser.  */
	      /* ARG is the option text minus the leading C; skipping
		 strlen (option) - 1 characters steps past the rest of
		 the option name and its '=' to the value.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
24467
/* Print the ARM-specific command line options (both the short option
   tables and arm_long_opts) to FP, for "as --help".  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  /* Entries with a NULL help string are deliberately undocumented.  */
  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));
}
24497
24498
24499 #ifdef OBJ_ELF
typedef struct
{
  int val;			/* EABI Tag_CPU_arch value.  */
  arm_feature_set flags;	/* Features implied by that architecture.  */
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  The scan in aeabi_set_public_attributes records
   the last entry that still contributes a new feature, so an entry that
   is a feature superset of an earlier one must come after it (hence V6K
   before V6Z, and V6T2 after the v6-M variants) even though the Tag
   values are then out of numeric order.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {9, ARM_ARCH_V6K},
    {7, ARM_ARCH_V6Z},
    {11, ARM_ARCH_V6M},
    {12, ARM_ARCH_V6SM},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7A_IDIV_MP_SEC_VIRT},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {14, ARM_ARCH_V8A},
    {0, ARM_ARCH_NONE}
};
24528
24529 /* Set an attribute if it has not already been set by the user. */
24530 static void
24531 aeabi_set_attribute_int (int tag, int value)
24532 {
24533 if (tag < 1
24534 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
24535 || !attributes_set_explicitly[tag])
24536 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
24537 }
24538
24539 static void
24540 aeabi_set_attribute_string (int tag, const char *value)
24541 {
24542 if (tag < 1
24543 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
24544 || !attributes_set_explicitly[tag])
24545 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
24546 }
24547
/* Set the public EABI object attributes.  Derives the architecture,
   profile, ISA, FP/SIMD and extension attributes from the union of the
   features actually used and those requested on the command line, then
   records each via aeabi_set_attribute_*, which skips tags the user set
   explicitly.  */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  char profile;
  int virt_sec = 0;
  int fp16_optional = 0;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);

  /* Any use of ARM state implies at least ARMv1; any use of Thumb state
     implies at least v4T.  */
  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* We need to make sure that the attributes do not identify us as v6S-M
     when the only v6S-M feature in use is the Operating System Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
    if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
      ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);

  /* Scan cpu_arch_ver keeping the last entry that contributes a feature
     not yet accounted for; clearing as we go makes later supersets win.  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == 10
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    arch = 13;

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* Architecture names from -march are stored as-is; upper-case the
	 part after "armv" for display (e.g. "armv7-a" -> "7-A").  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    profile = 'M';
  else
    profile = '\0';

  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    aeabi_set_attribute_int (Tag_THUMB_ISA_use,
	ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);

  /* Tag_VFP_arch.  Checked most-capable first; fp16_optional marks the
     VFP variants where half-precision is an optional extension, so
     Tag_VFP_HP_extension is emitted below only in that case.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8))
    aeabi_set_attribute_int (Tag_VFP_arch, 7);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use: v1xd without full v1 means single-precision
     only.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8 we set the tag to 0 as integer divide is implied by the base
     architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use: bit 0 = Security Extensions, bit 1 =
     Virtualization Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
24739
24740 /* Add the default contents for the .ARM.attributes section. */
24741 void
24742 arm_md_end (void)
24743 {
24744 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
24745 return;
24746
24747 aeabi_set_public_attributes ();
24748 }
24749 #endif /* OBJ_ELF */
24750
24751
/* Parse a .cpu directive.  Switches the active CPU feature set
   (mcpu_cpu_opt, selected_cpu, cpu_variant) mid-assembly to the named
   CPU from the arm_cpus table.  */

static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char *name;
  char saved_char;

  /* NUL-terminate the operand in place; the overwritten character is
     restored before returning.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    /* No canonical name: record the table name upper-cased.  */
	    int i;
	    for (i = 0; opt->name[i]; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);

	    selected_cpu_name[i] = 0;
	  }
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
24792
24793
/* Parse a .arch directive.  Switches the active feature set
   (mcpu_cpu_opt, selected_cpu, cpu_variant) mid-assembly to the named
   architecture from the arm_archs table.  */

static void
s_arm_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  /* NUL-terminate the operand in place; the overwritten character is
     restored before returning.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	strcpy (selected_cpu_name, opt->name);
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
24826
24827
/* Parse a .object_arch directive.  Records an architecture override
   (object_arch) used by aeabi_set_public_attributes when reporting the
   object's architecture; does not change which instructions are
   accepted.  */

static void
s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  /* NUL-terminate the operand in place; the overwritten character is
     restored before returning.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	object_arch = &opt->value;
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
24857
24858 /* Parse a .arch_extension directive. */
24859
24860 static void
24861 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
24862 {
24863 const struct arm_option_extension_value_table *opt;
24864 char saved_char;
24865 char *name;
24866 int adding_value = 1;
24867
24868 name = input_line_pointer;
24869 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24870 input_line_pointer++;
24871 saved_char = *input_line_pointer;
24872 *input_line_pointer = 0;
24873
24874 if (strlen (name) >= 2
24875 && strncmp (name, "no", 2) == 0)
24876 {
24877 adding_value = 0;
24878 name += 2;
24879 }
24880
24881 for (opt = arm_extensions; opt->name != NULL; opt++)
24882 if (streq (opt->name, name))
24883 {
24884 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
24885 {
24886 as_bad (_("architectural extension `%s' is not allowed for the "
24887 "current base architecture"), name);
24888 break;
24889 }
24890
24891 if (adding_value)
24892 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, opt->value);
24893 else
24894 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->value);
24895
24896 mcpu_cpu_opt = &selected_cpu;
24897 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24898 *input_line_pointer = saved_char;
24899 demand_empty_rest_of_line ();
24900 return;
24901 }
24902
24903 if (opt->name == NULL)
24904 as_bad (_("unknown architecture `%s'\n"), name);
24905
24906 *input_line_pointer = saved_char;
24907 ignore_rest_of_line ();
24908 }
24909
/* Parse a .fpu directive.  Switches the active FPU feature set
   (mfpu_opt, cpu_variant) mid-assembly to the named FPU from the
   arm_fpus table.  */

static void
s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_option_fpu_value_table *opt;
  char saved_char;
  char *name;

  /* NUL-terminate the operand in place; the overwritten character is
     restored before returning.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mfpu_opt = &opt->value;
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown floating point format `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
24939
/* Copy symbol information.  Propagates the ARM target-private flag word
   (accessed via the ARM_GET_FLAG macro, defined elsewhere) from SRC to
   DEST.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
24947
24948 #ifdef OBJ_ELF
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  Used when parsing
   .eabi_attribute directives with symbolic tag names.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  static const struct
  {
    const char * name;
    const int tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_FP_arch),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align_needed),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align_preserved),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_FP_HP_extension),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_MPextension_use),
      T (Tag_DIV_use),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_T2EE_use),
      T (Tag_Virtualization_use),
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
#undef T
    };
  unsigned int i;

  if (name == NULL)
    return -1;

  /* Linear scan; the table is small and this only runs per directive.  */
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
25022
25023
25024 /* Apply sym value for relocations only in the case that
25025 they are for local symbols and you have the respective
25026 architectural feature for blx and simple switches. */
25027 int
25028 arm_apply_sym_value (struct fix * fixP)
25029 {
25030 if (fixP->fx_addsy
25031 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25032 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
25033 {
25034 switch (fixP->fx_r_type)
25035 {
25036 case BFD_RELOC_ARM_PCREL_BLX:
25037 case BFD_RELOC_THUMB_PCREL_BRANCH23:
25038 if (ARM_IS_FUNC (fixP->fx_addsy))
25039 return 1;
25040 break;
25041
25042 case BFD_RELOC_ARM_PCREL_CALL:
25043 case BFD_RELOC_THUMB_PCREL_BLX:
25044 if (THUMB_IS_FUNC (fixP->fx_addsy))
25045 return 1;
25046 break;
25047
25048 default:
25049 break;
25050 }
25051
25052 }
25053 return 0;
25054 }
25055 #endif /* OBJ_ELF */
This page took 0.620332 seconds and 4 git commands to generate.