1/* aarch64-opc.c -- AArch64 opcode support.
 2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21#include "sysdep.h"
22#include <assert.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <stdint.h>
26#include <stdarg.h>
27#include <inttypes.h>
28
29#include "opintl.h"
30
31#include "aarch64-opc.h"
32
33#ifdef DEBUG_AARCH64
34int debug_dump = FALSE;
35#endif /* DEBUG_AARCH64 */
36
37/* Helper functions to determine which operand is to be used to encode/decode
38 the size:Q fields for AdvSIMD instructions. */
39
40static inline bfd_boolean
41vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
42{
43 return ((qualifier >= AARCH64_OPND_QLF_V_8B
44 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
45 : FALSE);
46}
47
48static inline bfd_boolean
49fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
50{
51 return ((qualifier >= AARCH64_OPND_QLF_S_B
52 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
53 : FALSE);
54}
55
56enum data_pattern
57{
58 DP_UNKNOWN,
59 DP_VECTOR_3SAME,
60 DP_VECTOR_LONG,
61 DP_VECTOR_WIDE,
62 DP_VECTOR_ACROSS_LANES,
63};
64
65static const char significant_operand_index [] =
66{
67 0, /* DP_UNKNOWN, by default using operand 0. */
68 0, /* DP_VECTOR_3SAME */
69 1, /* DP_VECTOR_LONG */
70 2, /* DP_VECTOR_WIDE */
71 1, /* DP_VECTOR_ACROSS_LANES */
72};
73
74/* Given a sequence of qualifiers in QUALIFIERS, determine and return
75 the data pattern.
76 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
77 corresponds to one of a sequence of operands. */
78
79static enum data_pattern
80get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
81{
82 if (vector_qualifier_p (qualifiers[0]) == TRUE)
83 {
84 /* e.g. v.4s, v.4s, v.4s
85 or v.4h, v.4h, v.h[3]. */
86 if (qualifiers[0] == qualifiers[1]
87 && vector_qualifier_p (qualifiers[2]) == TRUE
88 && (aarch64_get_qualifier_esize (qualifiers[0])
89 == aarch64_get_qualifier_esize (qualifiers[1]))
90 && (aarch64_get_qualifier_esize (qualifiers[0])
91 == aarch64_get_qualifier_esize (qualifiers[2])))
92 return DP_VECTOR_3SAME;
93 /* e.g. v.8h, v.8b, v.8b.
94 or v.4s, v.4h, v.h[2].
95 or v.8h, v.16b. */
96 if (vector_qualifier_p (qualifiers[1]) == TRUE
97 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
98 && (aarch64_get_qualifier_esize (qualifiers[0])
99 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
100 return DP_VECTOR_LONG;
101 /* e.g. v.8h, v.8h, v.8b. */
102 if (qualifiers[0] == qualifiers[1]
103 && vector_qualifier_p (qualifiers[2]) == TRUE
104 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
105 && (aarch64_get_qualifier_esize (qualifiers[0])
106 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
107 && (aarch64_get_qualifier_esize (qualifiers[0])
108 == aarch64_get_qualifier_esize (qualifiers[1])))
109 return DP_VECTOR_WIDE;
110 }
111 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
112 {
113 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
114 if (vector_qualifier_p (qualifiers[1]) == TRUE
115 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
116 return DP_VECTOR_ACROSS_LANES;
117 }
118
119 return DP_UNKNOWN;
120}
121
122/* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123 the AdvSIMD instructions. */
124/* N.B. it is possible to do some optimization that avoids calling
 125 get_data_pattern each time an operand needs to be selected. We could
 126 either cache the calculated result or generate the data statically;
 127 however, it is not obvious that the optimization would bring significant
 128 benefit. */
129
130int
131aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
132{
133 return
134 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
135}
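/* For illustration: with the qualifier sequence { V_4S, V_4S, V_4S }
   (a "3same" form such as ADD Vd.4S, Vn.4S, Vm.4S), get_data_pattern
   returns DP_VECTOR_3SAME and operand 0 supplies the size:Q fields;
   with { V_8H, V_8H, V_8B } (a "wide" form such as SADDW) it returns
   DP_VECTOR_WIDE, so operand 2, the narrow source, is used instead.  */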
136\f
137const aarch64_field fields[] =
138{
139 { 0, 0 }, /* NIL. */
140 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
141 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
142 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
143 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
144 { 5, 19 }, /* imm19: e.g. in CBZ. */
145 { 5, 19 }, /* immhi: e.g. in ADRP. */
146 { 29, 2 }, /* immlo: e.g. in ADRP. */
147 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
148 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
149 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
150 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
151 { 0, 5 }, /* Rt: in load/store instructions. */
152 { 0, 5 }, /* Rd: in many integer instructions. */
153 { 5, 5 }, /* Rn: in many integer instructions. */
154 { 10, 5 }, /* Rt2: in load/store pair instructions. */
155 { 10, 5 }, /* Ra: in fp instructions. */
156 { 5, 3 }, /* op2: in the system instructions. */
157 { 8, 4 }, /* CRm: in the system instructions. */
158 { 12, 4 }, /* CRn: in the system instructions. */
159 { 16, 3 }, /* op1: in the system instructions. */
160 { 19, 2 }, /* op0: in the system instructions. */
161 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
162 { 12, 4 }, /* cond: condition flags as a source operand. */
163 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
164 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
165 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
166 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
167 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
168 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
169 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
170 { 12, 1 }, /* S: in load/store reg offset instructions. */
171 { 21, 2 }, /* hw: in move wide constant instructions. */
172 { 22, 2 }, /* opc: in load/store reg offset instructions. */
173 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
174 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
175 { 22, 2 }, /* type: floating point type field in fp data inst. */
176 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
177 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
178 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
179 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
180 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
181 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
182 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
183 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
184 { 5, 14 }, /* imm14: in test bit and branch instructions. */
185 { 5, 16 }, /* imm16: in exception instructions. */
186 { 0, 26 }, /* imm26: in unconditional branch instructions. */
187 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
188 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
189 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
190 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
191 { 22, 1 }, /* N: in logical (immediate) instructions. */
192 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
193 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
194 { 31, 1 }, /* sf: in integer data processing instructions. */
195 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
196 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
197 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
198 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
199 { 31, 1 }, /* b5: in the test bit and branch instructions. */
200 { 19, 5 }, /* b40: in the test bit and branch instructions. */
201 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
202};
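/* Illustrative note: each entry above is { lsb, width }, so the "size"
   field { 22, 2 } occupies bits [23:22] of an instruction word and its
   value can be recovered as

     (code >> 22) & ((1u << 2) - 1)

   which is the scheme the generic field insert/extract helpers rely on.  */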
203
204enum aarch64_operand_class
205aarch64_get_operand_class (enum aarch64_opnd type)
206{
207 return aarch64_operands[type].op_class;
208}
209
210const char *
211aarch64_get_operand_name (enum aarch64_opnd type)
212{
213 return aarch64_operands[type].name;
214}
215
216/* Get operand description string.
217 This is usually for the diagnosis purpose. */
218const char *
219aarch64_get_operand_desc (enum aarch64_opnd type)
220{
221 return aarch64_operands[type].desc;
222}
223
224/* Table of all conditional affixes. */
225const aarch64_cond aarch64_conds[16] =
226{
227 {{"eq"}, 0x0},
228 {{"ne"}, 0x1},
229 {{"cs", "hs"}, 0x2},
230 {{"cc", "lo", "ul"}, 0x3},
231 {{"mi"}, 0x4},
232 {{"pl"}, 0x5},
233 {{"vs"}, 0x6},
234 {{"vc"}, 0x7},
235 {{"hi"}, 0x8},
236 {{"ls"}, 0x9},
237 {{"ge"}, 0xa},
238 {{"lt"}, 0xb},
239 {{"gt"}, 0xc},
240 {{"le"}, 0xd},
241 {{"al"}, 0xe},
242 {{"nv"}, 0xf},
243};
244
245const aarch64_cond *
246get_cond_from_value (aarch64_insn value)
247{
248 assert (value < 16);
249 return &aarch64_conds[(unsigned int) value];
250}
251
252const aarch64_cond *
253get_inverted_cond (const aarch64_cond *cond)
254{
255 return &aarch64_conds[cond->value ^ 0x1];
256}
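/* For illustration: inverting a condition simply flips the low bit of its
   value, e.g.

     get_inverted_cond (get_cond_from_value (0xa))->value == 0xb

   i.e. "ge" (0xa) inverts to "lt" (0xb), and "eq" (0x0) to "ne" (0x1).  */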
257
258/* Table describing the operand extension/shifting operators; indexed by
259 enum aarch64_modifier_kind.
260
261 The value column provides the most common values for encoding modifiers,
262 which enables table-driven encoding/decoding for the modifiers. */
263const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
264{
265 {"none", 0x0},
266 {"msl", 0x0},
267 {"ror", 0x3},
268 {"asr", 0x2},
269 {"lsr", 0x1},
270 {"lsl", 0x0},
271 {"uxtb", 0x0},
272 {"uxth", 0x1},
273 {"uxtw", 0x2},
274 {"uxtx", 0x3},
275 {"sxtb", 0x4},
276 {"sxth", 0x5},
277 {"sxtw", 0x6},
278 {"sxtx", 0x7},
279 {NULL, 0},
280};
281
282enum aarch64_modifier_kind
283aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
284{
285 return desc - aarch64_operand_modifiers;
286}
287
288aarch64_insn
289aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
290{
291 return aarch64_operand_modifiers[kind].value;
292}
293
294enum aarch64_modifier_kind
295aarch64_get_operand_modifier_from_value (aarch64_insn value,
296 bfd_boolean extend_p)
297{
298 if (extend_p == TRUE)
299 return AARCH64_MOD_UXTB + value;
300 else
301 return AARCH64_MOD_LSL - value;
302}
303
304bfd_boolean
305aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
306{
307 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
308 ? TRUE : FALSE;
309}
310
311static inline bfd_boolean
312aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
313{
314 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
315 ? TRUE : FALSE;
316}
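/* For illustration:

     aarch64_get_operand_modifier_from_value (0x2, TRUE) == AARCH64_MOD_UXTW

   matching the {"uxtw", 0x2} entry above, while with EXTEND_P == FALSE the
   value is counted down from LSL, e.g. 0x3 maps to AARCH64_MOD_ROR.  */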
317
318const struct aarch64_name_value_pair aarch64_barrier_options[16] =
319{
320 { "#0x00", 0x0 },
321 { "oshld", 0x1 },
322 { "oshst", 0x2 },
323 { "osh", 0x3 },
324 { "#0x04", 0x4 },
325 { "nshld", 0x5 },
326 { "nshst", 0x6 },
327 { "nsh", 0x7 },
328 { "#0x08", 0x8 },
329 { "ishld", 0x9 },
330 { "ishst", 0xa },
331 { "ish", 0xb },
332 { "#0x0c", 0xc },
333 { "ld", 0xd },
334 { "st", 0xe },
335 { "sy", 0xf },
336};
337
338/* op -> op: load = 0 instruction = 1 store = 2
339 l -> level: 1-3
340 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
341#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
342const struct aarch64_name_value_pair aarch64_prfops[32] =
343{
344 { "pldl1keep", B(0, 1, 0) },
345 { "pldl1strm", B(0, 1, 1) },
346 { "pldl2keep", B(0, 2, 0) },
347 { "pldl2strm", B(0, 2, 1) },
348 { "pldl3keep", B(0, 3, 0) },
349 { "pldl3strm", B(0, 3, 1) },
350 { NULL, 0x06 },
351 { NULL, 0x07 },
352 { "plil1keep", B(1, 1, 0) },
353 { "plil1strm", B(1, 1, 1) },
354 { "plil2keep", B(1, 2, 0) },
355 { "plil2strm", B(1, 2, 1) },
356 { "plil3keep", B(1, 3, 0) },
357 { "plil3strm", B(1, 3, 1) },
358 { NULL, 0x0e },
359 { NULL, 0x0f },
360 { "pstl1keep", B(2, 1, 0) },
361 { "pstl1strm", B(2, 1, 1) },
362 { "pstl2keep", B(2, 2, 0) },
363 { "pstl2strm", B(2, 2, 1) },
364 { "pstl3keep", B(2, 3, 0) },
365 { "pstl3strm", B(2, 3, 1) },
366 { NULL, 0x16 },
367 { NULL, 0x17 },
368 { NULL, 0x18 },
369 { NULL, 0x19 },
370 { NULL, 0x1a },
371 { NULL, 0x1b },
372 { NULL, 0x1c },
373 { NULL, 0x1d },
374 { NULL, 0x1e },
375 { NULL, 0x1f },
376};
377#undef B
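/* For illustration: "pstl3strm" is B (2, 3, 1)
   == (2 << 3) | ((3 - 1) << 1) | 1 == 0x15, while "pldl1keep" encodes as
   0x00; the NULL slots keep the table indexable directly by the 5-bit
   prfop value.  */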
378\f
379/* Utilities on value constraint. */
380
381static inline int
382value_in_range_p (int64_t value, int low, int high)
383{
384 return (value >= low && value <= high) ? 1 : 0;
385}
386
387static inline int
388value_aligned_p (int64_t value, int align)
389{
390 return ((value & (align - 1)) == 0) ? 1 : 0;
391}
392
393/* A signed value fits in a field. */
394static inline int
395value_fit_signed_field_p (int64_t value, unsigned width)
396{
397 assert (width < 32);
398 if (width < sizeof (value) * 8)
399 {
400 int64_t lim = (int64_t)1 << (width - 1);
401 if (value >= -lim && value < lim)
402 return 1;
403 }
404 return 0;
405}
406
407/* An unsigned value fits in a field. */
408static inline int
409value_fit_unsigned_field_p (int64_t value, unsigned width)
410{
411 assert (width < 32);
412 if (width < sizeof (value) * 8)
413 {
414 int64_t lim = (int64_t)1 << width;
415 if (value >= 0 && value < lim)
416 return 1;
417 }
418 return 0;
419}
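/* For illustration: value_fit_signed_field_p (x, 9) accepts -256..255
   (e.g. the simm9 load/store offsets), while
   value_fit_unsigned_field_p (x, 12) accepts 0..4095.  */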
420
421/* Return 1 if OPERAND is SP or WSP. */
422int
423aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
424{
425 return ((aarch64_get_operand_class (operand->type)
426 == AARCH64_OPND_CLASS_INT_REG)
427 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
428 && operand->reg.regno == 31);
429}
430
431/* Return 1 if OPERAND is XZR or WZR. */
432int
433aarch64_zero_register_p (const aarch64_opnd_info *operand)
434{
435 return ((aarch64_get_operand_class (operand->type)
436 == AARCH64_OPND_CLASS_INT_REG)
437 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
438 && operand->reg.regno == 31);
439}
440
441/* Return true if the operand *OPERAND, which has the operand code
 442 OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
 443 qualified by the qualifier TARGET. */
444
445static inline int
446operand_also_qualified_p (const struct aarch64_opnd_info *operand,
447 aarch64_opnd_qualifier_t target)
448{
449 switch (operand->qualifier)
450 {
451 case AARCH64_OPND_QLF_W:
452 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
453 return 1;
454 break;
455 case AARCH64_OPND_QLF_X:
456 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
457 return 1;
458 break;
459 case AARCH64_OPND_QLF_WSP:
460 if (target == AARCH64_OPND_QLF_W
461 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
462 return 1;
463 break;
464 case AARCH64_OPND_QLF_SP:
465 if (target == AARCH64_OPND_QLF_X
466 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
467 return 1;
468 break;
469 default:
470 break;
471 }
472
473 return 0;
474}
475
476/* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
477 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
478
479 Return NIL if more than one expected qualifier is found. */
480
481aarch64_opnd_qualifier_t
482aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
483 int idx,
484 const aarch64_opnd_qualifier_t known_qlf,
485 int known_idx)
486{
487 int i, saved_i;
488
489 /* Special case.
490
491 When the known qualifier is NIL, we have to assume that there is only
492 one qualifier sequence in the *QSEQ_LIST and return the corresponding
493 qualifier directly. One scenario is that for instruction
494 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
495 which has only one possible valid qualifier sequence
496 NIL, S_D
497 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
498 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
499
500 Because the qualifier NIL has dual roles in the qualifier sequence:
501 it can mean no qualifier for the operand, or that the qualifier sequence is
502 not in use (when all qualifiers in the sequence are NILs), we have to
503 handle this special case here. */
504 if (known_qlf == AARCH64_OPND_NIL)
505 {
506 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
507 return qseq_list[0][idx];
508 }
509
510 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
511 {
512 if (qseq_list[i][known_idx] == known_qlf)
513 {
514 if (saved_i != -1)
515 /* More than one sequence is found to have KNOWN_QLF at
 516 KNOWN_IDX. */
517 return AARCH64_OPND_NIL;
518 saved_i = i;
519 }
520 }
521
522 return qseq_list[saved_i][idx];
523}
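/* For illustration, using the PRFM case described above: when the only
   sequence is { NIL, S_D }, passing NIL as KNOWN_QLF with KNOWN_IDX == 0
   takes the special case and the call returns AARCH64_OPND_QLF_S_D for
   IDX == 1, which the assembler can then map to the LDST64_LO12
   relocation.  */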
524
525enum operand_qualifier_kind
526{
527 OQK_NIL,
528 OQK_OPD_VARIANT,
529 OQK_VALUE_IN_RANGE,
530 OQK_MISC,
531};
532
533/* Operand qualifier description. */
534struct operand_qualifier_data
535{
536 /* The usage of the three data fields depends on the qualifier kind. */
537 int data0;
538 int data1;
539 int data2;
540 /* Description. */
541 const char *desc;
542 /* Kind. */
543 enum operand_qualifier_kind kind;
544};
545
546/* Indexed by the operand qualifier enumerators. */
547struct operand_qualifier_data aarch64_opnd_qualifiers[] =
548{
549 {0, 0, 0, "NIL", OQK_NIL},
550
551 /* Operand variant qualifiers.
552 First 3 fields:
553 element size, number of elements and common value for encoding. */
554
555 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
556 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
557 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
558 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
559
560 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
561 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
562 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
563 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
564 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
565
566 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
567 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
568 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
569 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
570 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
571 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
572 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
573 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
574 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
575
576 /* Qualifiers constraining the value range.
577 First 3 fields:
578 Lower bound, higher bound, unused. */
579
580 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
581 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
582 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
583 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
584 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
585 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
586
587 /* Qualifiers for miscellaneous purpose.
588 First 3 fields:
589 unused, unused and unused. */
590
591 {0, 0, 0, "lsl", 0},
592 {0, 0, 0, "msl", 0},
593
594 {0, 0, 0, "retrieving", 0},
595};
596
597static inline bfd_boolean
598operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
599{
600 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
601 ? TRUE : FALSE;
602}
603
604static inline bfd_boolean
605qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
606{
607 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
608 ? TRUE : FALSE;
609}
610
611const char*
612aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
613{
614 return aarch64_opnd_qualifiers[qualifier].desc;
615}
616
617/* Given an operand qualifier, return the expected data element size
618 of a qualified operand. */
619unsigned char
620aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
621{
622 assert (operand_variant_qualifier_p (qualifier) == TRUE);
623 return aarch64_opnd_qualifiers[qualifier].data0;
624}
625
626unsigned char
627aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
628{
629 assert (operand_variant_qualifier_p (qualifier) == TRUE);
630 return aarch64_opnd_qualifiers[qualifier].data1;
631}
632
633aarch64_insn
634aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
635{
636 assert (operand_variant_qualifier_p (qualifier) == TRUE);
637 return aarch64_opnd_qualifiers[qualifier].data2;
638}
639
640static int
641get_lower_bound (aarch64_opnd_qualifier_t qualifier)
642{
643 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
644 return aarch64_opnd_qualifiers[qualifier].data0;
645}
646
647static int
648get_upper_bound (aarch64_opnd_qualifier_t qualifier)
649{
650 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
651 return aarch64_opnd_qualifiers[qualifier].data1;
652}
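/* For illustration: the entry for the "4s" arrangement is {4, 4, 0x5}, so
   aarch64_get_qualifier_esize returns 4, aarch64_get_qualifier_nelem
   returns 4 and aarch64_get_qualifier_standard_value returns 0x5; for the
   range qualifier "imm_0_31", get_lower_bound and get_upper_bound return
   0 and 31 respectively.  */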
653
654#ifdef DEBUG_AARCH64
655void
656aarch64_verbose (const char *str, ...)
657{
658 va_list ap;
659 va_start (ap, str);
660 printf ("#### ");
661 vprintf (str, ap);
662 printf ("\n");
663 va_end (ap);
664}
665
666static inline void
667dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
668{
669 int i;
670 printf ("#### \t");
671 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
672 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
673 printf ("\n");
674}
675
676static void
677dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
678 const aarch64_opnd_qualifier_t *qualifier)
679{
680 int i;
681 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
682
683 aarch64_verbose ("dump_match_qualifiers:");
684 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
685 curr[i] = opnd[i].qualifier;
686 dump_qualifier_sequence (curr);
687 aarch64_verbose ("against");
688 dump_qualifier_sequence (qualifier);
689}
690#endif /* DEBUG_AARCH64 */
691
692/* TODO: improve this; an extra field could store the number of operands
 693 at run time rather than calculating it every time. */
694
695int
696aarch64_num_of_operands (const aarch64_opcode *opcode)
697{
698 int i = 0;
699 const enum aarch64_opnd *opnds = opcode->operands;
700 while (opnds[i++] != AARCH64_OPND_NIL)
701 ;
702 --i;
703 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
704 return i;
705}
706
707/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
708 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
709
710 N.B. on the entry, it is very likely that only some operands in *INST
711 have had their qualifiers established.
712
713 If STOP_AT is not -1, the function will only try to match
714 the qualifier sequence for operands before and including the operand
715 of index STOP_AT; and on success *RET will only be filled with the first
716 (STOP_AT+1) qualifiers.
717
718 A couple of examples of the matching algorithm:
719
720 X,W,NIL should match
721 X,W,NIL
722
723 NIL,NIL should match
724 X ,NIL
725
726 Apart from serving the main encoding routine, this can also be called
727 during or after the operand decoding. */
728
729int
730aarch64_find_best_match (const aarch64_inst *inst,
731 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
732 int stop_at, aarch64_opnd_qualifier_t *ret)
733{
734 int found = 0;
735 int i, num_opnds;
736 const aarch64_opnd_qualifier_t *qualifiers;
737
738 num_opnds = aarch64_num_of_operands (inst->opcode);
739 if (num_opnds == 0)
740 {
741 DEBUG_TRACE ("SUCCEED: no operand");
742 return 1;
743 }
744
745 if (stop_at < 0 || stop_at >= num_opnds)
746 stop_at = num_opnds - 1;
747
748 /* For each pattern. */
749 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
750 {
751 int j;
752 qualifiers = *qualifiers_list;
753
754 /* Start as positive. */
755 found = 1;
756
757 DEBUG_TRACE ("%d", i);
758#ifdef DEBUG_AARCH64
759 if (debug_dump)
760 dump_match_qualifiers (inst->operands, qualifiers);
761#endif
762
763 /* Most opcodes have far fewer patterns in the list.
 764 The first NIL qualifier indicates the end of the list. */
765 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
766 {
767 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
768 if (i)
769 found = 0;
770 break;
771 }
772
773 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
774 {
775 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
776 {
777 /* Either the operand does not have qualifier, or the qualifier
778 for the operand needs to be deduced from the qualifier
779 sequence.
780 In the latter case, any constraint checking related with
781 the obtained qualifier should be done later in
782 operand_general_constraint_met_p. */
783 continue;
784 }
785 else if (*qualifiers != inst->operands[j].qualifier)
786 {
787 /* Unless the target qualifier can also qualify the operand
788 (which has already had a non-nil qualifier), non-equal
789 qualifiers are generally un-matched. */
790 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
791 continue;
792 else
793 {
794 found = 0;
795 break;
796 }
797 }
798 else
799 continue; /* Equal qualifiers are certainly matched. */
800 }
801
802 /* Qualifiers established. */
803 if (found == 1)
804 break;
805 }
806
807 if (found == 1)
808 {
809 /* Fill the result in *RET. */
810 int j;
811 qualifiers = *qualifiers_list;
812
813 DEBUG_TRACE ("complete qualifiers using list %d", i);
814#ifdef DEBUG_AARCH64
815 if (debug_dump)
816 dump_qualifier_sequence (qualifiers);
817#endif
818
819 for (j = 0; j <= stop_at; ++j, ++qualifiers)
820 ret[j] = *qualifiers;
821 for (; j < AARCH64_MAX_OPND_NUM; ++j)
822 ret[j] = AARCH64_OPND_QLF_NIL;
823
824 DEBUG_TRACE ("SUCCESS");
825 return 1;
826 }
827
828 DEBUG_TRACE ("FAIL");
829 return 0;
830}
831
832/* Operand qualifier matching and resolving.
833
834 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
835 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
836
837 If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
838 succeeds. */
839
840static int
841match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
842{
843 int i;
844 aarch64_opnd_qualifier_seq_t qualifiers;
845
846 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
847 qualifiers))
848 {
849 DEBUG_TRACE ("matching FAIL");
850 return 0;
851 }
852
853 /* Update the qualifiers. */
854 if (update_p == TRUE)
855 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
856 {
857 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
858 break;
859 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
860 "update %s with %s for operand %d",
861 aarch64_get_qualifier_name (inst->operands[i].qualifier),
862 aarch64_get_qualifier_name (qualifiers[i]), i);
863 inst->operands[i].qualifier = qualifiers[i];
864 }
865
866 DEBUG_TRACE ("matching SUCCESS");
867 return 1;
868}
869
870/* Return TRUE if VALUE is a wide constant that can be moved into a general
871 register by MOVZ.
872
873 IS32 indicates whether value is a 32-bit immediate or not.
874 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
875 amount will be returned in *SHIFT_AMOUNT. */
876
877bfd_boolean
878aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
879{
880 int amount;
881
882 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
883
884 if (is32)
885 {
886 /* Allow all zeros or all ones in top 32-bits, so that
887 32-bit constant expressions like ~0x80000000 are
888 permitted. */
889 uint64_t ext = value;
890 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
891 /* Immediate out of range. */
892 return FALSE;
893 value &= (int64_t) 0xffffffff;
894 }
895
896 /* first, try movz then movn */
897 amount = -1;
898 if ((value & ((int64_t) 0xffff << 0)) == value)
899 amount = 0;
900 else if ((value & ((int64_t) 0xffff << 16)) == value)
901 amount = 16;
902 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
903 amount = 32;
904 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
905 amount = 48;
906
907 if (amount == -1)
908 {
909 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
910 return FALSE;
911 }
912
913 if (shift_amount != NULL)
914 *shift_amount = amount;
915
916 DEBUG_TRACE ("exit TRUE with amount %d", amount);
917
918 return TRUE;
919}
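/* For illustration: aarch64_wide_constant_p (0x12340000, 1, &shift)
   returns TRUE with *shift == 16 (MOVZ Wd, #0x1234, LSL #16), whereas
   0x12340001 has bits set in two 16-bit chunks and the function returns
   FALSE.  */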
920
921/* Build the accepted values for immediate logical SIMD instructions.
922
923 The standard encodings of the immediate value are:
924 N imms immr SIMD size R S
925 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
926 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
927 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
928 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
929 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
930 0 11110s 00000r 2 UInt(r) UInt(s)
931 where the all-ones value of S is reserved.
932
933 Let's call E the SIMD size.
934
935 The immediate value is: S+1 bits '1' rotated to the right by R.
936
937 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
938 (remember S != E - 1). */
939
940#define TOTAL_IMM_NB 5334
941
942typedef struct
943{
944 uint64_t imm;
945 aarch64_insn encoding;
946} simd_imm_encoding;
947
948static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
949
950static int
951simd_imm_encoding_cmp(const void *i1, const void *i2)
952{
953 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
954 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
955
956 if (imm1->imm < imm2->imm)
957 return -1;
958 if (imm1->imm > imm2->imm)
959 return +1;
960 return 0;
961}
962
963/* immediate bitfield standard encoding
964 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
965 1 ssssss rrrrrr 64 rrrrrr ssssss
966 0 0sssss 0rrrrr 32 rrrrr sssss
967 0 10ssss 00rrrr 16 rrrr ssss
968 0 110sss 000rrr 8 rrr sss
969 0 1110ss 0000rr 4 rr ss
970 0 11110s 00000r 2 r s */
971static inline int
972encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
973{
974 return (is64 << 12) | (r << 6) | s;
975}
976
977static void
978build_immediate_table (void)
979{
980 uint32_t log_e, e, s, r, s_mask;
981 uint64_t mask, imm;
982 int nb_imms;
983 int is64;
984
985 nb_imms = 0;
986 for (log_e = 1; log_e <= 6; log_e++)
987 {
988 /* Get element size. */
989 e = 1u << log_e;
990 if (log_e == 6)
991 {
992 is64 = 1;
993 mask = 0xffffffffffffffffull;
994 s_mask = 0;
995 }
996 else
997 {
998 is64 = 0;
999 mask = (1ull << e) - 1;
1000 /* log_e s_mask
1001 1 ((1 << 4) - 1) << 2 = 111100
1002 2 ((1 << 3) - 1) << 3 = 111000
1003 3 ((1 << 2) - 1) << 4 = 110000
1004 4 ((1 << 1) - 1) << 5 = 100000
1005 5 ((1 << 0) - 1) << 6 = 000000 */
1006 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1007 }
1008 for (s = 0; s < e - 1; s++)
1009 for (r = 0; r < e; r++)
1010 {
1011 /* s+1 consecutive bits to 1 (s < 63) */
1012 imm = (1ull << (s + 1)) - 1;
1013 /* rotate right by r */
1014 if (r != 0)
1015 imm = (imm >> r) | ((imm << (e - r)) & mask);
1016 /* replicate the constant depending on SIMD size */
1017 switch (log_e)
1018 {
1019 case 1: imm = (imm << 2) | imm; /* Fall through. */
 1020 case 2: imm = (imm << 4) | imm; /* Fall through. */
 1021 case 3: imm = (imm << 8) | imm; /* Fall through. */
 1022 case 4: imm = (imm << 16) | imm; /* Fall through. */
 1023 case 5: imm = (imm << 32) | imm; /* Fall through. */
1024 case 6: break;
1025 default: abort ();
1026 }
1027 simd_immediates[nb_imms].imm = imm;
1028 simd_immediates[nb_imms].encoding =
1029 encode_immediate_bitfield(is64, s | s_mask, r);
1030 nb_imms++;
1031 }
1032 }
1033 assert (nb_imms == TOTAL_IMM_NB);
1034 qsort(simd_immediates, nb_imms,
1035 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1036}
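/* Worked example (for illustration): with element size e == 8 (log_e == 3),
   s == 1 and r == 0, the element is 0x03; the fall-through replication
   above expands it to 0x0303030303030303, recorded with the encoding
   encode_immediate_bitfield (0, 1 | 0x30, 0), i.e. imms == 110001 and
   immr == 000000 in the standard-encoding table above.  */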
1037
1038/* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1039 be accepted by logical (immediate) instructions
1040 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1041
1042 IS32 indicates whether or not VALUE is a 32-bit immediate.
1043 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1044 VALUE will be returned in *ENCODING. */
1045
1046bfd_boolean
1047aarch64_logical_immediate_p (uint64_t value, int is32, aarch64_insn *encoding)
1048{
1049 simd_imm_encoding imm_enc;
1050 const simd_imm_encoding *imm_encoding;
1051 static bfd_boolean initialized = FALSE;
1052
1053 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1054 value, is32);
1055
1056 if (initialized == FALSE)
1057 {
1058 build_immediate_table ();
1059 initialized = TRUE;
1060 }
1061
1062 if (is32)
1063 {
1064 /* Allow all zeros or all ones in top 32-bits, so that
1065 constant expressions like ~1 are permitted. */
1066 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1067 return FALSE;
1068
1069 /* Replicate the 32 lower bits to the 32 upper bits. */
1070 value &= 0xffffffff;
1071 value |= value << 32;
1072 }
1073
1074 imm_enc.imm = value;
1075 imm_encoding = (const simd_imm_encoding *)
1076 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1077 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1078 if (imm_encoding == NULL)
1079 {
1080 DEBUG_TRACE ("exit with FALSE");
1081 return FALSE;
1082 }
1083 if (encoding != NULL)
1084 *encoding = imm_encoding->encoding;
1085 DEBUG_TRACE ("exit with TRUE");
1086 return TRUE;
1087}
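/* For illustration: ORR X0, X1, #0xffff is accepted because
   0x000000000000ffff is 16 consecutive ones in a 64-bit element
   (S == 15, R == 0); the bsearch finds it and, if requested, returns the
   encoding 0x100f, i.e. N:immr:imms == 1:000000:001111.  */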
1088
1089/* If 64-bit immediate IMM is in the format of
1090 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1091 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1092 of value "abcdefgh". Otherwise return -1. */
1093int
1094aarch64_shrink_expanded_imm8 (uint64_t imm)
1095{
1096 int i, ret;
1097 uint32_t byte;
1098
1099 ret = 0;
1100 for (i = 0; i < 8; i++)
1101 {
1102 byte = (imm >> (8 * i)) & 0xff;
1103 if (byte == 0xff)
1104 ret |= 1 << i;
1105 else if (byte != 0x00)
1106 return -1;
1107 }
1108 return ret;
1109}
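/* For illustration: aarch64_shrink_expanded_imm8 (0x00ff00ff00ff00ffULL)
   returns 0x55 (bytes 0, 2, 4 and 6 are 0xff), while any byte that is
   neither 0x00 nor 0xff, e.g. in 0x00ff00ff00ff00feULL, makes it
   return -1.  */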
1110
1111/* Utility inline functions for operand_general_constraint_met_p. */
1112
1113static inline void
1114set_error (aarch64_operand_error *mismatch_detail,
1115 enum aarch64_operand_error_kind kind, int idx,
1116 const char* error)
1117{
1118 if (mismatch_detail == NULL)
1119 return;
1120 mismatch_detail->kind = kind;
1121 mismatch_detail->index = idx;
1122 mismatch_detail->error = error;
1123}
1124
1125static inline void
1126set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1127 const char* error)
1128{
1129 if (mismatch_detail == NULL)
1130 return;
1131 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1132}
1133
1134static inline void
1135set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1136 int idx, int lower_bound, int upper_bound,
1137 const char* error)
1138{
1139 if (mismatch_detail == NULL)
1140 return;
1141 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1142 mismatch_detail->data[0] = lower_bound;
1143 mismatch_detail->data[1] = upper_bound;
1144}
1145
1146static inline void
1147set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1148 int idx, int lower_bound, int upper_bound)
1149{
1150 if (mismatch_detail == NULL)
1151 return;
1152 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1153 _("immediate value"));
1154}
1155
1156static inline void
1157set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1158 int idx, int lower_bound, int upper_bound)
1159{
1160 if (mismatch_detail == NULL)
1161 return;
1162 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1163 _("immediate offset"));
1164}
1165
1166static inline void
1167set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1168 int idx, int lower_bound, int upper_bound)
1169{
1170 if (mismatch_detail == NULL)
1171 return;
1172 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1173 _("register number"));
1174}
1175
1176static inline void
1177set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1178 int idx, int lower_bound, int upper_bound)
1179{
1180 if (mismatch_detail == NULL)
1181 return;
1182 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1183 _("register element index"));
1184}
1185
1186static inline void
1187set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1188 int idx, int lower_bound, int upper_bound)
1189{
1190 if (mismatch_detail == NULL)
1191 return;
1192 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1193 _("shift amount"));
1194}
1195
1196static inline void
1197set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1198 int alignment)
1199{
1200 if (mismatch_detail == NULL)
1201 return;
1202 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1203 mismatch_detail->data[0] = alignment;
1204}
1205
1206static inline void
1207set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1208 int expected_num)
1209{
1210 if (mismatch_detail == NULL)
1211 return;
1212 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1213 mismatch_detail->data[0] = expected_num;
1214}
1215
1216static inline void
1217set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1218 const char* error)
1219{
1220 if (mismatch_detail == NULL)
1221 return;
1222 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1223}
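/* For illustration: a call such as

     set_imm_out_of_range_error (mismatch_detail, 1, 0, 31);

   records AARCH64_OPDE_OUT_OF_RANGE for operand 1 with the static string
   "immediate value" and the bounds 0 and 31 in data[0]/data[1]; the
   assembler later combines these pieces into the final diagnostic.  */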
1224
1225/* General constraint checking based on operand code.
1226
1227 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1228 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1229
1230 This function has to be called after the qualifiers for all operands
1231 have been resolved.
1232
1233 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1234 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1235 of error messages during disassembly, where error messages are not
1236 wanted. We avoid the dynamic construction of strings of error messages
1237 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1238 use a combination of error code, static string and some integer data to
1239 represent an error. */
1240
1241static int
1242operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1243 enum aarch64_opnd type,
1244 const aarch64_opcode *opcode,
1245 aarch64_operand_error *mismatch_detail)
1246{
1247 unsigned num;
1248 unsigned char size;
1249 int64_t imm;
1250 const aarch64_opnd_info *opnd = opnds + idx;
1251 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1252
1253 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1254
1255 switch (aarch64_operands[type].op_class)
1256 {
1257 case AARCH64_OPND_CLASS_INT_REG:
1258 /* Check pair reg constraints for cas* instructions. */
1259 if (type == AARCH64_OPND_PAIRREG)
1260 {
1261 assert (idx == 1 || idx == 3);
1262 if (opnds[idx - 1].reg.regno % 2 != 0)
1263 {
1264 set_syntax_error (mismatch_detail, idx - 1,
1265 _("reg pair must start from even reg"));
1266 return 0;
1267 }
1268 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1269 {
1270 set_syntax_error (mismatch_detail, idx,
1271 _("reg pair must be contiguous"));
1272 return 0;
1273 }
1274 break;
1275 }
1276
1277 /* <Xt> may be optional in some IC and TLBI instructions. */
1278 if (type == AARCH64_OPND_Rt_SYS)
1279 {
1280 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1281 == AARCH64_OPND_CLASS_SYSTEM));
1282 if (opnds[1].present
1283 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1284 {
1285 set_other_error (mismatch_detail, idx, _("extraneous register"));
1286 return 0;
1287 }
1288 if (!opnds[1].present
1289 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1290 {
1291 set_other_error (mismatch_detail, idx, _("missing register"));
1292 return 0;
1293 }
1294 }
1295 switch (qualifier)
1296 {
1297 case AARCH64_OPND_QLF_WSP:
1298 case AARCH64_OPND_QLF_SP:
1299 if (!aarch64_stack_pointer_p (opnd))
1300 {
1301 set_other_error (mismatch_detail, idx,
1302 _("stack pointer register expected"));
1303 return 0;
1304 }
1305 break;
1306 default:
1307 break;
1308 }
1309 break;
1310
1311 case AARCH64_OPND_CLASS_COND:
1312 if (type == AARCH64_OPND_COND1
1313 && (opnds[idx].cond->value & 0xe) == 0xe)
1314 {
1315 /* Do not allow AL or NV. */
1316 set_syntax_error (mismatch_detail, idx, NULL);
1317 }
1318 break;
1319
1320 case AARCH64_OPND_CLASS_ADDRESS:
1321 /* Check writeback. */
1322 switch (opcode->iclass)
1323 {
1324 case ldst_pos:
1325 case ldst_unscaled:
1326 case ldstnapair_offs:
1327 case ldstpair_off:
1328 case ldst_unpriv:
1329 if (opnd->addr.writeback == 1)
1330 {
1331 set_syntax_error (mismatch_detail, idx,
1332 _("unexpected address writeback"));
1333 return 0;
1334 }
1335 break;
1336 case ldst_imm9:
1337 case ldstpair_indexed:
1338 case asisdlsep:
1339 case asisdlsop:
1340 if (opnd->addr.writeback == 0)
1341 {
1342 set_syntax_error (mismatch_detail, idx,
1343 _("address writeback expected"));
1344 return 0;
1345 }
1346 break;
1347 default:
1348 assert (opnd->addr.writeback == 0);
1349 break;
1350 }
1351 switch (type)
1352 {
1353 case AARCH64_OPND_ADDR_SIMM7:
1354 /* Scaled signed 7-bit immediate offset. */
1355 /* Get the size of the data element that is accessed, which may be
1356 different from that of the source register size,
1357 e.g. in strb/ldrb. */
1358 size = aarch64_get_qualifier_esize (opnd->qualifier);
1359 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1360 {
1361 set_offset_out_of_range_error (mismatch_detail, idx,
1362 -64 * size, 63 * size);
1363 return 0;
1364 }
1365 if (!value_aligned_p (opnd->addr.offset.imm, size))
1366 {
1367 set_unaligned_error (mismatch_detail, idx, size);
1368 return 0;
1369 }
1370 break;
1371 case AARCH64_OPND_ADDR_SIMM9:
1372 /* Unscaled signed 9-bit immediate offset. */
1373 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1374 {
1375 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1376 return 0;
1377 }
1378 break;
1379
1380 case AARCH64_OPND_ADDR_SIMM9_2:
1381 /* Unscaled signed 9-bit immediate offset, which has to be negative
1382 or unaligned. */
1383 size = aarch64_get_qualifier_esize (qualifier);
1384 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1385 && !value_aligned_p (opnd->addr.offset.imm, size))
1386 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1387 return 1;
1388 set_other_error (mismatch_detail, idx,
1389 _("negative or unaligned offset expected"));
1390 return 0;
1391
1392 case AARCH64_OPND_SIMD_ADDR_POST:
1393 /* AdvSIMD load/store multiple structures, post-index. */
1394 assert (idx == 1);
1395 if (opnd->addr.offset.is_reg)
1396 {
1397 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1398 return 1;
1399 else
1400 {
1401 set_other_error (mismatch_detail, idx,
1402 _("invalid register offset"));
1403 return 0;
1404 }
1405 }
1406 else
1407 {
1408 const aarch64_opnd_info *prev = &opnds[idx-1];
1409 unsigned num_bytes; /* total number of bytes transferred. */
1410 /* The opcode dependent area stores the number of elements in
1411 each structure to be loaded/stored. */
1412 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1413 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1414 /* Special handling of loading a single structure to all lanes. */
1415 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1416 * aarch64_get_qualifier_esize (prev->qualifier);
1417 else
1418 num_bytes = prev->reglist.num_regs
1419 * aarch64_get_qualifier_esize (prev->qualifier)
1420 * aarch64_get_qualifier_nelem (prev->qualifier);
1421 if ((int) num_bytes != opnd->addr.offset.imm)
1422 {
1423 set_other_error (mismatch_detail, idx,
1424 _("invalid post-increment amount"));
1425 return 0;
1426 }
1427 }
1428 break;
1429
1430 case AARCH64_OPND_ADDR_REGOFF:
1431 /* Get the size of the data element that is accessed, which may be
1432 different from that of the source register size,
1433 e.g. in strb/ldrb. */
1434 size = aarch64_get_qualifier_esize (opnd->qualifier);
1435 /* It is either no shift or shift by the binary logarithm of SIZE. */
1436 if (opnd->shifter.amount != 0
1437 && opnd->shifter.amount != (int)get_logsz (size))
1438 {
1439 set_other_error (mismatch_detail, idx,
1440 _("invalid shift amount"));
1441 return 0;
1442 }
1443 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1444 operators. */
1445 switch (opnd->shifter.kind)
1446 {
1447 case AARCH64_MOD_UXTW:
1448 case AARCH64_MOD_LSL:
1449 case AARCH64_MOD_SXTW:
1450 case AARCH64_MOD_SXTX: break;
1451 default:
1452 set_other_error (mismatch_detail, idx,
1453 _("invalid extend/shift operator"));
1454 return 0;
1455 }
1456 break;
1457
1458 case AARCH64_OPND_ADDR_UIMM12:
1459 imm = opnd->addr.offset.imm;
1460 /* Get the size of the data element that is accessed, which may be
1461 different from that of the source register size,
1462 e.g. in strb/ldrb. */
1463 size = aarch64_get_qualifier_esize (qualifier);
1464 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1465 {
1466 set_offset_out_of_range_error (mismatch_detail, idx,
1467 0, 4095 * size);
1468 return 0;
1469 }
1470 if (!value_aligned_p (opnd->addr.offset.imm, size))
1471 {
1472 set_unaligned_error (mismatch_detail, idx, size);
1473 return 0;
1474 }
1475 break;
1476
1477 case AARCH64_OPND_ADDR_PCREL14:
1478 case AARCH64_OPND_ADDR_PCREL19:
1479 case AARCH64_OPND_ADDR_PCREL21:
1480 case AARCH64_OPND_ADDR_PCREL26:
1481 imm = opnd->imm.value;
1482 if (operand_need_shift_by_two (get_operand_from_code (type)))
1483 {
1484 /* The offset value in a PC-relative branch instruction is always
1485 4-byte aligned and is encoded without the lowest 2 bits. */
1486 if (!value_aligned_p (imm, 4))
1487 {
1488 set_unaligned_error (mismatch_detail, idx, 4);
1489 return 0;
1490 }
1491 /* Right shift by 2 so that we can carry out the following check
1492 canonically. */
1493 imm >>= 2;
1494 }
1495 size = get_operand_fields_width (get_operand_from_code (type));
1496 if (!value_fit_signed_field_p (imm, size))
1497 {
1498 set_other_error (mismatch_detail, idx,
1499 _("immediate out of range"));
1500 return 0;
1501 }
1502 break;
1503
1504 default:
1505 break;
1506 }
1507 break;
1508
1509 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1510 /* The opcode dependent area stores the number of elements in
1511 each structure to be loaded/stored. */
1512 num = get_opcode_dependent_value (opcode);
1513 switch (type)
1514 {
1515 case AARCH64_OPND_LVt:
1516 assert (num >= 1 && num <= 4);
1517 /* Unless LD1/ST1, the number of registers should be equal to that
1518 of the structure elements. */
1519 if (num != 1 && opnd->reglist.num_regs != num)
1520 {
1521 set_reg_list_error (mismatch_detail, idx, num);
1522 return 0;
1523 }
1524 break;
1525 case AARCH64_OPND_LVt_AL:
1526 case AARCH64_OPND_LEt:
1527 assert (num >= 1 && num <= 4);
1528 /* The number of registers should be equal to that of the structure
1529 elements. */
1530 if (opnd->reglist.num_regs != num)
1531 {
1532 set_reg_list_error (mismatch_detail, idx, num);
1533 return 0;
1534 }
1535 break;
1536 default:
1537 break;
1538 }
1539 break;
1540
1541 case AARCH64_OPND_CLASS_IMMEDIATE:
1542 /* Constraint check on immediate operand. */
1543 imm = opnd->imm.value;
1544 /* E.g. imm_0_31 constrains value to be 0..31. */
1545 if (qualifier_value_in_range_constraint_p (qualifier)
1546 && !value_in_range_p (imm, get_lower_bound (qualifier),
1547 get_upper_bound (qualifier)))
1548 {
1549 set_imm_out_of_range_error (mismatch_detail, idx,
1550 get_lower_bound (qualifier),
1551 get_upper_bound (qualifier));
1552 return 0;
1553 }
1554
1555 switch (type)
1556 {
1557 case AARCH64_OPND_AIMM:
1558 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1559 {
1560 set_other_error (mismatch_detail, idx,
1561 _("invalid shift operator"));
1562 return 0;
1563 }
1564 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1565 {
1566 set_other_error (mismatch_detail, idx,
1567 _("shift amount expected to be 0 or 12"));
1568 return 0;
1569 }
1570 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1571 {
1572 set_other_error (mismatch_detail, idx,
1573 _("immediate out of range"));
1574 return 0;
1575 }
1576 break;
1577
1578 case AARCH64_OPND_HALF:
1579 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1580 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1581 {
1582 set_other_error (mismatch_detail, idx,
1583 _("invalid shift operator"));
1584 return 0;
1585 }
1586 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1587 if (!value_aligned_p (opnd->shifter.amount, 16))
1588 {
1589 set_other_error (mismatch_detail, idx,
1590 _("shift amount should be a multiple of 16"));
1591 return 0;
1592 }
1593 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1594 {
1595 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1596 0, size * 8 - 16);
1597 return 0;
1598 }
1599 if (opnd->imm.value < 0)
1600 {
1601 set_other_error (mismatch_detail, idx,
1602 _("negative immediate value not allowed"));
1603 return 0;
1604 }
1605 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1606 {
1607 set_other_error (mismatch_detail, idx,
1608 _("immediate out of range"));
1609 return 0;
1610 }
1611 break;
1612
1613 case AARCH64_OPND_IMM_MOV:
1614 {
1615 int is32 = aarch64_get_qualifier_esize (opnds[0].qualifier) == 4;
1616 imm = opnd->imm.value;
1617 assert (idx == 1);
1618 switch (opcode->op)
1619 {
1620 case OP_MOV_IMM_WIDEN:
1621 imm = ~imm;
1622 /* Fall through... */
1623 case OP_MOV_IMM_WIDE:
1624 if (!aarch64_wide_constant_p (imm, is32, NULL))
1625 {
1626 set_other_error (mismatch_detail, idx,
1627 _("immediate out of range"));
1628 return 0;
1629 }
1630 break;
1631 case OP_MOV_IMM_LOG:
1632 if (!aarch64_logical_immediate_p (imm, is32, NULL))
1633 {
1634 set_other_error (mismatch_detail, idx,
1635 _("immediate out of range"));
1636 return 0;
1637 }
1638 break;
1639 default:
1640 assert (0);
1641 return 0;
1642 }
1643 }
1644 break;
1645
1646 case AARCH64_OPND_NZCV:
1647 case AARCH64_OPND_CCMP_IMM:
1648 case AARCH64_OPND_EXCEPTION:
1649 case AARCH64_OPND_UIMM4:
1650 case AARCH64_OPND_UIMM7:
1651 case AARCH64_OPND_UIMM3_OP1:
1652 case AARCH64_OPND_UIMM3_OP2:
1653 size = get_operand_fields_width (get_operand_from_code (type));
1654 assert (size < 32);
1655 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1656 {
1657 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1658 (1 << size) - 1);
1659 return 0;
1660 }
1661 break;
1662
1663 case AARCH64_OPND_WIDTH:
1664 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
1665 && opnds[0].type == AARCH64_OPND_Rd);
1666 size = get_upper_bound (qualifier);
1667 if (opnd->imm.value + opnds[idx-1].imm.value > size)
1668 /* lsb+width <= reg.size */
1669 {
1670 set_imm_out_of_range_error (mismatch_detail, idx, 1,
1671 size - opnds[idx-1].imm.value);
1672 return 0;
1673 }
1674 break;
1675
1676 case AARCH64_OPND_LIMM:
1677 {
1678 int is32 = opnds[0].qualifier == AARCH64_OPND_QLF_W;
1679 uint64_t uimm = opnd->imm.value;
1680 if (opcode->op == OP_BIC)
1681 uimm = ~uimm;
1682 if (aarch64_logical_immediate_p (uimm, is32, NULL) == FALSE)
1683 {
1684 set_other_error (mismatch_detail, idx,
1685 _("immediate out of range"));
1686 return 0;
1687 }
1688 }
1689 break;
1690
1691 case AARCH64_OPND_IMM0:
1692 case AARCH64_OPND_FPIMM0:
1693 if (opnd->imm.value != 0)
1694 {
1695 set_other_error (mismatch_detail, idx,
1696 _("immediate zero expected"));
1697 return 0;
1698 }
1699 break;
1700
1701 case AARCH64_OPND_SHLL_IMM:
1702 assert (idx == 2);
1703 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
1704 if (opnd->imm.value != size)
1705 {
1706 set_other_error (mismatch_detail, idx,
1707 _("invalid shift amount"));
1708 return 0;
1709 }
1710 break;
1711
1712 case AARCH64_OPND_IMM_VLSL:
1713 size = aarch64_get_qualifier_esize (qualifier);
1714 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
1715 {
1716 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1717 size * 8 - 1);
1718 return 0;
1719 }
1720 break;
1721
1722 case AARCH64_OPND_IMM_VLSR:
1723 size = aarch64_get_qualifier_esize (qualifier);
1724 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
1725 {
1726 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
1727 return 0;
1728 }
1729 break;
1730
1731 case AARCH64_OPND_SIMD_IMM:
1732 case AARCH64_OPND_SIMD_IMM_SFT:
1733 /* Qualifier check. */
1734 switch (qualifier)
1735 {
1736 case AARCH64_OPND_QLF_LSL:
1737 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1738 {
1739 set_other_error (mismatch_detail, idx,
1740 _("invalid shift operator"));
1741 return 0;
1742 }
1743 break;
1744 case AARCH64_OPND_QLF_MSL:
1745 if (opnd->shifter.kind != AARCH64_MOD_MSL)
1746 {
1747 set_other_error (mismatch_detail, idx,
1748 _("invalid shift operator"));
1749 return 0;
1750 }
1751 break;
1752 case AARCH64_OPND_QLF_NIL:
1753 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1754 {
1755 set_other_error (mismatch_detail, idx,
1756 _("shift is not permitted"));
1757 return 0;
1758 }
1759 break;
1760 default:
1761 assert (0);
1762 return 0;
1763 }
1764 /* Is the immediate valid? */
1765 assert (idx == 1);
1766 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
1767 {
1768 /* uimm8 or simm8 */
1769 if (!value_in_range_p (opnd->imm.value, -128, 255))
1770 {
 1771 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
1772 return 0;
1773 }
1774 }
1775 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
1776 {
1777 /* uimm64 is not
1778 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1779 ffffffffgggggggghhhhhhhh'. */
1780 set_other_error (mismatch_detail, idx,
1781 _("invalid value for immediate"));
1782 return 0;
1783 }
1784 /* Is the shift amount valid? */
1785 switch (opnd->shifter.kind)
1786 {
1787 case AARCH64_MOD_LSL:
1788 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1789 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
 1790 {
1791 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
1792 (size - 1) * 8);
a06ea964
NC
1793 return 0;
1794 }
1795 if (!value_aligned_p (opnd->shifter.amount, 8))
 1796 {
 1797 set_unaligned_error (mismatch_detail, idx, 8);
1798 return 0;
1799 }
1800 break;
1801 case AARCH64_MOD_MSL:
1802 /* Only 8 and 16 are valid shift amounts. */
1803 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
1804 {
1805 set_other_error (mismatch_detail, idx,
1806 _("shift amount expected to be 0 or 16"));
1807 return 0;
1808 }
1809 break;
1810 default:
1811 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1812 {
1813 set_other_error (mismatch_detail, idx,
1814 _("invalid shift operator"));
1815 return 0;
1816 }
1817 break;
1818 }
1819 break;
1820
1821 case AARCH64_OPND_FPIMM:
1822 case AARCH64_OPND_SIMD_FPIMM:
1823 if (opnd->imm.is_fp == 0)
1824 {
1825 set_other_error (mismatch_detail, idx,
1826 _("floating-point immediate expected"));
1827 return 0;
1828 }
1829 /* The value is expected to be an 8-bit floating-point constant with
1830 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1831 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1832 instruction). */
1833 if (!value_in_range_p (opnd->imm.value, 0, 255))
1834 {
1835 set_other_error (mismatch_detail, idx,
1836 _("immediate out of range"));
1837 return 0;
1838 }
1839 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1840 {
1841 set_other_error (mismatch_detail, idx,
1842 _("invalid shift operator"));
1843 return 0;
1844 }
1845 break;
1846
1847 default:
1848 break;
1849 }
1850 break;
1851
1852 case AARCH64_OPND_CLASS_CP_REG:
1853 /* Cn or Cm: 4-bit opcode field named for historical reasons.
1854 Valid range: C0 - C15. */
1855 if (opnd->reg.regno > 15)
1856 {
1857 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1858 return 0;
1859 }
1860 break;
1861
1862 case AARCH64_OPND_CLASS_SYSTEM:
1863 switch (type)
1864 {
1865 case AARCH64_OPND_PSTATEFIELD:
1866 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
1867 /* MSR PAN, #uimm4
1868 The immediate must be #0 or #1. */
1869 if (opnd->pstatefield == 0x04 /* PAN. */
1870 && opnds[1].imm.value > 1)
1871 {
1872 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
1873 return 0;
1874 }
1875 /* MSR SPSel, #uimm4
1876 Uses uimm4 as a control value to select the stack pointer: if
1877 bit 0 is set it selects the current exception level's stack
1878 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
1879 Bits 1 to 3 of uimm4 are reserved and should be zero. */
1880 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
1881 {
1882 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
1883 return 0;
1884 }
1885 break;
1886 default:
1887 break;
1888 }
1889 break;
1890
1891 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
1892 /* Get the upper bound for the element index. */
1893 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1894 /* Index out-of-range. */
1895 if (!value_in_range_p (opnd->reglane.index, 0, num))
1896 {
1897 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1898 return 0;
1899 }
1900 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
1901 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
1902 number is encoded in "size:M:Rm":
1903 size <Vm>
1904 00 RESERVED
1905 01 0:Rm
1906 10 M:Rm
1907 11 RESERVED */
1908 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
1909 && !value_in_range_p (opnd->reglane.regno, 0, 15))
1910 {
1911 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1912 return 0;
1913 }
1914 break;
1915
1916 case AARCH64_OPND_CLASS_MODIFIED_REG:
1917 assert (idx == 1 || idx == 2);
1918 switch (type)
1919 {
1920 case AARCH64_OPND_Rm_EXT:
1921 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
1922 && opnd->shifter.kind != AARCH64_MOD_LSL)
1923 {
1924 set_other_error (mismatch_detail, idx,
1925 _("extend operator expected"));
1926 return 0;
1927 }
1928 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
1929 (i.e. SP), in which case it defaults to LSL. The LSL alias is
1930 only valid when "Rd" or "Rn" is '11111', and is preferred in that
1931 case. */
1932 if (!aarch64_stack_pointer_p (opnds + 0)
1933 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
1934 {
1935 if (!opnd->shifter.operator_present)
1936 {
1937 set_other_error (mismatch_detail, idx,
1938 _("missing extend operator"));
1939 return 0;
1940 }
1941 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
1942 {
1943 set_other_error (mismatch_detail, idx,
1944 _("'LSL' operator not allowed"));
1945 return 0;
1946 }
1947 }
1948 assert (opnd->shifter.operator_present /* Default to LSL. */
1949 || opnd->shifter.kind == AARCH64_MOD_LSL);
1950 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
1951 {
1952 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
1953 return 0;
1954 }
1955 /* In the 64-bit form, the final register operand is written as Wm
1956 for all but the (possibly omitted) UXTX/LSL and SXTX
1957 operators.
1958 N.B. GAS allows X register to be used with any operator as a
1959 programming convenience. */
1960 if (qualifier == AARCH64_OPND_QLF_X
1961 && opnd->shifter.kind != AARCH64_MOD_LSL
1962 && opnd->shifter.kind != AARCH64_MOD_UXTX
1963 && opnd->shifter.kind != AARCH64_MOD_SXTX)
1964 {
1965 set_other_error (mismatch_detail, idx, _("W register expected"));
1966 return 0;
1967 }
1968 break;
1969
1970 case AARCH64_OPND_Rm_SFT:
1971 /* ROR is not available to the shifted register operand in
1972 arithmetic instructions. */
1973 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
1974 {
1975 set_other_error (mismatch_detail, idx,
1976 _("shift operator expected"));
1977 return 0;
1978 }
1979 if (opnd->shifter.kind == AARCH64_MOD_ROR
1980 && opcode->iclass != log_shift)
1981 {
1982 set_other_error (mismatch_detail, idx,
1983 _("'ROR' operator not allowed"));
1984 return 0;
1985 }
1986 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
1987 if (!value_in_range_p (opnd->shifter.amount, 0, num))
1988 {
1989 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
1990 return 0;
1991 }
1992 break;
1993
1994 default:
1995 break;
1996 }
1997 break;
1998
1999 default:
2000 break;
2001 }
2002
2003 return 1;
2004}
2005
2006/* Main entrypoint for the operand constraint checking.
2007
2008 Return 1 if operands of *INST meet the constraint applied by the operand
2009 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2010 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2011 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2012 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2013 error kind when it is notified that an instruction does not pass the check).
2014
2015 Un-determined operand qualifiers may get established during the process. */
2016
2017int
2018aarch64_match_operands_constraint (aarch64_inst *inst,
2019 aarch64_operand_error *mismatch_detail)
2020{
2021 int i;
2022
2023 DEBUG_TRACE ("enter");
2024
2025 /* Match operands' qualifiers.
2026 *INST has already had qualifiers established for some, if not all, of
2027 its operands; we need to find out whether these established
2028 qualifiers match one of the qualifier sequences in
2029 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2030 the corresponding qualifier in that sequence.
2031 Only basic operand constraint checking is done here; the more thorough
2032 constraint checking is carried out by operand_general_constraint_met_p,
2033 which has to be called after this in order to get all of the operands'
2034 qualifiers established. */
2035 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2036 {
2037 DEBUG_TRACE ("FAIL on operand qualifier matching");
2038 if (mismatch_detail)
2039 {
2040 /* Return an error type to indicate that it is a qualifier
2041 matching failure; we don't care about which operand as there
2042 is enough information in the opcode table to reproduce it. */
2043 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2044 mismatch_detail->index = -1;
2045 mismatch_detail->error = NULL;
2046 }
2047 return 0;
2048 }
2049
2050 /* Match operands' constraint. */
2051 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2052 {
2053 enum aarch64_opnd type = inst->opcode->operands[i];
2054 if (type == AARCH64_OPND_NIL)
2055 break;
2056 if (inst->operands[i].skip)
2057 {
2058 DEBUG_TRACE ("skip the incomplete operand %d", i);
2059 continue;
2060 }
2061 if (operand_general_constraint_met_p (inst->operands, i, type,
2062 inst->opcode, mismatch_detail) == 0)
2063 {
2064 DEBUG_TRACE ("FAIL on operand %d", i);
2065 return 0;
2066 }
2067 }
2068
2069 DEBUG_TRACE ("PASS");
2070
2071 return 1;
2072}
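
/* A typical caller (minimal sketch only; the exact reporting is left to the
   user of the library, e.g. GAS):

     aarch64_operand_error detail;
     if (!aarch64_match_operands_constraint (inst, &detail))
       {
         if (detail.kind == AARCH64_OPDE_INVALID_VARIANT)
           ...  no qualifier sequence in the opcode table matched  ...
         else
           ...  detail.index and detail.error describe the operand  ...
       }
*/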
2073
2074/* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2075 Also updates the TYPE of each INST->OPERANDS with the corresponding
2076 value of OPCODE->OPERANDS.
2077
2078 Note that some operand qualifiers may need to be manually cleared by
2079 the caller before it further calls the aarch64_opcode_encode; by
2080 doing this, it helps the qualifier matching facilities work
2081 properly. */
2082
2083const aarch64_opcode*
2084aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2085{
2086 int i;
2087 const aarch64_opcode *old = inst->opcode;
2088
2089 inst->opcode = opcode;
2090
2091 /* Update the operand types. */
2092 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2093 {
2094 inst->operands[i].type = opcode->operands[i];
2095 if (opcode->operands[i] == AARCH64_OPND_NIL)
2096 break;
2097 }
2098
2099 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2100
2101 return old;
2102}
2103
2104int
2105aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2106{
2107 int i;
2108 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2109 if (operands[i] == operand)
2110 return i;
2111 else if (operands[i] == AARCH64_OPND_NIL)
2112 break;
2113 return -1;
2114}
2115\f
2116/* [0][0] 32-bit integer regs with sp Wn
2117 [0][1] 64-bit integer regs with sp Xn sf=1
2118 [1][0] 32-bit integer regs with #0 Wn
2119 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2120static const char *int_reg[2][2][32] = {
2121#define R32 "w"
2122#define R64 "x"
2123 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
2124 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2125 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2126 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", "wsp" },
2127 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
2128 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2129 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2130 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", "sp" } },
2131 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
2132 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2133 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2134 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
2135 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
2136 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2137 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2138 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
2139#undef R64
2140#undef R32
2141};
2142
2143/* Return the integer register name.
2144 If SP_REG_P is not 0, R31 is an SP reg; otherwise R31 is the zero reg. */
2145
2146static inline const char *
2147get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2148{
2149 const int has_zr = sp_reg_p ? 0 : 1;
2150 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2151 return int_reg[has_zr][is_64][regno];
2152}
2153
2154/* Like get_int_reg_name, but IS_64 is always 1. */
2155
2156static inline const char *
2157get_64bit_int_reg_name (int regno, int sp_reg_p)
2158{
2159 const int has_zr = sp_reg_p ? 0 : 1;
2160 return int_reg[has_zr][1][regno];
2161}
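
/* For example, with the tables above,
   get_int_reg_name (0, AARCH64_OPND_QLF_W, 0) is "w0",
   get_int_reg_name (31, AARCH64_OPND_QLF_W, 1) is "wsp" and
   get_int_reg_name (31, AARCH64_OPND_QLF_X, 0) is "xzr".  */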
2162
2163/* Types for expanding an encoded 8-bit value to a floating-point value. */
2164
2165typedef union
2166{
2167 uint64_t i;
2168 double d;
2169} double_conv_t;
2170
2171typedef union
2172{
2173 uint32_t i;
2174 float f;
2175} single_conv_t;
2176
cf86120b
MW
2177typedef union
2178{
2179 uint32_t i;
2180 float f;
2181} half_conv_t;
2182
a06ea964
NC
2183/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2184 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2185 (depending on the type of the instruction). IMM8 will be expanded to a
cf86120b
MW
2186 single-precision floating-point value (SIZE == 4) or a double-precision
2187 floating-point value (SIZE == 8). A half-precision floating-point value
2188 (SIZE == 2) is expanded to a single-precision floating-point value. The
2189 expanded value is returned. */
a06ea964
NC
2190
2191static uint64_t
cf86120b 2192expand_fp_imm (int size, uint32_t imm8)
a06ea964
NC
2193{
2194 uint64_t imm;
2195 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2196
2197 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2198 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2199 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2200 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2201 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
cf86120b 2202 if (size == 8)
a06ea964
NC
2203 {
2204 imm = (imm8_7 << (63-32)) /* imm8<7> */
2205 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
2206 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2207 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2208 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2209 imm <<= 32;
2210 }
cf86120b 2211 else if (size == 4 || size == 2)
a06ea964
NC
2212 {
2213 imm = (imm8_7 << 31) /* imm8<7> */
2214 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2215 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2216 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2217 }
cf86120b
MW
2218 else
2219 {
2220 /* An unsupported size. */
2221 assert (0);
2222 }
a06ea964
NC
2223
2224 return imm;
2225}
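
/* Worked example (illustrative only): for the common FMOV constant 1.0,
   imm8 = 0x70, i.e. a:b:c:d:e:f:g:h = 0:1:1:1:0:0:0:0.  With SIZE == 4 the
   expansion above gives sign 0, exponent NOT(b):Replicate(b,5):c:d
   = 0b01111111 and a zero mantissa, i.e. the IEEE-754 single 0x3f800000:

     single_conv_t one;
     one.i = expand_fp_imm (4, 0x70);
     ... one.f is now 1.0f ...
*/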
2226
2227/* Produce the string representation of the register list operand *OPND
2228 in the buffer pointed by BUF of size SIZE. */
2229static void
2230print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd)
2231{
2232 const int num_regs = opnd->reglist.num_regs;
2233 const int first_reg = opnd->reglist.first_regno;
2234 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2235 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2236 char tb[8]; /* Temporary buffer. */
2237
2238 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2239 assert (num_regs >= 1 && num_regs <= 4);
2240
2241 /* Prepare the index if any. */
2242 if (opnd->reglist.has_index)
2243 snprintf (tb, 8, "[%d]", opnd->reglist.index);
2244 else
2245 tb[0] = '\0';
2246
2247 /* The hyphenated form is preferred for disassembly if there are
2248 more than two registers in the list, and the register numbers
2249 are monotonically increasing in increments of one. */
2250 if (num_regs > 2 && last_reg > first_reg)
2251 snprintf (buf, size, "{v%d.%s-v%d.%s}%s", first_reg, qlf_name,
2252 last_reg, qlf_name, tb);
2253 else
2254 {
2255 const int reg0 = first_reg;
2256 const int reg1 = (first_reg + 1) & 0x1f;
2257 const int reg2 = (first_reg + 2) & 0x1f;
2258 const int reg3 = (first_reg + 3) & 0x1f;
2259
2260 switch (num_regs)
2261 {
2262 case 1:
2263 snprintf (buf, size, "{v%d.%s}%s", reg0, qlf_name, tb);
2264 break;
2265 case 2:
2266 snprintf (buf, size, "{v%d.%s, v%d.%s}%s", reg0, qlf_name,
2267 reg1, qlf_name, tb);
2268 break;
2269 case 3:
2270 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0, qlf_name,
2271 reg1, qlf_name, reg2, qlf_name, tb);
2272 break;
2273 case 4:
2274 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
2275 reg0, qlf_name, reg1, qlf_name, reg2, qlf_name,
2276 reg3, qlf_name, tb);
2277 break;
2278 }
2279 }
2280}
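
/* For illustration: a 4-register list starting at v0 with the 16B qualifier
   is printed in the hyphenated form "{v0.16b-v3.16b}", while the same list
   starting at v30 wraps around and is printed element by element as
   "{v30.16b, v31.16b, v0.16b, v1.16b}".  */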
2281
2282/* Produce the string representation of the register offset address operand
2283 *OPND in the buffer pointed by BUF of size SIZE. */
2284static void
2285print_register_offset_address (char *buf, size_t size,
2286 const aarch64_opnd_info *opnd)
2287{
2288 const size_t tblen = 16;
2289 char tb[tblen]; /* Temporary buffer. */
2290 bfd_boolean lsl_p = FALSE; /* Is LSL shift operator? */
2291 bfd_boolean wm_p = FALSE; /* Should Rm be Wm? */
2292 bfd_boolean print_extend_p = TRUE;
2293 bfd_boolean print_amount_p = TRUE;
2294 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2295
2296 switch (opnd->shifter.kind)
2297 {
2298 case AARCH64_MOD_UXTW: wm_p = TRUE; break;
2299 case AARCH64_MOD_LSL : lsl_p = TRUE; break;
2300 case AARCH64_MOD_SXTW: wm_p = TRUE; break;
2301 case AARCH64_MOD_SXTX: break;
2302 default: assert (0);
2303 }
2304
2305 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2306 || !opnd->shifter.amount_present))
2307 {
2308 /* Don't print the shift/extend amount when the amount is zero and
2309 it is not the special case of an 8-bit load/store instruction. */
2310 print_amount_p = FALSE;
2311 /* Likewise, no need to print the shift operator LSL in such a
2312 situation. */
2313 if (lsl_p)
2314 print_extend_p = FALSE;
2315 }
2316
2317 /* Prepare for the extend/shift. */
2318 if (print_extend_p)
2319 {
2320 if (print_amount_p)
2321 snprintf (tb, tblen, ",%s #%d", shift_name, opnd->shifter.amount);
2322 else
2323 snprintf (tb, tblen, ",%s", shift_name);
2324 }
2325 else
2326 tb[0] = '\0';
2327
a58549dd 2328 snprintf (buf, size, "[%s,%s%s]",
a06ea964 2329 get_64bit_int_reg_name (opnd->addr.base_regno, 1),
a58549dd
YZ
2330 get_int_reg_name (opnd->addr.offset.regno,
2331 wm_p ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X,
2332 0 /* sp_reg_p */),
2333 tb);
a06ea964
NC
2334}
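
/* For example: a base of x2 with a w3 offset extended by UXTW #2 is printed
   as "[x2,w3,uxtw #2]", while an LSL with a zero amount is omitted entirely,
   giving just "[x2,x3]".  */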
2335
2336/* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2337 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2338 PC, PCREL_P and ADDRESS are used to pass in and return information about
2339 the PC-relative address calculation, where the PC value is passed in
2340 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2341 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2342 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2343
2344 The function serves both the disassembler and the assembler diagnostics
2345 issuer, which is the reason why it lives in this file. */
2346
2347void
2348aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2349 const aarch64_opcode *opcode,
2350 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2351 bfd_vma *address)
2352{
2353 int i;
2354 const char *name = NULL;
2355 const aarch64_opnd_info *opnd = opnds + idx;
2356 enum aarch64_modifier_kind kind;
2357 uint64_t addr;
2358
2359 buf[0] = '\0';
2360 if (pcrel_p)
2361 *pcrel_p = 0;
2362
2363 switch (opnd->type)
2364 {
2365 case AARCH64_OPND_Rd:
2366 case AARCH64_OPND_Rn:
2367 case AARCH64_OPND_Rm:
2368 case AARCH64_OPND_Rt:
2369 case AARCH64_OPND_Rt2:
2370 case AARCH64_OPND_Rs:
2371 case AARCH64_OPND_Ra:
2372 case AARCH64_OPND_Rt_SYS:
ee804238 2373 case AARCH64_OPND_PAIRREG:
a06ea964
NC
2374 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2375 the <ic_op>, therefore we use opnd->present to override the
2376 generic optional-ness information. */
2377 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2378 break;
2379 /* Omit the operand, e.g. RET. */
2380 if (optional_operand_p (opcode, idx)
2381 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2382 break;
2383 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2384 || opnd->qualifier == AARCH64_OPND_QLF_X);
2385 snprintf (buf, size, "%s",
2386 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2387 break;
2388
2389 case AARCH64_OPND_Rd_SP:
2390 case AARCH64_OPND_Rn_SP:
2391 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2392 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2393 || opnd->qualifier == AARCH64_OPND_QLF_X
2394 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2395 snprintf (buf, size, "%s",
2396 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2397 break;
2398
2399 case AARCH64_OPND_Rm_EXT:
2400 kind = opnd->shifter.kind;
2401 assert (idx == 1 || idx == 2);
2402 if ((aarch64_stack_pointer_p (opnds)
2403 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2404 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2405 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2406 && kind == AARCH64_MOD_UXTW)
2407 || (opnd->qualifier == AARCH64_OPND_QLF_X
2408 && kind == AARCH64_MOD_UXTX)))
2409 {
2410 /* 'LSL' is the preferred form in this case. */
2411 kind = AARCH64_MOD_LSL;
2412 if (opnd->shifter.amount == 0)
2413 {
2414 /* Shifter omitted. */
2415 snprintf (buf, size, "%s",
2416 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2417 break;
2418 }
2419 }
2420 if (opnd->shifter.amount)
2421 snprintf (buf, size, "%s, %s #%d",
2422 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2423 aarch64_operand_modifiers[kind].name,
2424 opnd->shifter.amount);
2425 else
2426 snprintf (buf, size, "%s, %s",
2427 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2428 aarch64_operand_modifiers[kind].name);
2429 break;
2430
2431 case AARCH64_OPND_Rm_SFT:
2432 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2433 || opnd->qualifier == AARCH64_OPND_QLF_X);
2434 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
2435 snprintf (buf, size, "%s",
2436 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2437 else
2438 snprintf (buf, size, "%s, %s #%d",
2439 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2440 aarch64_operand_modifiers[opnd->shifter.kind].name,
2441 opnd->shifter.amount);
2442 break;
2443
2444 case AARCH64_OPND_Fd:
2445 case AARCH64_OPND_Fn:
2446 case AARCH64_OPND_Fm:
2447 case AARCH64_OPND_Fa:
2448 case AARCH64_OPND_Ft:
2449 case AARCH64_OPND_Ft2:
2450 case AARCH64_OPND_Sd:
2451 case AARCH64_OPND_Sn:
2452 case AARCH64_OPND_Sm:
2453 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
2454 opnd->reg.regno);
2455 break;
2456
2457 case AARCH64_OPND_Vd:
2458 case AARCH64_OPND_Vn:
2459 case AARCH64_OPND_Vm:
2460 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
2461 aarch64_get_qualifier_name (opnd->qualifier));
2462 break;
2463
2464 case AARCH64_OPND_Ed:
2465 case AARCH64_OPND_En:
2466 case AARCH64_OPND_Em:
2467 snprintf (buf, size, "v%d.%s[%d]", opnd->reglane.regno,
2468 aarch64_get_qualifier_name (opnd->qualifier),
2469 opnd->reglane.index);
2470 break;
2471
2472 case AARCH64_OPND_VdD1:
2473 case AARCH64_OPND_VnD1:
2474 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
2475 break;
2476
2477 case AARCH64_OPND_LVn:
2478 case AARCH64_OPND_LVt:
2479 case AARCH64_OPND_LVt_AL:
2480 case AARCH64_OPND_LEt:
2481 print_register_list (buf, size, opnd);
2482 break;
2483
2484 case AARCH64_OPND_Cn:
2485 case AARCH64_OPND_Cm:
2486 snprintf (buf, size, "C%d", opnd->reg.regno);
2487 break;
2488
2489 case AARCH64_OPND_IDX:
2490 case AARCH64_OPND_IMM:
2491 case AARCH64_OPND_WIDTH:
2492 case AARCH64_OPND_UIMM3_OP1:
2493 case AARCH64_OPND_UIMM3_OP2:
2494 case AARCH64_OPND_BIT_NUM:
2495 case AARCH64_OPND_IMM_VLSL:
2496 case AARCH64_OPND_IMM_VLSR:
2497 case AARCH64_OPND_SHLL_IMM:
2498 case AARCH64_OPND_IMM0:
2499 case AARCH64_OPND_IMMR:
2500 case AARCH64_OPND_IMMS:
2501 case AARCH64_OPND_FBITS:
a06ea964
NC
2502 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2503 break;
2504
fb098a1e
YZ
2505 case AARCH64_OPND_IMM_MOV:
2506 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2507 {
2508 case 4: /* e.g. MOV Wd, #<imm32>. */
2509 {
2510 int imm32 = opnd->imm.value;
2511 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
2512 }
2513 break;
2514 case 8: /* e.g. MOV Xd, #<imm64>. */
2515 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
2516 opnd->imm.value, opnd->imm.value);
2517 break;
2518 default: assert (0);
2519 }
2520 break;
2521
a06ea964
NC
2522 case AARCH64_OPND_FPIMM0:
2523 snprintf (buf, size, "#0.0");
2524 break;
2525
2526 case AARCH64_OPND_LIMM:
2527 case AARCH64_OPND_AIMM:
2528 case AARCH64_OPND_HALF:
2529 if (opnd->shifter.amount)
2530 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
2531 opnd->shifter.amount);
2532 else
2533 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2534 break;
2535
2536 case AARCH64_OPND_SIMD_IMM:
2537 case AARCH64_OPND_SIMD_IMM_SFT:
2538 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
2539 || opnd->shifter.kind == AARCH64_MOD_NONE)
2540 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2541 else
2542 snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
2543 aarch64_operand_modifiers[opnd->shifter.kind].name,
2544 opnd->shifter.amount);
2545 break;
2546
2547 case AARCH64_OPND_FPIMM:
2548 case AARCH64_OPND_SIMD_FPIMM:
2549 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2550 {
cf86120b
MW
2551 case 2: /* e.g. FMOV <Hd>, #<imm>. */
2552 {
2553 half_conv_t c;
2554 c.i = expand_fp_imm (2, opnd->imm.value);
2555 snprintf (buf, size, "#%.18e", c.f);
2556 }
2557 break;
a06ea964
NC
2558 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
2559 {
2560 single_conv_t c;
cf86120b 2561 c.i = expand_fp_imm (4, opnd->imm.value);
a06ea964
NC
2562 snprintf (buf, size, "#%.18e", c.f);
2563 }
2564 break;
2565 case 8: /* e.g. FMOV <Dd>, #<imm>. */
2566 {
2567 double_conv_t c;
cf86120b 2568 c.i = expand_fp_imm (8, opnd->imm.value);
a06ea964
NC
2569 snprintf (buf, size, "#%.18e", c.d);
2570 }
2571 break;
2572 default: assert (0);
2573 }
2574 break;
2575
2576 case AARCH64_OPND_CCMP_IMM:
2577 case AARCH64_OPND_NZCV:
2578 case AARCH64_OPND_EXCEPTION:
2579 case AARCH64_OPND_UIMM4:
2580 case AARCH64_OPND_UIMM7:
2581 if (optional_operand_p (opcode, idx) == TRUE
2582 && (opnd->imm.value ==
2583 (int64_t) get_optional_operand_default_value (opcode)))
2584 /* Omit the operand, e.g. DCPS1. */
2585 break;
2586 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
2587 break;
2588
2589 case AARCH64_OPND_COND:
68a64283 2590 case AARCH64_OPND_COND1:
a06ea964
NC
2591 snprintf (buf, size, "%s", opnd->cond->names[0]);
2592 break;
2593
2594 case AARCH64_OPND_ADDR_ADRP:
2595 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
2596 + opnd->imm.value;
2597 if (pcrel_p)
2598 *pcrel_p = 1;
2599 if (address)
2600 *address = addr;
2601 /* This is not necessary during disassembly, as print_address_func
2602 in the disassemble_info will take care of the printing. But some
2603 other callers may still be interested in getting the string in *BUF,
2604 so we do the snprintf here regardless. */
2605 snprintf (buf, size, "#0x%" PRIx64, addr);
2606 break;
2607
2608 case AARCH64_OPND_ADDR_PCREL14:
2609 case AARCH64_OPND_ADDR_PCREL19:
2610 case AARCH64_OPND_ADDR_PCREL21:
2611 case AARCH64_OPND_ADDR_PCREL26:
2612 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
2613 if (pcrel_p)
2614 *pcrel_p = 1;
2615 if (address)
2616 *address = addr;
2617 /* This is not necessary during disassembly, as print_address_func
2618 in the disassemble_info will take care of the printing. But some
2619 other callers may still be interested in getting the string in *BUF,
2620 so we do the snprintf here regardless. */
2621 snprintf (buf, size, "#0x%" PRIx64, addr);
2622 break;
2623
2624 case AARCH64_OPND_ADDR_SIMPLE:
2625 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
2626 case AARCH64_OPND_SIMD_ADDR_POST:
2627 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2628 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
2629 {
2630 if (opnd->addr.offset.is_reg)
2631 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
2632 else
2633 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
2634 }
2635 else
2636 snprintf (buf, size, "[%s]", name);
2637 break;
2638
2639 case AARCH64_OPND_ADDR_REGOFF:
2640 print_register_offset_address (buf, size, opnd);
2641 break;
2642
2643 case AARCH64_OPND_ADDR_SIMM7:
2644 case AARCH64_OPND_ADDR_SIMM9:
2645 case AARCH64_OPND_ADDR_SIMM9_2:
2646 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2647 if (opnd->addr.writeback)
2648 {
2649 if (opnd->addr.preind)
2650 snprintf (buf, size, "[%s,#%d]!", name, opnd->addr.offset.imm);
2651 else
2652 snprintf (buf, size, "[%s],#%d", name, opnd->addr.offset.imm);
2653 }
2654 else
2655 {
2656 if (opnd->addr.offset.imm)
2657 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2658 else
2659 snprintf (buf, size, "[%s]", name);
2660 }
2661 break;
2662
2663 case AARCH64_OPND_ADDR_UIMM12:
2664 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2665 if (opnd->addr.offset.imm)
2666 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2667 else
2668 snprintf (buf, size, "[%s]", name);
2669 break;
2670
2671 case AARCH64_OPND_SYSREG:
2672 for (i = 0; aarch64_sys_regs[i].name; ++i)
49eec193
YZ
2673 if (aarch64_sys_regs[i].value == opnd->sysreg
2674 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
a06ea964
NC
2675 break;
2676 if (aarch64_sys_regs[i].name)
2677 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
2678 else
2679 {
2680 /* Implementation defined system register. */
2681 unsigned int value = opnd->sysreg;
2682 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
2683 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
2684 value & 0x7);
2685 }
2686 break;
2687
2688 case AARCH64_OPND_PSTATEFIELD:
2689 for (i = 0; aarch64_pstatefields[i].name; ++i)
2690 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2691 break;
2692 assert (aarch64_pstatefields[i].name);
2693 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
2694 break;
2695
2696 case AARCH64_OPND_SYSREG_AT:
2697 case AARCH64_OPND_SYSREG_DC:
2698 case AARCH64_OPND_SYSREG_IC:
2699 case AARCH64_OPND_SYSREG_TLBI:
875880c6 2700 snprintf (buf, size, "%s", opnd->sysins_op->name);
a06ea964
NC
2701 break;
2702
2703 case AARCH64_OPND_BARRIER:
2704 snprintf (buf, size, "%s", opnd->barrier->name);
2705 break;
2706
2707 case AARCH64_OPND_BARRIER_ISB:
2708 /* Operand can be omitted, e.g. in DCPS1. */
2709 if (! optional_operand_p (opcode, idx)
2710 || (opnd->barrier->value
2711 != get_optional_operand_default_value (opcode)))
2712 snprintf (buf, size, "#0x%x", opnd->barrier->value);
2713 break;
2714
2715 case AARCH64_OPND_PRFOP:
a1ccaec9
YZ
2716 if (opnd->prfop->name != NULL)
2717 snprintf (buf, size, "%s", opnd->prfop->name);
2718 else
2719 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
a06ea964
NC
2720 break;
2721
2722 default:
2723 assert (0);
2724 }
2725}
2726\f
2727#define CPENC(op0,op1,crn,crm,op2) \
2728 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
2729 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
2730#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
2731 /* for 3.9.10 System Instructions */
2732#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
2733
2734#define C0 0
2735#define C1 1
2736#define C2 2
2737#define C3 3
2738#define C4 4
2739#define C5 5
2740#define C6 6
2741#define C7 7
2742#define C8 8
2743#define C9 9
2744#define C10 10
2745#define C11 11
2746#define C12 12
2747#define C13 13
2748#define C14 14
2749#define C15 15
2750
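
/* Illustration (a sketch using only the macros defined here): CPENC packs
   op0:op1:CRn:CRm:op2 into a 14-bit value laid out as
   op0 << 14 | op1 << 11 | CRn << 7 | CRm << 3 | op2, which is the layout
   unpacked by the "s%u_%u_c%u_c%u_%u" case of aarch64_print_operand above.
   For instance, NZCV (op0 3, op1 3, CRn C4, CRm C2, op2 0) in the table
   below is CPEN_ (3, C2, 0) == CPENC (3, 3, C4, C2, 0) == 0xda10.  */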
49eec193
YZ
2751#ifdef F_DEPRECATED
2752#undef F_DEPRECATED
2753#endif
2754#define F_DEPRECATED 0x1 /* Deprecated system register. */
2755
f21cce2c
MW
2756#ifdef F_ARCHEXT
2757#undef F_ARCHEXT
2758#endif
2759#define F_ARCHEXT 0x2 /* Architecture dependent system register. */
2760
ea2deeec
MW
2761#ifdef F_HASXT
2762#undef F_HASXT
2763#endif
2764#define F_HASXT 0x4 /* System instruction register <Xt>
2765 operand. */
2766
f21cce2c 2767
a06ea964
NC
2768/* TODO: there are two more issues that need to be resolved:
2769 1. handle read-only and write-only system registers
2770 2. handle cpu-implementation-defined system registers. */
49eec193
YZ
2771const aarch64_sys_reg aarch64_sys_regs [] =
2772{
2773 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
250aafa4 2774 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
49eec193 2775 { "elr_el1", CPEN_(0,C0,1), 0 },
250aafa4 2776 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
49eec193
YZ
2777 { "sp_el0", CPEN_(0,C1,0), 0 },
2778 { "spsel", CPEN_(0,C2,0), 0 },
2779 { "daif", CPEN_(3,C2,1), 0 },
2780 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
f21cce2c 2781 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
6479e48e 2782 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
49eec193
YZ
2783 { "nzcv", CPEN_(3,C2,0), 0 },
2784 { "fpcr", CPEN_(3,C4,0), 0 },
2785 { "fpsr", CPEN_(3,C4,1), 0 },
2786 { "dspsr_el0", CPEN_(3,C5,0), 0 },
2787 { "dlr_el0", CPEN_(3,C5,1), 0 },
2788 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
2789 { "elr_el2", CPEN_(4,C0,1), 0 },
2790 { "sp_el1", CPEN_(4,C1,0), 0 },
2791 { "spsr_irq", CPEN_(4,C3,0), 0 },
2792 { "spsr_abt", CPEN_(4,C3,1), 0 },
2793 { "spsr_und", CPEN_(4,C3,2), 0 },
2794 { "spsr_fiq", CPEN_(4,C3,3), 0 },
2795 { "spsr_el3", CPEN_(6,C0,0), 0 },
2796 { "elr_el3", CPEN_(6,C0,1), 0 },
2797 { "sp_el2", CPEN_(6,C1,0), 0 },
2798 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
2799 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
2800 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
2801 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
2802 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
2803 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
2804 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
2805 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
2806 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
2807 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
2808 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
2809 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
2810 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
2811 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
2812 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
2813 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
bdfa8b95 2814 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
49eec193
YZ
2815 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
2816 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
2817 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
2818 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
2819 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
2820 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
2821 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
2822 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
2823 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
2824 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
2825 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
2826 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
2827 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
2828 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
2829 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
2830 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
2831 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
2832 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
1a04d1a7 2833 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
49eec193
YZ
2834 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
2835 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
2836 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
2837 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
2838 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
2839 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
2840 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
2841 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
2842 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
250aafa4 2843 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
49eec193
YZ
2844 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
2845 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
2846 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
2847 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
250aafa4 2848 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
49eec193
YZ
2849 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
2850 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
2851 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
2852 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
2853 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
2854 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
2855 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
2856 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
2857 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
2858 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
2859 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
250aafa4 2860 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
49eec193 2861 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
250aafa4
MW
2862 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
2863 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
49eec193
YZ
2864 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
2865 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
2866 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
2867 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
250aafa4 2868 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
49eec193
YZ
2869 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
2870 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
2871 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
2872 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
2873 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
2874 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
250aafa4 2875 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
49eec193 2876 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
250aafa4 2877 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
49eec193
YZ
2878 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
2879 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
2880 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
250aafa4 2881 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
47f81142 2882 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
49eec193 2883 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
47f81142
MW
2884 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
2885 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
2886 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
2887 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
2888 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
2889 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
2890 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
2891 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
49eec193
YZ
2892 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
2893 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
2894 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
250aafa4 2895 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
49eec193
YZ
2896 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
2897 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
2898 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
2899 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
2900 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
250aafa4 2901 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
49eec193
YZ
2902 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
2903 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
2904 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
250aafa4 2905 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
49eec193
YZ
2906 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
2907 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
2908 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
250aafa4 2909 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
49eec193
YZ
2910 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
2911 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
2912 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
2913 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
2914 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
2915 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
2916 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
47f81142
MW
2917 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
2918 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
49eec193 2919 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
250aafa4
MW
2920 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
2921 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
49eec193
YZ
2922 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
2923 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
2924 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
2925 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
2926 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
2927 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
2928 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
2929 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
2930 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
2931 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
2932 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
250aafa4 2933 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
49eec193
YZ
2934 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
2935 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
250aafa4 2936 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
49eec193 2937 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
250aafa4 2938 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
49eec193 2939 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
250aafa4 2940 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
49eec193 2941 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
250aafa4 2942 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
49eec193 2943 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
250aafa4 2944 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
49eec193 2945 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
250aafa4 2946 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
49eec193
YZ
2947 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
2948 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
2949 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
2950 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
2951 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
2952 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
250aafa4
MW
2953 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
2954 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
2955 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
49eec193
YZ
2956 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
2957 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
2958 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
2959 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
2960 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
2961 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
2962 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
2963 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
2964 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
2965 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
2966 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
2967 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
2968 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
2969 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
2970 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
2971 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
2972 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
2973 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
2974 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
2975 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
2976 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
2977 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
2978 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
2979 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
2980 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
2981 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
2982 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
2983 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
2984 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
2985 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
2986 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
2987 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
2988 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
2989 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
2990 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
2991 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
2992 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
2993 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
2994 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
2995 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
2996 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
2997 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
2998 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
2999 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3000 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3001 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3002 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3003 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3004 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3005 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3006 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3007 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3008 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3009 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3010 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3011 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3012 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3013 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3014 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3015 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3016 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3017 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3018 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3019 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3020 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3021 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3022 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3023 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3024 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3025 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3026 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3027 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3028 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3029 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3030 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3031 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3032 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3033 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3034 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3035 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3036 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3037 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3038 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3039 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3040 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3041 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3042
3043 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3044 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3045 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3046 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3047 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3048 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3049 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3050 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3051 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3052 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3053 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3054 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3055 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3056 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3057 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3058 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3059 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3060 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3061 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3062 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3063 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3064 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3065 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3066 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3067 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3068 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3069 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3070 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3071 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3072 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3073 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3074 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3075 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3076 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3077 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3078 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3079 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3080 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3081 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3082 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3083 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3084 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3085 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3086 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3087 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3088 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3089 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3090 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3091 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
3092 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
3093 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
3094 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
3095 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
3096 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
3097 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
3098 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
3099 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
3100 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
3101 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
3102 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
3103 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
3104 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3105 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3106 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3107 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3108 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3109 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3110 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3111 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3112 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3113 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3114 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3115 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3116 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3117 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3118 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3119 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3120 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
3121 { 0, CPENC(0,0,0,0,0), 0 },
a06ea964
NC
3122};
3123
49eec193
YZ
3124bfd_boolean
3125aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3126{
3127 return (reg->flags & F_DEPRECATED) != 0;
3128}
3129
f21cce2c
MW
3130bfd_boolean
3131aarch64_sys_reg_supported_p (const aarch64_feature_set features,
3132 const aarch64_sys_reg *reg)
3133{
3134 if (!(reg->flags & F_ARCHEXT))
3135 return TRUE;
3136
3137 /* PAN. Values are from aarch64_sys_regs. */
3138 if (reg->value == CPEN_(0,C2,3)
3139 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3140 return FALSE;
3141
250aafa4
MW
3142 /* Virtualization host extensions: system registers. */
3143 if ((reg->value == CPENC (3, 4, C2, C0, 1)
3144 || reg->value == CPENC (3, 4, C13, C0, 1)
3145 || reg->value == CPENC (3, 4, C14, C3, 0)
3146 || reg->value == CPENC (3, 4, C14, C3, 1)
3147 || reg->value == CPENC (3, 4, C14, C3, 2))
3148 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3149 return FALSE;
3150
3151 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3152 if ((reg->value == CPEN_ (5, C0, 0)
3153 || reg->value == CPEN_ (5, C0, 1)
3154 || reg->value == CPENC (3, 5, C1, C0, 0)
3155 || reg->value == CPENC (3, 5, C1, C0, 2)
3156 || reg->value == CPENC (3, 5, C2, C0, 0)
3157 || reg->value == CPENC (3, 5, C2, C0, 1)
3158 || reg->value == CPENC (3, 5, C2, C0, 2)
3159 || reg->value == CPENC (3, 5, C5, C1, 0)
3160 || reg->value == CPENC (3, 5, C5, C1, 1)
3161 || reg->value == CPENC (3, 5, C5, C2, 0)
3162 || reg->value == CPENC (3, 5, C6, C0, 0)
3163 || reg->value == CPENC (3, 5, C10, C2, 0)
3164 || reg->value == CPENC (3, 5, C10, C3, 0)
3165 || reg->value == CPENC (3, 5, C12, C0, 0)
3166 || reg->value == CPENC (3, 5, C13, C0, 1)
3167 || reg->value == CPENC (3, 5, C14, C1, 0))
3168 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3169 return FALSE;
3170
3171 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3172 if ((reg->value == CPENC (3, 5, C14, C2, 0)
3173 || reg->value == CPENC (3, 5, C14, C2, 1)
3174 || reg->value == CPENC (3, 5, C14, C2, 2)
3175 || reg->value == CPENC (3, 5, C14, C3, 0)
3176 || reg->value == CPENC (3, 5, C14, C3, 1)
3177 || reg->value == CPENC (3, 5, C14, C3, 2))
3178 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
return FALSE;
1a04d1a7
MW
3179
3180 /* ARMv8.2 features. */
6479e48e
MW
3181
3182 /* ID_AA64MMFR2_EL1. */
1a04d1a7
MW
3183 if (reg->value == CPENC (3, 0, C0, C7, 2)
3184 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
250aafa4
MW
3185 return FALSE;
3186
6479e48e
MW
3187 /* PSTATE.UAO. */
3188 if (reg->value == CPEN_ (0, C2, 4)
3189 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3190 return FALSE;
3191
47f81142
MW
3192 /* RAS extension. */
3193
3194 /* ERRIDR_EL1 and ERRSELR_EL1. */
3195 if ((reg->value == CPENC (3, 0, C5, C3, 0)
3196 || reg->value == CPENC (3, 0, C5, C3, 1))
3197 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3198 return FALSE;
3199
3200 /* ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1, ERXADDR_EL1, ERXMISC0_EL1 and
3201 ERXMISC1_EL1. Values are from aarch64_sys_regs. */
3202 if ((reg->value == CPENC (3, 0, C5, C4, 0)
3203 || reg->value == CPENC (3, 0, C5, C4, 1)
3204 || reg->value == CPENC (3, 0, C5, C4, 2)
3205 || reg->value == CPENC (3, 0, C5, C4, 3)
3206 || reg->value == CPENC (3, 0, C5, C5, 0)
3207 || reg->value == CPENC (3, 0, C5, C5, 1))
3208 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3209 return FALSE;
3210
3211 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3212 if ((reg->value == CPENC (3, 4, C5, C2, 3)
3213 || reg->value == CPENC (3, 0, C12, C1, 1)
3214 || reg->value == CPENC (3, 4, C12, C1, 1))
3215 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3216 return FALSE;
3217
f21cce2c
MW
3218 return TRUE;
3219}
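
/* For example (a sketch): with a feature set that lacks AARCH64_FEATURE_PAN,
   looking up "pan" in aarch64_sys_regs above and passing the entry to
   aarch64_sys_reg_supported_p returns FALSE, so an assembler can reject
   "msr pan, x0" unless the PAN extension has been enabled.  */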
3220
87b8eed7 3221const aarch64_sys_reg aarch64_pstatefields [] =
a06ea964 3222{
87b8eed7
YZ
3223 { "spsel", 0x05, 0 },
3224 { "daifset", 0x1e, 0 },
3225 { "daifclr", 0x1f, 0 },
f21cce2c 3226 { "pan", 0x04, F_ARCHEXT },
6479e48e 3227 { "uao", 0x03, F_ARCHEXT },
87b8eed7 3228 { 0, CPENC(0,0,0,0,0), 0 },
a06ea964
NC
3229};
3230
f21cce2c
MW
3231bfd_boolean
3232aarch64_pstatefield_supported_p (const aarch64_feature_set features,
3233 const aarch64_sys_reg *reg)
3234{
3235 if (!(reg->flags & F_ARCHEXT))
3236 return TRUE;
3237
3238 /* PAN. Values are from aarch64_pstatefields. */
3239 if (reg->value == 0x04
3240 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3241 return FALSE;
3242
6479e48e
MW
3243 /* UAO. Values are from aarch64_pstatefields. */
3244 if (reg->value == 0x03
3245 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3246 return FALSE;
3247
f21cce2c
MW
3248 return TRUE;
3249}
3250
a06ea964
NC
3251const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
3252{
3253 { "ialluis", CPENS(0,C7,C1,0), 0 },
3254 { "iallu", CPENS(0,C7,C5,0), 0 },
ea2deeec 3255 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
a06ea964
NC
3256 { 0, CPENS(0,0,0,0), 0 }
3257};
3258
3259const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
3260{
ea2deeec
MW
3261 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
3262 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
3263 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
3264 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
3265 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
3266 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
d6bf7ce6 3267 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
ea2deeec
MW
3268 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
3269 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
a06ea964
NC
3270 { 0, CPENS(0,0,0,0), 0 }
3271};
3272
3273const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
3274{
ea2deeec
MW
3275 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
3276 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
3277 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
3278 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
3279 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
3280 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
3281 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
3282 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
3283 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
3284 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
3285 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
3286 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
a06ea964
NC
3287 { 0, CPENS(0,0,0,0), 0 }
3288};
3289
3290const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
3291{
3292 { "vmalle1", CPENS(0,C8,C7,0), 0 },
ea2deeec
MW
3293 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
3294 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
3295 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
a06ea964 3296 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
ea2deeec
MW
3297 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
3298 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
3299 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
3300 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
3301 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
3302 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
3303 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
3304 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
3305 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
a06ea964
NC
3306 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
3307 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
ea2deeec
MW
3308 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
3309 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
a06ea964
NC
3310 { "alle2", CPENS(4,C8,C7,0), 0 },
3311 { "alle2is", CPENS(4,C8,C3,0), 0 },
3312 { "alle1", CPENS(4,C8,C7,4), 0 },
3313 { "alle1is", CPENS(4,C8,C3,4), 0 },
3314 { "alle3", CPENS(6,C8,C7,0), 0 },
3315 { "alle3is", CPENS(6,C8,C3,0), 0 },
ea2deeec
MW
3316 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
3317 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
3318 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
3319 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
3320 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
3321 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
3322 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
3323 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
a06ea964
NC
3324 { 0, CPENS(0,0,0,0), 0 }
3325};
3326
ea2deeec
MW
3327bfd_boolean
3328aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
3329{
3330 return (sys_ins_reg->flags & F_HASXT) != 0;
3331}
3332
d6bf7ce6
MW
3333bfd_boolean
3334aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
3335 const aarch64_sys_ins_reg *reg)
3336{
3337 if (!(reg->flags & F_ARCHEXT))
3338 return TRUE;
3339
3340 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
3341 if (reg->value == CPENS (3, C7, C12, 1)
3342 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3343 return FALSE;
3344
3345 return TRUE;
3346}
3347
a06ea964
NC
3348#undef C0
3349#undef C1
3350#undef C2
3351#undef C3
3352#undef C4
3353#undef C5
3354#undef C6
3355#undef C7
3356#undef C8
3357#undef C9
3358#undef C10
3359#undef C11
3360#undef C12
3361#undef C13
3362#undef C14
3363#undef C15
3364
3365/* Include the opcode description table as well as the operand description
3366 table. */
3367#include "aarch64-tbl.h"