1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
35 int debug_dump
= FALSE
;
36 #endif /* DEBUG_AARCH64 */
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array
[32] = {
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array
[16] = {
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
108 return ((qualifier
>= AARCH64_OPND_QLF_V_8B
109 && qualifier
<= AARCH64_OPND_QLF_V_1Q
) ? TRUE
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
116 return ((qualifier
>= AARCH64_OPND_QLF_S_B
117 && qualifier
<= AARCH64_OPND_QLF_S_Q
) ? TRUE
127 DP_VECTOR_ACROSS_LANES
,
/* Index of the operand significant for size:Q encoding, indexed by
   enum data_pattern.  Restored missing initializer braces.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
147 if (vector_qualifier_p (qualifiers
[0]) == TRUE
)
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers
[0] == qualifiers
[1]
152 && vector_qualifier_p (qualifiers
[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers
[0])
154 == aarch64_get_qualifier_esize (qualifiers
[1]))
155 && (aarch64_get_qualifier_esize (qualifiers
[0])
156 == aarch64_get_qualifier_esize (qualifiers
[2])))
157 return DP_VECTOR_3SAME
;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
161 if (vector_qualifier_p (qualifiers
[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers
[0])
164 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
165 return DP_VECTOR_LONG
;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers
[0] == qualifiers
[1]
168 && vector_qualifier_p (qualifiers
[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers
[0])
171 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers
[0])
173 == aarch64_get_qualifier_esize (qualifiers
[1])))
174 return DP_VECTOR_WIDE
;
176 else if (fp_qualifier_p (qualifiers
[0]) == TRUE
)
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers
[1]) == TRUE
180 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
181 return DP_VECTOR_ACROSS_LANES
;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
199 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
202 const aarch64_field fields
[] =
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
244 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
245 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
246 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
247 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
248 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
249 { 5, 14 }, /* imm14: in test bit and branch instructions. */
250 { 5, 16 }, /* imm16: in exception instructions. */
251 { 0, 26 }, /* imm26: in unconditional branch instructions. */
252 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
253 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
254 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
255 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
256 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
257 { 22, 1 }, /* N: in logical (immediate) instructions. */
258 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
259 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
260 { 31, 1 }, /* sf: in integer data processing instructions. */
261 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
262 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
263 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
264 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
265 { 31, 1 }, /* b5: in the test bit and branch instructions. */
266 { 19, 5 }, /* b40: in the test bit and branch instructions. */
267 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
268 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
269 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
270 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
271 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
272 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
273 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
274 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
275 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
276 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
277 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
278 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
279 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
280 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
281 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
282 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
283 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
284 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
285 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
286 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
287 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
288 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
289 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
290 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
291 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
292 { 5, 1 }, /* SVE_i1: single-bit immediate. */
293 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
294 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
295 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
296 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
297 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
298 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
299 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
300 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
301 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
302 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
303 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
304 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
305 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
306 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
307 { 16, 4 }, /* SVE_tsz: triangular size select. */
308 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
309 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
310 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
311 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
312 { 22, 1 } /* SVE_xs_22: UXTW/SXTW select (bit 22). */
315 enum aarch64_operand_class
316 aarch64_get_operand_class (enum aarch64_opnd type
)
318 return aarch64_operands
[type
].op_class
;
322 aarch64_get_operand_name (enum aarch64_opnd type
)
324 return aarch64_operands
[type
].name
;
327 /* Get operand description string.
328 This is usually for the diagnosis purpose. */
330 aarch64_get_operand_desc (enum aarch64_opnd type
)
332 return aarch64_operands
[type
].desc
;
335 /* Table of all conditional affixes. */
336 const aarch64_cond aarch64_conds
[16] =
338 {{"eq", "none"}, 0x0},
339 {{"ne", "any"}, 0x1},
340 {{"cs", "hs", "nlast"}, 0x2},
341 {{"cc", "lo", "ul", "last"}, 0x3},
342 {{"mi", "first"}, 0x4},
343 {{"pl", "nfrst"}, 0x5},
346 {{"hi", "pmore"}, 0x8},
347 {{"ls", "plast"}, 0x9},
348 {{"ge", "tcont"}, 0xa},
349 {{"lt", "tstop"}, 0xb},
357 get_cond_from_value (aarch64_insn value
)
360 return &aarch64_conds
[(unsigned int) value
];
364 get_inverted_cond (const aarch64_cond
*cond
)
366 return &aarch64_conds
[cond
->value
^ 0x1];
369 /* Table describing the operand extension/shifting operators; indexed by
370 enum aarch64_modifier_kind.
372 The value column provides the most common values for encoding modifiers,
373 which enables table-driven encoding/decoding for the modifiers. */
374 const struct aarch64_name_value_pair aarch64_operand_modifiers
[] =
395 enum aarch64_modifier_kind
396 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
398 return desc
- aarch64_operand_modifiers
;
402 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
404 return aarch64_operand_modifiers
[kind
].value
;
407 enum aarch64_modifier_kind
408 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
409 bfd_boolean extend_p
)
411 if (extend_p
== TRUE
)
412 return AARCH64_MOD_UXTB
+ value
;
414 return AARCH64_MOD_LSL
- value
;
418 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
420 return (kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
)
424 static inline bfd_boolean
425 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
427 return (kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
)
431 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
451 /* Table describing the operands supported by the aliases of the HINT
454 The name column is the operand that is accepted for the alias. The value
455 column is the hint number of the alias. The list of operands is terminated
456 by NULL in the name column. */
458 const struct aarch64_name_value_pair aarch64_hint_options
[] =
460 { "csync", 0x11 }, /* PSB CSYNC. */
464 /* op -> op: load = 0 instruction = 1 store = 2
466 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
467 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
468 const struct aarch64_name_value_pair aarch64_prfops
[32] =
470 { "pldl1keep", B(0, 1, 0) },
471 { "pldl1strm", B(0, 1, 1) },
472 { "pldl2keep", B(0, 2, 0) },
473 { "pldl2strm", B(0, 2, 1) },
474 { "pldl3keep", B(0, 3, 0) },
475 { "pldl3strm", B(0, 3, 1) },
478 { "plil1keep", B(1, 1, 0) },
479 { "plil1strm", B(1, 1, 1) },
480 { "plil2keep", B(1, 2, 0) },
481 { "plil2strm", B(1, 2, 1) },
482 { "plil3keep", B(1, 3, 0) },
483 { "plil3strm", B(1, 3, 1) },
486 { "pstl1keep", B(2, 1, 0) },
487 { "pstl1strm", B(2, 1, 1) },
488 { "pstl2keep", B(2, 2, 0) },
489 { "pstl2strm", B(2, 2, 1) },
490 { "pstl3keep", B(2, 3, 0) },
491 { "pstl3strm", B(2, 3, 1) },
505 /* Utilities on value constraint. */
/* Return 1 if VALUE lies in the inclusive range [LOW, HIGH]; 0 otherwise.
   Restored missing return type and braces.  */
static int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}
513 /* Return true if VALUE is a multiple of ALIGN. */
515 value_aligned_p (int64_t value
, int align
)
517 return (value
% align
) == 0;
/* Return 1 if the signed VALUE fits in a two's-complement field of WIDTH
   bits, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1); 0 otherwise.
   Restored missing braces and final return.  */
static int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}
/* Return 1 if the non-negative VALUE fits in an unsigned field of WIDTH
   bits, i.e. 0 <= VALUE < 2^WIDTH; 0 otherwise.
   Restored missing braces and final return.  */
static int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
548 /* Return 1 if OPERAND is SP or WSP. */
550 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
552 return ((aarch64_get_operand_class (operand
->type
)
553 == AARCH64_OPND_CLASS_INT_REG
)
554 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
555 && operand
->reg
.regno
== 31);
558 /* Return 1 if OPERAND is XZR or WZP. */
560 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
562 return ((aarch64_get_operand_class (operand
->type
)
563 == AARCH64_OPND_CLASS_INT_REG
)
564 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
565 && operand
->reg
.regno
== 31);
568 /* Return true if the operand *OPERAND that has the operand code
569 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
570 qualified by the qualifier TARGET. */
573 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
574 aarch64_opnd_qualifier_t target
)
576 switch (operand
->qualifier
)
578 case AARCH64_OPND_QLF_W
:
579 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
582 case AARCH64_OPND_QLF_X
:
583 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
586 case AARCH64_OPND_QLF_WSP
:
587 if (target
== AARCH64_OPND_QLF_W
588 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
591 case AARCH64_OPND_QLF_SP
:
592 if (target
== AARCH64_OPND_QLF_X
593 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
603 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
604 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
606 Return NIL if more than one expected qualifiers are found. */
608 aarch64_opnd_qualifier_t
609 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
611 const aarch64_opnd_qualifier_t known_qlf
,
618 When the known qualifier is NIL, we have to assume that there is only
619 one qualifier sequence in the *QSEQ_LIST and return the corresponding
620 qualifier directly. One scenario is that for instruction
621 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
622 which has only one possible valid qualifier sequence
624 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
625 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
627 Because the qualifier NIL has dual roles in the qualifier sequence:
628 it can mean no qualifier for the operand, or the qualifer sequence is
629 not in use (when all qualifiers in the sequence are NILs), we have to
630 handle this special case here. */
631 if (known_qlf
== AARCH64_OPND_NIL
)
633 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
634 return qseq_list
[0][idx
];
637 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
639 if (qseq_list
[i
][known_idx
] == known_qlf
)
642 /* More than one sequences are found to have KNOWN_QLF at
644 return AARCH64_OPND_NIL
;
649 return qseq_list
[saved_i
][idx
];
/* Kinds of operand qualifier.  NOTE(review): the enumerator list and the
   struct members were lost in the mangled source and have been restored
   from the table's visible usage (data0/data1/data2/desc/kind) — confirm
   against upstream.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind.  */
  int data0;
  int data1;
  int data2;
  /* Qualifier name, e.g. "8b", "imm_0_7".  */
  const char *desc;
  /* What the three data fields mean.  */
  enum operand_qualifier_kind kind;
};

/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
728 static inline bfd_boolean
729 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
731 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
)
735 static inline bfd_boolean
736 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
738 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
)
743 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
745 return aarch64_opnd_qualifiers
[qualifier
].desc
;
748 /* Given an operand qualifier, return the expected data element size
749 of a qualified operand. */
751 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
753 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
754 return aarch64_opnd_qualifiers
[qualifier
].data0
;
758 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
760 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
761 return aarch64_opnd_qualifiers
[qualifier
].data1
;
765 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
767 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
768 return aarch64_opnd_qualifiers
[qualifier
].data2
;
772 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
774 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
775 return aarch64_opnd_qualifiers
[qualifier
].data0
;
779 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
781 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
782 return aarch64_opnd_qualifiers
[qualifier
].data1
;
787 aarch64_verbose (const char *str
, ...)
798 dump_qualifier_sequence (const aarch64_opnd_qualifier_t
*qualifier
)
802 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++qualifier
)
803 printf ("%s,", aarch64_get_qualifier_name (*qualifier
));
808 dump_match_qualifiers (const struct aarch64_opnd_info
*opnd
,
809 const aarch64_opnd_qualifier_t
*qualifier
)
812 aarch64_opnd_qualifier_t curr
[AARCH64_MAX_OPND_NUM
];
814 aarch64_verbose ("dump_match_qualifiers:");
815 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
816 curr
[i
] = opnd
[i
].qualifier
;
817 dump_qualifier_sequence (curr
);
818 aarch64_verbose ("against");
819 dump_qualifier_sequence (qualifier
);
821 #endif /* DEBUG_AARCH64 */
823 /* TODO improve this, we can have an extra field at the runtime to
824 store the number of operands rather than calculating it every time. */
827 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
830 const enum aarch64_opnd
*opnds
= opcode
->operands
;
831 while (opnds
[i
++] != AARCH64_OPND_NIL
)
834 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
838 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
839 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
841 N.B. on the entry, it is very likely that only some operands in *INST
842 have had their qualifiers been established.
844 If STOP_AT is not -1, the function will only try to match
845 the qualifier sequence for operands before and including the operand
846 of index STOP_AT; and on success *RET will only be filled with the first
847 (STOP_AT+1) qualifiers.
849 A couple examples of the matching algorithm:
857 Apart from serving the main encoding routine, this can also be called
858 during or after the operand decoding. */
861 aarch64_find_best_match (const aarch64_inst
*inst
,
862 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
863 int stop_at
, aarch64_opnd_qualifier_t
*ret
)
867 const aarch64_opnd_qualifier_t
*qualifiers
;
869 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
872 DEBUG_TRACE ("SUCCEED: no operand");
876 if (stop_at
< 0 || stop_at
>= num_opnds
)
877 stop_at
= num_opnds
- 1;
879 /* For each pattern. */
880 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
883 qualifiers
= *qualifiers_list
;
885 /* Start as positive. */
888 DEBUG_TRACE ("%d", i
);
891 dump_match_qualifiers (inst
->operands
, qualifiers
);
894 /* Most opcodes has much fewer patterns in the list.
895 First NIL qualifier indicates the end in the list. */
896 if (empty_qualifier_sequence_p (qualifiers
) == TRUE
)
898 DEBUG_TRACE_IF (i
== 0, "SUCCEED: empty qualifier list");
904 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
906 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
)
908 /* Either the operand does not have qualifier, or the qualifier
909 for the operand needs to be deduced from the qualifier
911 In the latter case, any constraint checking related with
912 the obtained qualifier should be done later in
913 operand_general_constraint_met_p. */
916 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
918 /* Unless the target qualifier can also qualify the operand
919 (which has already had a non-nil qualifier), non-equal
920 qualifiers are generally un-matched. */
921 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
930 continue; /* Equal qualifiers are certainly matched. */
933 /* Qualifiers established. */
940 /* Fill the result in *RET. */
942 qualifiers
= *qualifiers_list
;
944 DEBUG_TRACE ("complete qualifiers using list %d", i
);
947 dump_qualifier_sequence (qualifiers
);
950 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
951 ret
[j
] = *qualifiers
;
952 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
953 ret
[j
] = AARCH64_OPND_QLF_NIL
;
955 DEBUG_TRACE ("SUCCESS");
959 DEBUG_TRACE ("FAIL");
963 /* Operand qualifier matching and resolving.
965 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
966 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
968 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
972 match_operands_qualifier (aarch64_inst
*inst
, bfd_boolean update_p
)
975 aarch64_opnd_qualifier_seq_t qualifiers
;
977 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
980 DEBUG_TRACE ("matching FAIL");
984 if (inst
->opcode
->flags
& F_STRICT
)
986 /* Require an exact qualifier match, even for NIL qualifiers. */
987 nops
= aarch64_num_of_operands (inst
->opcode
);
988 for (i
= 0; i
< nops
; ++i
)
989 if (inst
->operands
[i
].qualifier
!= qualifiers
[i
])
993 /* Update the qualifiers. */
994 if (update_p
== TRUE
)
995 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
997 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
999 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
1000 "update %s with %s for operand %d",
1001 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
1002 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
1003 inst
->operands
[i
].qualifier
= qualifiers
[i
];
1006 DEBUG_TRACE ("matching SUCCESS");
1010 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1013 IS32 indicates whether value is a 32-bit immediate or not.
1014 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1015 amount will be returned in *SHIFT_AMOUNT. */
1018 aarch64_wide_constant_p (int64_t value
, int is32
, unsigned int *shift_amount
)
1022 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1026 /* Allow all zeros or all ones in top 32-bits, so that
1027 32-bit constant expressions like ~0x80000000 are
1029 uint64_t ext
= value
;
1030 if (ext
>> 32 != 0 && ext
>> 32 != (uint64_t) 0xffffffff)
1031 /* Immediate out of range. */
1033 value
&= (int64_t) 0xffffffff;
1036 /* first, try movz then movn */
1038 if ((value
& ((int64_t) 0xffff << 0)) == value
)
1040 else if ((value
& ((int64_t) 0xffff << 16)) == value
)
1042 else if (!is32
&& (value
& ((int64_t) 0xffff << 32)) == value
)
1044 else if (!is32
&& (value
& ((int64_t) 0xffff << 48)) == value
)
1049 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1053 if (shift_amount
!= NULL
)
1054 *shift_amount
= amount
;
1056 DEBUG_TRACE ("exit TRUE with amount %d", amount
);
1061 /* Build the accepted values for immediate logical SIMD instructions.
1063 The standard encodings of the immediate value are:
1064 N imms immr SIMD size R S
1065 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1066 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1067 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1068 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1069 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1070 0 11110s 00000r 2 UInt(r) UInt(s)
1071 where all-ones value of S is reserved.
1073 Let's call E the SIMD size.
1075 The immediate value is: S+1 bits '1' rotated to the right by R.
1077 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1078 (remember S != E - 1). */
1080 #define TOTAL_IMM_NB 5334
1085 aarch64_insn encoding
;
1086 } simd_imm_encoding
;
1088 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
1091 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
1093 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
1094 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
1096 if (imm1
->imm
< imm2
->imm
)
1098 if (imm1
->imm
> imm2
->imm
)
1103 /* immediate bitfield standard encoding
1104 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1105 1 ssssss rrrrrr 64 rrrrrr ssssss
1106 0 0sssss 0rrrrr 32 rrrrr sssss
1107 0 10ssss 00rrrr 16 rrrr ssss
1108 0 110sss 000rrr 8 rrr sss
1109 0 1110ss 0000rr 4 rr ss
1110 0 11110s 00000r 2 r s */
/* Pack (IS64, S, R) into the 13-bit standard encoding N:immr:imms
   (bit 12 = N, bits 11-6 = R, bits 5-0 = S).  Restored missing return
   type and braces.  */
static inline uint32_t
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
1118 build_immediate_table (void)
1120 uint32_t log_e
, e
, s
, r
, s_mask
;
1126 for (log_e
= 1; log_e
<= 6; log_e
++)
1128 /* Get element size. */
1133 mask
= 0xffffffffffffffffull
;
1139 mask
= (1ull << e
) - 1;
1141 1 ((1 << 4) - 1) << 2 = 111100
1142 2 ((1 << 3) - 1) << 3 = 111000
1143 3 ((1 << 2) - 1) << 4 = 110000
1144 4 ((1 << 1) - 1) << 5 = 100000
1145 5 ((1 << 0) - 1) << 6 = 000000 */
1146 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1148 for (s
= 0; s
< e
- 1; s
++)
1149 for (r
= 0; r
< e
; r
++)
1151 /* s+1 consecutive bits to 1 (s < 63) */
1152 imm
= (1ull << (s
+ 1)) - 1;
1153 /* rotate right by r */
1155 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1156 /* replicate the constant depending on SIMD size */
1159 case 1: imm
= (imm
<< 2) | imm
;
1161 case 2: imm
= (imm
<< 4) | imm
;
1163 case 3: imm
= (imm
<< 8) | imm
;
1165 case 4: imm
= (imm
<< 16) | imm
;
1167 case 5: imm
= (imm
<< 32) | imm
;
1172 simd_immediates
[nb_imms
].imm
= imm
;
1173 simd_immediates
[nb_imms
].encoding
=
1174 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1178 assert (nb_imms
== TOTAL_IMM_NB
);
1179 qsort(simd_immediates
, nb_imms
,
1180 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1183 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1184 be accepted by logical (immediate) instructions
1185 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1187 ESIZE is the number of bytes in the decoded immediate value.
1188 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1189 VALUE will be returned in *ENCODING. */
1192 aarch64_logical_immediate_p (uint64_t value
, int esize
, aarch64_insn
*encoding
)
1194 simd_imm_encoding imm_enc
;
1195 const simd_imm_encoding
*imm_encoding
;
1196 static bfd_boolean initialized
= FALSE
;
1200 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), is32: %d", value
,
1203 if (initialized
== FALSE
)
1205 build_immediate_table ();
1209 /* Allow all zeros or all ones in top bits, so that
1210 constant expressions like ~1 are permitted. */
1211 upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
1212 if ((value
& ~upper
) != value
&& (value
| upper
) != value
)
1215 /* Replicate to a full 64-bit value. */
1217 for (i
= esize
* 8; i
< 64; i
*= 2)
1218 value
|= (value
<< i
);
1220 imm_enc
.imm
= value
;
1221 imm_encoding
= (const simd_imm_encoding
*)
1222 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1223 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1224 if (imm_encoding
== NULL
)
1226 DEBUG_TRACE ("exit with FALSE");
1229 if (encoding
!= NULL
)
1230 *encoding
= imm_encoding
->encoding
;
1231 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */

int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      /* Each byte must be either all-ones (contributing a 1 bit at
         position I) or all-zeros (contributing a 0 bit); any other
         byte value means IMM is not an expanded imm8.  */
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
        ret |= 1 << i;
      else if (byte != 0x00)
        return -1;
    }
  return ret;
}
1257 /* Utility inline functions for operand_general_constraint_met_p. */
1260 set_error (aarch64_operand_error
*mismatch_detail
,
1261 enum aarch64_operand_error_kind kind
, int idx
,
1264 if (mismatch_detail
== NULL
)
1266 mismatch_detail
->kind
= kind
;
1267 mismatch_detail
->index
= idx
;
1268 mismatch_detail
->error
= error
;
1272 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1275 if (mismatch_detail
== NULL
)
1277 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1281 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1282 int idx
, int lower_bound
, int upper_bound
,
1285 if (mismatch_detail
== NULL
)
1287 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1288 mismatch_detail
->data
[0] = lower_bound
;
1289 mismatch_detail
->data
[1] = upper_bound
;
1293 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1294 int idx
, int lower_bound
, int upper_bound
)
1296 if (mismatch_detail
== NULL
)
1298 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1299 _("immediate value"));
1303 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1304 int idx
, int lower_bound
, int upper_bound
)
1306 if (mismatch_detail
== NULL
)
1308 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1309 _("immediate offset"));
1313 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1314 int idx
, int lower_bound
, int upper_bound
)
1316 if (mismatch_detail
== NULL
)
1318 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1319 _("register number"));
1323 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1324 int idx
, int lower_bound
, int upper_bound
)
1326 if (mismatch_detail
== NULL
)
1328 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1329 _("register element index"));
1333 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1334 int idx
, int lower_bound
, int upper_bound
)
1336 if (mismatch_detail
== NULL
)
1338 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1342 /* Report that the MUL modifier in operand IDX should be in the range
1343 [LOWER_BOUND, UPPER_BOUND]. */
1345 set_multiplier_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1346 int idx
, int lower_bound
, int upper_bound
)
1348 if (mismatch_detail
== NULL
)
1350 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1355 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1358 if (mismatch_detail
== NULL
)
1360 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1361 mismatch_detail
->data
[0] = alignment
;
1365 set_reg_list_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1368 if (mismatch_detail
== NULL
)
1370 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST
, idx
, NULL
);
1371 mismatch_detail
->data
[0] = expected_num
;
1375 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1378 if (mismatch_detail
== NULL
)
1380 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
/* General constraint checking based on operand code.

   Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
   as the IDXth operand of opcode OPCODE.  Otherwise return 0.

   This function has to be called after the qualifiers for all operands
   have been resolved.

   Mismatching error message is returned in *MISMATCH_DETAIL upon request,
   i.e. when MISMATCH_DETAIL is non-NULL.  This avoids the generation
   of error message during the disassembling where error message is not
   wanted.  We avoid the dynamic construction of strings of error messages
   here (i.e. in libopcodes), as it is costly and complicated; instead, we
   use a combination of error code, static string and some integer data to
   represent an error.  */
1400 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1401 enum aarch64_opnd type
,
1402 const aarch64_opcode
*opcode
,
1403 aarch64_operand_error
*mismatch_detail
)
1405 unsigned num
, modifiers
, shift
;
1407 int64_t imm
, min_value
, max_value
;
1408 uint64_t uvalue
, mask
;
1409 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1410 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1412 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1414 switch (aarch64_operands
[type
].op_class
)
1416 case AARCH64_OPND_CLASS_INT_REG
:
1417 /* Check pair reg constraints for cas* instructions. */
1418 if (type
== AARCH64_OPND_PAIRREG
)
1420 assert (idx
== 1 || idx
== 3);
1421 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1423 set_syntax_error (mismatch_detail
, idx
- 1,
1424 _("reg pair must start from even reg"));
1427 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1429 set_syntax_error (mismatch_detail
, idx
,
1430 _("reg pair must be contiguous"));
1436 /* <Xt> may be optional in some IC and TLBI instructions. */
1437 if (type
== AARCH64_OPND_Rt_SYS
)
1439 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1440 == AARCH64_OPND_CLASS_SYSTEM
));
1441 if (opnds
[1].present
1442 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1444 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1447 if (!opnds
[1].present
1448 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1450 set_other_error (mismatch_detail
, idx
, _("missing register"));
1456 case AARCH64_OPND_QLF_WSP
:
1457 case AARCH64_OPND_QLF_SP
:
1458 if (!aarch64_stack_pointer_p (opnd
))
1460 set_other_error (mismatch_detail
, idx
,
1461 _("stack pointer register expected"));
1470 case AARCH64_OPND_CLASS_SVE_REG
:
1473 case AARCH64_OPND_SVE_Zn_INDEX
:
1474 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1475 if (!value_in_range_p (opnd
->reglane
.index
, 0, 64 / size
- 1))
1477 set_elem_idx_out_of_range_error (mismatch_detail
, idx
,
1483 case AARCH64_OPND_SVE_ZnxN
:
1484 case AARCH64_OPND_SVE_ZtxN
:
1485 if (opnd
->reglist
.num_regs
!= get_opcode_dependent_value (opcode
))
1487 set_other_error (mismatch_detail
, idx
,
1488 _("invalid register list"));
1498 case AARCH64_OPND_CLASS_PRED_REG
:
1499 if (opnd
->reg
.regno
>= 8
1500 && get_operand_fields_width (get_operand_from_code (type
)) == 3)
1502 set_other_error (mismatch_detail
, idx
, _("p0-p7 expected"));
1507 case AARCH64_OPND_CLASS_COND
:
1508 if (type
== AARCH64_OPND_COND1
1509 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
1511 /* Not allow AL or NV. */
1512 set_syntax_error (mismatch_detail
, idx
, NULL
);
1516 case AARCH64_OPND_CLASS_ADDRESS
:
1517 /* Check writeback. */
1518 switch (opcode
->iclass
)
1522 case ldstnapair_offs
:
1525 if (opnd
->addr
.writeback
== 1)
1527 set_syntax_error (mismatch_detail
, idx
,
1528 _("unexpected address writeback"));
1533 if (opnd
->addr
.writeback
== 1 && opnd
->addr
.preind
!= 1)
1535 set_syntax_error (mismatch_detail
, idx
,
1536 _("unexpected address writeback"));
1541 case ldstpair_indexed
:
1544 if (opnd
->addr
.writeback
== 0)
1546 set_syntax_error (mismatch_detail
, idx
,
1547 _("address writeback expected"));
1552 assert (opnd
->addr
.writeback
== 0);
1557 case AARCH64_OPND_ADDR_SIMM7
:
1558 /* Scaled signed 7 bits immediate offset. */
1559 /* Get the size of the data element that is accessed, which may be
1560 different from that of the source register size,
1561 e.g. in strb/ldrb. */
1562 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1563 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
1565 set_offset_out_of_range_error (mismatch_detail
, idx
,
1566 -64 * size
, 63 * size
);
1569 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1571 set_unaligned_error (mismatch_detail
, idx
, size
);
1575 case AARCH64_OPND_ADDR_SIMM9
:
1576 /* Unscaled signed 9 bits immediate offset. */
1577 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
1579 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
1584 case AARCH64_OPND_ADDR_SIMM9_2
:
1585 /* Unscaled signed 9 bits immediate offset, which has to be negative
1587 size
= aarch64_get_qualifier_esize (qualifier
);
1588 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
1589 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1590 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
1592 set_other_error (mismatch_detail
, idx
,
1593 _("negative or unaligned offset expected"));
1596 case AARCH64_OPND_ADDR_SIMM10
:
1597 /* Scaled signed 10 bits immediate offset. */
1598 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -4096, 4088))
1600 set_offset_out_of_range_error (mismatch_detail
, idx
, -4096, 4088);
1603 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 8))
1605 set_unaligned_error (mismatch_detail
, idx
, 8);
1610 case AARCH64_OPND_SIMD_ADDR_POST
:
1611 /* AdvSIMD load/store multiple structures, post-index. */
1613 if (opnd
->addr
.offset
.is_reg
)
1615 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
1619 set_other_error (mismatch_detail
, idx
,
1620 _("invalid register offset"));
1626 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
1627 unsigned num_bytes
; /* total number of bytes transferred. */
1628 /* The opcode dependent area stores the number of elements in
1629 each structure to be loaded/stored. */
1630 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
1631 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
1632 /* Special handling of loading single structure to all lane. */
1633 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
1634 * aarch64_get_qualifier_esize (prev
->qualifier
);
1636 num_bytes
= prev
->reglist
.num_regs
1637 * aarch64_get_qualifier_esize (prev
->qualifier
)
1638 * aarch64_get_qualifier_nelem (prev
->qualifier
);
1639 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
1641 set_other_error (mismatch_detail
, idx
,
1642 _("invalid post-increment amount"));
1648 case AARCH64_OPND_ADDR_REGOFF
:
1649 /* Get the size of the data element that is accessed, which may be
1650 different from that of the source register size,
1651 e.g. in strb/ldrb. */
1652 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1653 /* It is either no shift or shift by the binary logarithm of SIZE. */
1654 if (opnd
->shifter
.amount
!= 0
1655 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
1657 set_other_error (mismatch_detail
, idx
,
1658 _("invalid shift amount"));
1661 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1663 switch (opnd
->shifter
.kind
)
1665 case AARCH64_MOD_UXTW
:
1666 case AARCH64_MOD_LSL
:
1667 case AARCH64_MOD_SXTW
:
1668 case AARCH64_MOD_SXTX
: break;
1670 set_other_error (mismatch_detail
, idx
,
1671 _("invalid extend/shift operator"));
1676 case AARCH64_OPND_ADDR_UIMM12
:
1677 imm
= opnd
->addr
.offset
.imm
;
1678 /* Get the size of the data element that is accessed, which may be
1679 different from that of the source register size,
1680 e.g. in strb/ldrb. */
1681 size
= aarch64_get_qualifier_esize (qualifier
);
1682 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
1684 set_offset_out_of_range_error (mismatch_detail
, idx
,
1688 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1690 set_unaligned_error (mismatch_detail
, idx
, size
);
1695 case AARCH64_OPND_ADDR_PCREL14
:
1696 case AARCH64_OPND_ADDR_PCREL19
:
1697 case AARCH64_OPND_ADDR_PCREL21
:
1698 case AARCH64_OPND_ADDR_PCREL26
:
1699 imm
= opnd
->imm
.value
;
1700 if (operand_need_shift_by_two (get_operand_from_code (type
)))
1702 /* The offset value in a PC-relative branch instruction is alway
1703 4-byte aligned and is encoded without the lowest 2 bits. */
1704 if (!value_aligned_p (imm
, 4))
1706 set_unaligned_error (mismatch_detail
, idx
, 4);
1709 /* Right shift by 2 so that we can carry out the following check
1713 size
= get_operand_fields_width (get_operand_from_code (type
));
1714 if (!value_fit_signed_field_p (imm
, size
))
1716 set_other_error (mismatch_detail
, idx
,
1717 _("immediate out of range"));
1722 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
1723 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
1724 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
1725 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
1729 assert (!opnd
->addr
.offset
.is_reg
);
1730 assert (opnd
->addr
.preind
);
1731 num
= 1 + get_operand_specific_data (&aarch64_operands
[type
]);
1734 if ((opnd
->addr
.offset
.imm
!= 0 && !opnd
->shifter
.operator_present
)
1735 || (opnd
->shifter
.operator_present
1736 && opnd
->shifter
.kind
!= AARCH64_MOD_MUL_VL
))
1738 set_other_error (mismatch_detail
, idx
,
1739 _("invalid addressing mode"));
1742 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1744 set_offset_out_of_range_error (mismatch_detail
, idx
,
1745 min_value
, max_value
);
1748 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1750 set_unaligned_error (mismatch_detail
, idx
, num
);
1755 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
1758 goto sve_imm_offset_vl
;
1760 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
1763 goto sve_imm_offset_vl
;
1765 case AARCH64_OPND_SVE_ADDR_RI_U6
:
1766 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
1767 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
1768 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
1772 assert (!opnd
->addr
.offset
.is_reg
);
1773 assert (opnd
->addr
.preind
);
1774 num
= 1 << get_operand_specific_data (&aarch64_operands
[type
]);
1777 if (opnd
->shifter
.operator_present
1778 || opnd
->shifter
.amount_present
)
1780 set_other_error (mismatch_detail
, idx
,
1781 _("invalid addressing mode"));
1784 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1786 set_offset_out_of_range_error (mismatch_detail
, idx
,
1787 min_value
, max_value
);
1790 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1792 set_unaligned_error (mismatch_detail
, idx
, num
);
1797 case AARCH64_OPND_SVE_ADDR_RR
:
1798 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
1799 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
1800 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
1801 case AARCH64_OPND_SVE_ADDR_RX
:
1802 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
1803 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
1804 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
1805 case AARCH64_OPND_SVE_ADDR_RZ
:
1806 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
1807 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
1808 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
1809 modifiers
= 1 << AARCH64_MOD_LSL
;
1811 assert (opnd
->addr
.offset
.is_reg
);
1812 assert (opnd
->addr
.preind
);
1813 if ((aarch64_operands
[type
].flags
& OPD_F_NO_ZR
) != 0
1814 && opnd
->addr
.offset
.regno
== 31)
1816 set_other_error (mismatch_detail
, idx
,
1817 _("index register xzr is not allowed"));
1820 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1821 || (opnd
->shifter
.amount
1822 != get_operand_specific_data (&aarch64_operands
[type
])))
1824 set_other_error (mismatch_detail
, idx
,
1825 _("invalid addressing mode"));
1830 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
1831 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
1832 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
1833 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
1834 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
1835 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
1836 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
1837 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
1838 modifiers
= (1 << AARCH64_MOD_SXTW
) | (1 << AARCH64_MOD_UXTW
);
1839 goto sve_rr_operand
;
1841 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
1842 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
1843 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
1844 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
1847 goto sve_imm_offset
;
1849 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
1850 modifiers
= 1 << AARCH64_MOD_LSL
;
1852 assert (opnd
->addr
.offset
.is_reg
);
1853 assert (opnd
->addr
.preind
);
1854 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1855 || opnd
->shifter
.amount
< 0
1856 || opnd
->shifter
.amount
> 3)
1858 set_other_error (mismatch_detail
, idx
,
1859 _("invalid addressing mode"));
1864 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
1865 modifiers
= (1 << AARCH64_MOD_SXTW
);
1866 goto sve_zz_operand
;
1868 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
1869 modifiers
= 1 << AARCH64_MOD_UXTW
;
1870 goto sve_zz_operand
;
1877 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
1878 if (type
== AARCH64_OPND_LEt
)
1880 /* Get the upper bound for the element index. */
1881 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1882 if (!value_in_range_p (opnd
->reglist
.index
, 0, num
))
1884 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1888 /* The opcode dependent area stores the number of elements in
1889 each structure to be loaded/stored. */
1890 num
= get_opcode_dependent_value (opcode
);
1893 case AARCH64_OPND_LVt
:
1894 assert (num
>= 1 && num
<= 4);
1895 /* Unless LD1/ST1, the number of registers should be equal to that
1896 of the structure elements. */
1897 if (num
!= 1 && opnd
->reglist
.num_regs
!= num
)
1899 set_reg_list_error (mismatch_detail
, idx
, num
);
1903 case AARCH64_OPND_LVt_AL
:
1904 case AARCH64_OPND_LEt
:
1905 assert (num
>= 1 && num
<= 4);
1906 /* The number of registers should be equal to that of the structure
1908 if (opnd
->reglist
.num_regs
!= num
)
1910 set_reg_list_error (mismatch_detail
, idx
, num
);
1919 case AARCH64_OPND_CLASS_IMMEDIATE
:
1920 /* Constraint check on immediate operand. */
1921 imm
= opnd
->imm
.value
;
1922 /* E.g. imm_0_31 constrains value to be 0..31. */
1923 if (qualifier_value_in_range_constraint_p (qualifier
)
1924 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
1925 get_upper_bound (qualifier
)))
1927 set_imm_out_of_range_error (mismatch_detail
, idx
,
1928 get_lower_bound (qualifier
),
1929 get_upper_bound (qualifier
));
1935 case AARCH64_OPND_AIMM
:
1936 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1938 set_other_error (mismatch_detail
, idx
,
1939 _("invalid shift operator"));
1942 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
1944 set_other_error (mismatch_detail
, idx
,
1945 _("shift amount must be 0 or 12"));
1948 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
1950 set_other_error (mismatch_detail
, idx
,
1951 _("immediate out of range"));
1956 case AARCH64_OPND_HALF
:
1957 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
1958 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1960 set_other_error (mismatch_detail
, idx
,
1961 _("invalid shift operator"));
1964 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1965 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
1967 set_other_error (mismatch_detail
, idx
,
1968 _("shift amount must be a multiple of 16"));
1971 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
1973 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
1977 if (opnd
->imm
.value
< 0)
1979 set_other_error (mismatch_detail
, idx
,
1980 _("negative immediate value not allowed"));
1983 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
1985 set_other_error (mismatch_detail
, idx
,
1986 _("immediate out of range"));
1991 case AARCH64_OPND_IMM_MOV
:
1993 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1994 imm
= opnd
->imm
.value
;
1998 case OP_MOV_IMM_WIDEN
:
2001 case OP_MOV_IMM_WIDE
:
2002 if (!aarch64_wide_constant_p (imm
, esize
== 4, NULL
))
2004 set_other_error (mismatch_detail
, idx
,
2005 _("immediate out of range"));
2009 case OP_MOV_IMM_LOG
:
2010 if (!aarch64_logical_immediate_p (imm
, esize
, NULL
))
2012 set_other_error (mismatch_detail
, idx
,
2013 _("immediate out of range"));
2024 case AARCH64_OPND_NZCV
:
2025 case AARCH64_OPND_CCMP_IMM
:
2026 case AARCH64_OPND_EXCEPTION
:
2027 case AARCH64_OPND_UIMM4
:
2028 case AARCH64_OPND_UIMM7
:
2029 case AARCH64_OPND_UIMM3_OP1
:
2030 case AARCH64_OPND_UIMM3_OP2
:
2031 case AARCH64_OPND_SVE_UIMM3
:
2032 case AARCH64_OPND_SVE_UIMM7
:
2033 case AARCH64_OPND_SVE_UIMM8
:
2034 case AARCH64_OPND_SVE_UIMM8_53
:
2035 size
= get_operand_fields_width (get_operand_from_code (type
));
2037 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
2039 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2045 case AARCH64_OPND_SIMM5
:
2046 case AARCH64_OPND_SVE_SIMM5
:
2047 case AARCH64_OPND_SVE_SIMM5B
:
2048 case AARCH64_OPND_SVE_SIMM6
:
2049 case AARCH64_OPND_SVE_SIMM8
:
2050 size
= get_operand_fields_width (get_operand_from_code (type
));
2052 if (!value_fit_signed_field_p (opnd
->imm
.value
, size
))
2054 set_imm_out_of_range_error (mismatch_detail
, idx
,
2056 (1 << (size
- 1)) - 1);
2061 case AARCH64_OPND_WIDTH
:
2062 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
2063 && opnds
[0].type
== AARCH64_OPND_Rd
);
2064 size
= get_upper_bound (qualifier
);
2065 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
2066 /* lsb+width <= reg.size */
2068 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
2069 size
- opnds
[idx
-1].imm
.value
);
2074 case AARCH64_OPND_LIMM
:
2075 case AARCH64_OPND_SVE_LIMM
:
2077 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2078 uint64_t uimm
= opnd
->imm
.value
;
2079 if (opcode
->op
== OP_BIC
)
2081 if (aarch64_logical_immediate_p (uimm
, esize
, NULL
) == FALSE
)
2083 set_other_error (mismatch_detail
, idx
,
2084 _("immediate out of range"));
2090 case AARCH64_OPND_IMM0
:
2091 case AARCH64_OPND_FPIMM0
:
2092 if (opnd
->imm
.value
!= 0)
2094 set_other_error (mismatch_detail
, idx
,
2095 _("immediate zero expected"));
2100 case AARCH64_OPND_SHLL_IMM
:
2102 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2103 if (opnd
->imm
.value
!= size
)
2105 set_other_error (mismatch_detail
, idx
,
2106 _("invalid shift amount"));
2111 case AARCH64_OPND_IMM_VLSL
:
2112 size
= aarch64_get_qualifier_esize (qualifier
);
2113 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
2115 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2121 case AARCH64_OPND_IMM_VLSR
:
2122 size
= aarch64_get_qualifier_esize (qualifier
);
2123 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
2125 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
2130 case AARCH64_OPND_SIMD_IMM
:
2131 case AARCH64_OPND_SIMD_IMM_SFT
:
2132 /* Qualifier check. */
2135 case AARCH64_OPND_QLF_LSL
:
2136 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2138 set_other_error (mismatch_detail
, idx
,
2139 _("invalid shift operator"));
2143 case AARCH64_OPND_QLF_MSL
:
2144 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
2146 set_other_error (mismatch_detail
, idx
,
2147 _("invalid shift operator"));
2151 case AARCH64_OPND_QLF_NIL
:
2152 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2154 set_other_error (mismatch_detail
, idx
,
2155 _("shift is not permitted"));
2163 /* Is the immediate valid? */
2165 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
2167 /* uimm8 or simm8 */
2168 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
2170 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
2174 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
2177 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2178 ffffffffgggggggghhhhhhhh'. */
2179 set_other_error (mismatch_detail
, idx
,
2180 _("invalid value for immediate"));
2183 /* Is the shift amount valid? */
2184 switch (opnd
->shifter
.kind
)
2186 case AARCH64_MOD_LSL
:
2187 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2188 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
2190 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
2194 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
2196 set_unaligned_error (mismatch_detail
, idx
, 8);
2200 case AARCH64_MOD_MSL
:
2201 /* Only 8 and 16 are valid shift amount. */
2202 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
2204 set_other_error (mismatch_detail
, idx
,
2205 _("shift amount must be 0 or 16"));
2210 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2212 set_other_error (mismatch_detail
, idx
,
2213 _("invalid shift operator"));
2220 case AARCH64_OPND_FPIMM
:
2221 case AARCH64_OPND_SIMD_FPIMM
:
2222 case AARCH64_OPND_SVE_FPIMM8
:
2223 if (opnd
->imm
.is_fp
== 0)
2225 set_other_error (mismatch_detail
, idx
,
2226 _("floating-point immediate expected"));
2229 /* The value is expected to be an 8-bit floating-point constant with
2230 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2231 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2233 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
2235 set_other_error (mismatch_detail
, idx
,
2236 _("immediate out of range"));
2239 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2241 set_other_error (mismatch_detail
, idx
,
2242 _("invalid shift operator"));
2247 case AARCH64_OPND_SVE_AIMM
:
2250 assert (opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2251 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2252 mask
= ~((uint64_t) -1 << (size
* 4) << (size
* 4));
2253 uvalue
= opnd
->imm
.value
;
2254 shift
= opnd
->shifter
.amount
;
2259 set_other_error (mismatch_detail
, idx
,
2260 _("no shift amount allowed for"
2261 " 8-bit constants"));
2267 if (shift
!= 0 && shift
!= 8)
2269 set_other_error (mismatch_detail
, idx
,
2270 _("shift amount must be 0 or 8"));
2273 if (shift
== 0 && (uvalue
& 0xff) == 0)
2276 uvalue
= (int64_t) uvalue
/ 256;
2280 if ((uvalue
& mask
) != uvalue
&& (uvalue
| ~mask
) != uvalue
)
2282 set_other_error (mismatch_detail
, idx
,
2283 _("immediate too big for element size"));
2286 uvalue
= (uvalue
- min_value
) & mask
;
2289 set_other_error (mismatch_detail
, idx
,
2290 _("invalid arithmetic immediate"));
2295 case AARCH64_OPND_SVE_ASIMM
:
2299 case AARCH64_OPND_SVE_I1_HALF_ONE
:
2300 assert (opnd
->imm
.is_fp
);
2301 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x3f800000)
2303 set_other_error (mismatch_detail
, idx
,
2304 _("floating-point value must be 0.5 or 1.0"));
2309 case AARCH64_OPND_SVE_I1_HALF_TWO
:
2310 assert (opnd
->imm
.is_fp
);
2311 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x40000000)
2313 set_other_error (mismatch_detail
, idx
,
2314 _("floating-point value must be 0.5 or 2.0"));
2319 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
2320 assert (opnd
->imm
.is_fp
);
2321 if (opnd
->imm
.value
!= 0 && opnd
->imm
.value
!= 0x3f800000)
2323 set_other_error (mismatch_detail
, idx
,
2324 _("floating-point value must be 0.0 or 1.0"));
2329 case AARCH64_OPND_SVE_INV_LIMM
:
2331 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2332 uint64_t uimm
= ~opnd
->imm
.value
;
2333 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2335 set_other_error (mismatch_detail
, idx
,
2336 _("immediate out of range"));
2342 case AARCH64_OPND_SVE_LIMM_MOV
:
2344 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2345 uint64_t uimm
= opnd
->imm
.value
;
2346 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2348 set_other_error (mismatch_detail
, idx
,
2349 _("immediate out of range"));
2352 if (!aarch64_sve_dupm_mov_immediate_p (uimm
, esize
))
2354 set_other_error (mismatch_detail
, idx
,
2355 _("invalid replicated MOV immediate"));
2361 case AARCH64_OPND_SVE_PATTERN_SCALED
:
2362 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL
);
2363 if (!value_in_range_p (opnd
->shifter
.amount
, 1, 16))
2365 set_multiplier_out_of_range_error (mismatch_detail
, idx
, 1, 16);
2370 case AARCH64_OPND_SVE_SHLIMM_PRED
:
2371 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
2372 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2373 if (!value_in_range_p (opnd
->imm
.value
, 0, 8 * size
- 1))
2375 set_imm_out_of_range_error (mismatch_detail
, idx
,
2381 case AARCH64_OPND_SVE_SHRIMM_PRED
:
2382 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
2383 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2384 if (!value_in_range_p (opnd
->imm
.value
, 1, 8 * size
))
2386 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, 8 * size
);
2396 case AARCH64_OPND_CLASS_CP_REG
:
2397 /* Cn or Cm: 4-bit opcode field named for historical reasons.
2398 valid range: C0 - C15. */
2399 if (opnd
->reg
.regno
> 15)
2401 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2406 case AARCH64_OPND_CLASS_SYSTEM
:
2409 case AARCH64_OPND_PSTATEFIELD
:
2410 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
2413 The immediate must be #0 or #1. */
2414 if ((opnd
->pstatefield
== 0x03 /* UAO. */
2415 || opnd
->pstatefield
== 0x04) /* PAN. */
2416 && opnds
[1].imm
.value
> 1)
2418 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2421 /* MSR SPSel, #uimm4
2422 Uses uimm4 as a control value to select the stack pointer: if
2423 bit 0 is set it selects the current exception level's stack
2424 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2425 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2426 if (opnd
->pstatefield
== 0x05 /* spsel */ && opnds
[1].imm
.value
> 1)
2428 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2437 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
2438 /* Get the upper bound for the element index. */
2439 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
2440 /* Index out-of-range. */
2441 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
2443 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2446 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2447 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2448 number is encoded in "size:M:Rm":
2454 if (type
== AARCH64_OPND_Em
&& qualifier
== AARCH64_OPND_QLF_S_H
2455 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
2457 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2462 case AARCH64_OPND_CLASS_MODIFIED_REG
:
2463 assert (idx
== 1 || idx
== 2);
2466 case AARCH64_OPND_Rm_EXT
:
2467 if (aarch64_extend_operator_p (opnd
->shifter
.kind
) == FALSE
2468 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2470 set_other_error (mismatch_detail
, idx
,
2471 _("extend operator expected"));
2474 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2475 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2476 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2478 if (!aarch64_stack_pointer_p (opnds
+ 0)
2479 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
2481 if (!opnd
->shifter
.operator_present
)
2483 set_other_error (mismatch_detail
, idx
,
2484 _("missing extend operator"));
2487 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2489 set_other_error (mismatch_detail
, idx
,
2490 _("'LSL' operator not allowed"));
2494 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
2495 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2496 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
2498 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
2501 /* In the 64-bit form, the final register operand is written as Wm
2502 for all but the (possibly omitted) UXTX/LSL and SXTX
2504 N.B. GAS allows X register to be used with any operator as a
2505 programming convenience. */
2506 if (qualifier
== AARCH64_OPND_QLF_X
2507 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
2508 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
2509 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
2511 set_other_error (mismatch_detail
, idx
, _("W register expected"));
2516 case AARCH64_OPND_Rm_SFT
:
2517 /* ROR is not available to the shifted register operand in
2518 arithmetic instructions. */
2519 if (aarch64_shift_operator_p (opnd
->shifter
.kind
) == FALSE
)
2521 set_other_error (mismatch_detail
, idx
,
2522 _("shift operator expected"));
2525 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
2526 && opcode
->iclass
!= log_shift
)
2528 set_other_error (mismatch_detail
, idx
,
2529 _("'ROR' operator not allowed"));
2532 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
2533 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
2535 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2552 /* Main entrypoint for the operand constraint checking.
2554 Return 1 if operands of *INST meet the constraint applied by the operand
2555 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2556 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2557 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2558 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2559 error kind when it is notified that an instruction does not pass the check).
2561 Un-determined operand qualifiers may get established during the process. */
2564 aarch64_match_operands_constraint (aarch64_inst
*inst
,
2565 aarch64_operand_error
*mismatch_detail
)
2569 DEBUG_TRACE ("enter");
2571 /* Check for cases where a source register needs to be the same as the
2572 destination register. Do this before matching qualifiers since if
2573 an instruction has both invalid tying and invalid qualifiers,
2574 the error about qualifiers would suggest several alternative
2575 instructions that also have invalid tying. */
2576 i
= inst
->opcode
->tied_operand
;
2577 if (i
> 0 && (inst
->operands
[0].reg
.regno
!= inst
->operands
[i
].reg
.regno
))
2579 if (mismatch_detail
)
2581 mismatch_detail
->kind
= AARCH64_OPDE_UNTIED_OPERAND
;
2582 mismatch_detail
->index
= i
;
2583 mismatch_detail
->error
= NULL
;
2588 /* Match operands' qualifier.
2589 *INST has already had qualifier establish for some, if not all, of
2590 its operands; we need to find out whether these established
2591 qualifiers match one of the qualifier sequence in
2592 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2593 with the corresponding qualifier in such a sequence.
2594 Only basic operand constraint checking is done here; the more thorough
2595 constraint checking will carried out by operand_general_constraint_met_p,
2596 which has be to called after this in order to get all of the operands'
2597 qualifiers established. */
2598 if (match_operands_qualifier (inst
, TRUE
/* update_p */) == 0)
2600 DEBUG_TRACE ("FAIL on operand qualifier matching");
2601 if (mismatch_detail
)
2603 /* Return an error type to indicate that it is the qualifier
2604 matching failure; we don't care about which operand as there
2605 are enough information in the opcode table to reproduce it. */
2606 mismatch_detail
->kind
= AARCH64_OPDE_INVALID_VARIANT
;
2607 mismatch_detail
->index
= -1;
2608 mismatch_detail
->error
= NULL
;
2613 /* Match operands' constraint. */
2614 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2616 enum aarch64_opnd type
= inst
->opcode
->operands
[i
];
2617 if (type
== AARCH64_OPND_NIL
)
2619 if (inst
->operands
[i
].skip
)
2621 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2624 if (operand_general_constraint_met_p (inst
->operands
, i
, type
,
2625 inst
->opcode
, mismatch_detail
) == 0)
2627 DEBUG_TRACE ("FAIL on operand %d", i
);
2632 DEBUG_TRACE ("PASS");
2637 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2638 Also updates the TYPE of each INST->OPERANDS with the corresponding
2639 value of OPCODE->OPERANDS.
2641 Note that some operand qualifiers may need to be manually cleared by
2642 the caller before it further calls the aarch64_opcode_encode; by
2643 doing this, it helps the qualifier matching facilities work
2646 const aarch64_opcode
*
2647 aarch64_replace_opcode (aarch64_inst
*inst
, const aarch64_opcode
*opcode
)
2650 const aarch64_opcode
*old
= inst
->opcode
;
2652 inst
->opcode
= opcode
;
2654 /* Update the operand types. */
2655 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2657 inst
->operands
[i
].type
= opcode
->operands
[i
];
2658 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
2662 DEBUG_TRACE ("replace %s with %s", old
->name
, opcode
->name
);
2668 aarch64_operand_index (const enum aarch64_opnd
*operands
, enum aarch64_opnd operand
)
2671 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2672 if (operands
[i
] == operand
)
2674 else if (operands
[i
] == AARCH64_OPND_NIL
)
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* Names of the 31 general-purpose integer registers plus the register-31
   alias appropriate to the addressing context:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};
2698 /* Names of the SVE vector registers, first with .S suffixes,
2699 then with .D suffixes. */
2701 static const char *sve_reg
[2][32] = {
2702 #define ZS(X) "z" #X ".s"
2703 #define ZD(X) "z" #X ".d"
2704 BANK (ZS
, ZS (31)), BANK (ZD
, ZD (31))
2710 /* Return the integer register name.
2711 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2713 static inline const char *
2714 get_int_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
, int sp_reg_p
)
2716 const int has_zr
= sp_reg_p
? 0 : 1;
2717 const int is_64
= aarch64_get_qualifier_esize (qualifier
) == 4 ? 0 : 1;
2718 return int_reg
[has_zr
][is_64
][regno
];
2721 /* Like get_int_reg_name, but IS_64 is always 1. */
2723 static inline const char *
2724 get_64bit_int_reg_name (int regno
, int sp_reg_p
)
2726 const int has_zr
= sp_reg_p
? 0 : 1;
2727 return int_reg
[has_zr
][1][regno
];
2730 /* Get the name of the integer offset register in OPND, using the shift type
2731 to decide whether it's a word or doubleword. */
2733 static inline const char *
2734 get_offset_int_reg_name (const aarch64_opnd_info
*opnd
)
2736 switch (opnd
->shifter
.kind
)
2738 case AARCH64_MOD_UXTW
:
2739 case AARCH64_MOD_SXTW
:
2740 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_W
, 0);
2742 case AARCH64_MOD_LSL
:
2743 case AARCH64_MOD_SXTX
:
2744 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_X
, 0);
2751 /* Get the name of the SVE vector offset register in OPND, using the operand
2752 qualifier to decide whether the suffix should be .S or .D. */
2754 static inline const char *
2755 get_addr_sve_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
)
2757 assert (qualifier
== AARCH64_OPND_QLF_S_S
2758 || qualifier
== AARCH64_OPND_QLF_S_D
);
2759 return sve_reg
[qualifier
== AARCH64_OPND_QLF_S_D
][regno
];
/* Types for expanding an encoded 8-bit value to a floating-point value.  */

/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6)	*/
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7>              */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)         */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
      return 0;
    }

  return imm;
}
2826 /* Produce the string representation of the register list operand *OPND
2827 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2828 the register name that comes before the register number, such as "v". */
2830 print_register_list (char *buf
, size_t size
, const aarch64_opnd_info
*opnd
,
2833 const int num_regs
= opnd
->reglist
.num_regs
;
2834 const int first_reg
= opnd
->reglist
.first_regno
;
2835 const int last_reg
= (first_reg
+ num_regs
- 1) & 0x1f;
2836 const char *qlf_name
= aarch64_get_qualifier_name (opnd
->qualifier
);
2837 char tb
[8]; /* Temporary buffer. */
2839 assert (opnd
->type
!= AARCH64_OPND_LEt
|| opnd
->reglist
.has_index
);
2840 assert (num_regs
>= 1 && num_regs
<= 4);
2842 /* Prepare the index if any. */
2843 if (opnd
->reglist
.has_index
)
2844 snprintf (tb
, 8, "[%" PRIi64
"]", opnd
->reglist
.index
);
2848 /* The hyphenated form is preferred for disassembly if there are
2849 more than two registers in the list, and the register numbers
2850 are monotonically increasing in increments of one. */
2851 if (num_regs
> 2 && last_reg
> first_reg
)
2852 snprintf (buf
, size
, "{%s%d.%s-%s%d.%s}%s", prefix
, first_reg
, qlf_name
,
2853 prefix
, last_reg
, qlf_name
, tb
);
2856 const int reg0
= first_reg
;
2857 const int reg1
= (first_reg
+ 1) & 0x1f;
2858 const int reg2
= (first_reg
+ 2) & 0x1f;
2859 const int reg3
= (first_reg
+ 3) & 0x1f;
2864 snprintf (buf
, size
, "{%s%d.%s}%s", prefix
, reg0
, qlf_name
, tb
);
2867 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s}%s", prefix
, reg0
, qlf_name
,
2868 prefix
, reg1
, qlf_name
, tb
);
2871 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2872 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
2873 prefix
, reg2
, qlf_name
, tb
);
2876 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2877 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
2878 prefix
, reg2
, qlf_name
, prefix
, reg3
, qlf_name
, tb
);
2884 /* Print the register+immediate address in OPND to BUF, which has SIZE
2885 characters. BASE is the name of the base register. */
2888 print_immediate_offset_address (char *buf
, size_t size
,
2889 const aarch64_opnd_info
*opnd
,
2892 if (opnd
->addr
.writeback
)
2894 if (opnd
->addr
.preind
)
2895 snprintf (buf
, size
, "[%s, #%d]!", base
, opnd
->addr
.offset
.imm
);
2897 snprintf (buf
, size
, "[%s], #%d", base
, opnd
->addr
.offset
.imm
);
2901 if (opnd
->shifter
.operator_present
)
2903 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL_VL
);
2904 snprintf (buf
, size
, "[%s, #%d, mul vl]",
2905 base
, opnd
->addr
.offset
.imm
);
2907 else if (opnd
->addr
.offset
.imm
)
2908 snprintf (buf
, size
, "[%s, #%d]", base
, opnd
->addr
.offset
.imm
);
2910 snprintf (buf
, size
, "[%s]", base
);
2914 /* Produce the string representation of the register offset address operand
2915 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2916 the names of the base and offset registers. */
2918 print_register_offset_address (char *buf
, size_t size
,
2919 const aarch64_opnd_info
*opnd
,
2920 const char *base
, const char *offset
)
2922 char tb
[16]; /* Temporary buffer. */
2923 bfd_boolean print_extend_p
= TRUE
;
2924 bfd_boolean print_amount_p
= TRUE
;
2925 const char *shift_name
= aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
2927 if (!opnd
->shifter
.amount
&& (opnd
->qualifier
!= AARCH64_OPND_QLF_S_B
2928 || !opnd
->shifter
.amount_present
))
2930 /* Not print the shift/extend amount when the amount is zero and
2931 when it is not the special case of 8-bit load/store instruction. */
2932 print_amount_p
= FALSE
;
2933 /* Likewise, no need to print the shift operator LSL in such a
2935 if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2936 print_extend_p
= FALSE
;
2939 /* Prepare for the extend/shift. */
2943 snprintf (tb
, sizeof (tb
), ", %s #%" PRIi64
, shift_name
,
2944 opnd
->shifter
.amount
);
2946 snprintf (tb
, sizeof (tb
), ", %s", shift_name
);
2951 snprintf (buf
, size
, "[%s, %s%s]", base
, offset
, tb
);
2954 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2955 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2956 PC, PCREL_P and ADDRESS are used to pass in and return information about
2957 the PC-relative address calculation, where the PC value is passed in
2958 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2959 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2960 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2962 The function serves both the disassembler and the assembler diagnostics
2963 issuer, which is the reason why it lives in this file. */
2966 aarch64_print_operand (char *buf
, size_t size
, bfd_vma pc
,
2967 const aarch64_opcode
*opcode
,
2968 const aarch64_opnd_info
*opnds
, int idx
, int *pcrel_p
,
2971 unsigned int i
, num_conds
;
2972 const char *name
= NULL
;
2973 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
2974 enum aarch64_modifier_kind kind
;
2975 uint64_t addr
, enum_value
;
2983 case AARCH64_OPND_Rd
:
2984 case AARCH64_OPND_Rn
:
2985 case AARCH64_OPND_Rm
:
2986 case AARCH64_OPND_Rt
:
2987 case AARCH64_OPND_Rt2
:
2988 case AARCH64_OPND_Rs
:
2989 case AARCH64_OPND_Ra
:
2990 case AARCH64_OPND_Rt_SYS
:
2991 case AARCH64_OPND_PAIRREG
:
2992 case AARCH64_OPND_SVE_Rm
:
2993 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2994 the <ic_op>, therefore we we use opnd->present to override the
2995 generic optional-ness information. */
2996 if (opnd
->type
== AARCH64_OPND_Rt_SYS
)
3001 /* Omit the operand, e.g. RET. */
3002 else if (optional_operand_p (opcode
, idx
)
3004 == get_optional_operand_default_value (opcode
)))
3006 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3007 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3008 snprintf (buf
, size
, "%s",
3009 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3012 case AARCH64_OPND_Rd_SP
:
3013 case AARCH64_OPND_Rn_SP
:
3014 case AARCH64_OPND_SVE_Rn_SP
:
3015 case AARCH64_OPND_Rm_SP
:
3016 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3017 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
3018 || opnd
->qualifier
== AARCH64_OPND_QLF_X
3019 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
3020 snprintf (buf
, size
, "%s",
3021 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 1));
3024 case AARCH64_OPND_Rm_EXT
:
3025 kind
= opnd
->shifter
.kind
;
3026 assert (idx
== 1 || idx
== 2);
3027 if ((aarch64_stack_pointer_p (opnds
)
3028 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
3029 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
3030 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
3031 && kind
== AARCH64_MOD_UXTW
)
3032 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
3033 && kind
== AARCH64_MOD_UXTX
)))
3035 /* 'LSL' is the preferred form in this case. */
3036 kind
= AARCH64_MOD_LSL
;
3037 if (opnd
->shifter
.amount
== 0)
3039 /* Shifter omitted. */
3040 snprintf (buf
, size
, "%s",
3041 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3045 if (opnd
->shifter
.amount
)
3046 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3047 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3048 aarch64_operand_modifiers
[kind
].name
,
3049 opnd
->shifter
.amount
);
3051 snprintf (buf
, size
, "%s, %s",
3052 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3053 aarch64_operand_modifiers
[kind
].name
);
3056 case AARCH64_OPND_Rm_SFT
:
3057 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3058 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3059 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3060 snprintf (buf
, size
, "%s",
3061 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3063 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3064 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3065 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3066 opnd
->shifter
.amount
);
3069 case AARCH64_OPND_Fd
:
3070 case AARCH64_OPND_Fn
:
3071 case AARCH64_OPND_Fm
:
3072 case AARCH64_OPND_Fa
:
3073 case AARCH64_OPND_Ft
:
3074 case AARCH64_OPND_Ft2
:
3075 case AARCH64_OPND_Sd
:
3076 case AARCH64_OPND_Sn
:
3077 case AARCH64_OPND_Sm
:
3078 case AARCH64_OPND_SVE_VZn
:
3079 case AARCH64_OPND_SVE_Vd
:
3080 case AARCH64_OPND_SVE_Vm
:
3081 case AARCH64_OPND_SVE_Vn
:
3082 snprintf (buf
, size
, "%s%d", aarch64_get_qualifier_name (opnd
->qualifier
),
3086 case AARCH64_OPND_Vd
:
3087 case AARCH64_OPND_Vn
:
3088 case AARCH64_OPND_Vm
:
3089 snprintf (buf
, size
, "v%d.%s", opnd
->reg
.regno
,
3090 aarch64_get_qualifier_name (opnd
->qualifier
));
3093 case AARCH64_OPND_Ed
:
3094 case AARCH64_OPND_En
:
3095 case AARCH64_OPND_Em
:
3096 snprintf (buf
, size
, "v%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3097 aarch64_get_qualifier_name (opnd
->qualifier
),
3098 opnd
->reglane
.index
);
3101 case AARCH64_OPND_VdD1
:
3102 case AARCH64_OPND_VnD1
:
3103 snprintf (buf
, size
, "v%d.d[1]", opnd
->reg
.regno
);
3106 case AARCH64_OPND_LVn
:
3107 case AARCH64_OPND_LVt
:
3108 case AARCH64_OPND_LVt_AL
:
3109 case AARCH64_OPND_LEt
:
3110 print_register_list (buf
, size
, opnd
, "v");
3113 case AARCH64_OPND_SVE_Pd
:
3114 case AARCH64_OPND_SVE_Pg3
:
3115 case AARCH64_OPND_SVE_Pg4_5
:
3116 case AARCH64_OPND_SVE_Pg4_10
:
3117 case AARCH64_OPND_SVE_Pg4_16
:
3118 case AARCH64_OPND_SVE_Pm
:
3119 case AARCH64_OPND_SVE_Pn
:
3120 case AARCH64_OPND_SVE_Pt
:
3121 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3122 snprintf (buf
, size
, "p%d", opnd
->reg
.regno
);
3123 else if (opnd
->qualifier
== AARCH64_OPND_QLF_P_Z
3124 || opnd
->qualifier
== AARCH64_OPND_QLF_P_M
)
3125 snprintf (buf
, size
, "p%d/%s", opnd
->reg
.regno
,
3126 aarch64_get_qualifier_name (opnd
->qualifier
));
3128 snprintf (buf
, size
, "p%d.%s", opnd
->reg
.regno
,
3129 aarch64_get_qualifier_name (opnd
->qualifier
));
3132 case AARCH64_OPND_SVE_Za_5
:
3133 case AARCH64_OPND_SVE_Za_16
:
3134 case AARCH64_OPND_SVE_Zd
:
3135 case AARCH64_OPND_SVE_Zm_5
:
3136 case AARCH64_OPND_SVE_Zm_16
:
3137 case AARCH64_OPND_SVE_Zn
:
3138 case AARCH64_OPND_SVE_Zt
:
3139 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3140 snprintf (buf
, size
, "z%d", opnd
->reg
.regno
);
3142 snprintf (buf
, size
, "z%d.%s", opnd
->reg
.regno
,
3143 aarch64_get_qualifier_name (opnd
->qualifier
));
3146 case AARCH64_OPND_SVE_ZnxN
:
3147 case AARCH64_OPND_SVE_ZtxN
:
3148 print_register_list (buf
, size
, opnd
, "z");
3151 case AARCH64_OPND_SVE_Zn_INDEX
:
3152 snprintf (buf
, size
, "z%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3153 aarch64_get_qualifier_name (opnd
->qualifier
),
3154 opnd
->reglane
.index
);
3157 case AARCH64_OPND_Cn
:
3158 case AARCH64_OPND_Cm
:
3159 snprintf (buf
, size
, "C%d", opnd
->reg
.regno
);
3162 case AARCH64_OPND_IDX
:
3163 case AARCH64_OPND_IMM
:
3164 case AARCH64_OPND_WIDTH
:
3165 case AARCH64_OPND_UIMM3_OP1
:
3166 case AARCH64_OPND_UIMM3_OP2
:
3167 case AARCH64_OPND_BIT_NUM
:
3168 case AARCH64_OPND_IMM_VLSL
:
3169 case AARCH64_OPND_IMM_VLSR
:
3170 case AARCH64_OPND_SHLL_IMM
:
3171 case AARCH64_OPND_IMM0
:
3172 case AARCH64_OPND_IMMR
:
3173 case AARCH64_OPND_IMMS
:
3174 case AARCH64_OPND_FBITS
:
3175 case AARCH64_OPND_SIMM5
:
3176 case AARCH64_OPND_SVE_SHLIMM_PRED
:
3177 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
3178 case AARCH64_OPND_SVE_SHRIMM_PRED
:
3179 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
3180 case AARCH64_OPND_SVE_SIMM5
:
3181 case AARCH64_OPND_SVE_SIMM5B
:
3182 case AARCH64_OPND_SVE_SIMM6
:
3183 case AARCH64_OPND_SVE_SIMM8
:
3184 case AARCH64_OPND_SVE_UIMM3
:
3185 case AARCH64_OPND_SVE_UIMM7
:
3186 case AARCH64_OPND_SVE_UIMM8
:
3187 case AARCH64_OPND_SVE_UIMM8_53
:
3188 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3191 case AARCH64_OPND_SVE_I1_HALF_ONE
:
3192 case AARCH64_OPND_SVE_I1_HALF_TWO
:
3193 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
3196 c
.i
= opnd
->imm
.value
;
3197 snprintf (buf
, size
, "#%.1f", c
.f
);
3201 case AARCH64_OPND_SVE_PATTERN
:
3202 if (optional_operand_p (opcode
, idx
)
3203 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3205 enum_value
= opnd
->imm
.value
;
3206 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3207 if (aarch64_sve_pattern_array
[enum_value
])
3208 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[enum_value
]);
3210 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3213 case AARCH64_OPND_SVE_PATTERN_SCALED
:
3214 if (optional_operand_p (opcode
, idx
)
3215 && !opnd
->shifter
.operator_present
3216 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3218 enum_value
= opnd
->imm
.value
;
3219 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3220 if (aarch64_sve_pattern_array
[opnd
->imm
.value
])
3221 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[opnd
->imm
.value
]);
3223 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3224 if (opnd
->shifter
.operator_present
)
3226 size_t len
= strlen (buf
);
3227 snprintf (buf
+ len
, size
- len
, ", %s #%" PRIi64
,
3228 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3229 opnd
->shifter
.amount
);
3233 case AARCH64_OPND_SVE_PRFOP
:
3234 enum_value
= opnd
->imm
.value
;
3235 assert (enum_value
< ARRAY_SIZE (aarch64_sve_prfop_array
));
3236 if (aarch64_sve_prfop_array
[enum_value
])
3237 snprintf (buf
, size
, "%s", aarch64_sve_prfop_array
[enum_value
]);
3239 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3242 case AARCH64_OPND_IMM_MOV
:
3243 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3245 case 4: /* e.g. MOV Wd, #<imm32>. */
3247 int imm32
= opnd
->imm
.value
;
3248 snprintf (buf
, size
, "#0x%-20x\t// #%d", imm32
, imm32
);
3251 case 8: /* e.g. MOV Xd, #<imm64>. */
3252 snprintf (buf
, size
, "#0x%-20" PRIx64
"\t// #%" PRIi64
,
3253 opnd
->imm
.value
, opnd
->imm
.value
);
3255 default: assert (0);
3259 case AARCH64_OPND_FPIMM0
:
3260 snprintf (buf
, size
, "#0.0");
3263 case AARCH64_OPND_LIMM
:
3264 case AARCH64_OPND_AIMM
:
3265 case AARCH64_OPND_HALF
:
3266 case AARCH64_OPND_SVE_INV_LIMM
:
3267 case AARCH64_OPND_SVE_LIMM
:
3268 case AARCH64_OPND_SVE_LIMM_MOV
:
3269 if (opnd
->shifter
.amount
)
3270 snprintf (buf
, size
, "#0x%" PRIx64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3271 opnd
->shifter
.amount
);
3273 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3276 case AARCH64_OPND_SIMD_IMM
:
3277 case AARCH64_OPND_SIMD_IMM_SFT
:
3278 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3279 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
3280 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3282 snprintf (buf
, size
, "#0x%" PRIx64
", %s #%" PRIi64
, opnd
->imm
.value
,
3283 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3284 opnd
->shifter
.amount
);
3287 case AARCH64_OPND_SVE_AIMM
:
3288 case AARCH64_OPND_SVE_ASIMM
:
3289 if (opnd
->shifter
.amount
)
3290 snprintf (buf
, size
, "#%" PRIi64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3291 opnd
->shifter
.amount
);
3293 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3296 case AARCH64_OPND_FPIMM
:
3297 case AARCH64_OPND_SIMD_FPIMM
:
3298 case AARCH64_OPND_SVE_FPIMM8
:
3299 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3301 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3304 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
3305 snprintf (buf
, size
, "#%.18e", c
.f
);
3308 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3311 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
3312 snprintf (buf
, size
, "#%.18e", c
.f
);
3315 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3318 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
3319 snprintf (buf
, size
, "#%.18e", c
.d
);
3322 default: assert (0);
3326 case AARCH64_OPND_CCMP_IMM
:
3327 case AARCH64_OPND_NZCV
:
3328 case AARCH64_OPND_EXCEPTION
:
3329 case AARCH64_OPND_UIMM4
:
3330 case AARCH64_OPND_UIMM7
:
3331 if (optional_operand_p (opcode
, idx
) == TRUE
3332 && (opnd
->imm
.value
==
3333 (int64_t) get_optional_operand_default_value (opcode
)))
3334 /* Omit the operand, e.g. DCPS1. */
3336 snprintf (buf
, size
, "#0x%x", (unsigned int)opnd
->imm
.value
);
3339 case AARCH64_OPND_COND
:
3340 case AARCH64_OPND_COND1
:
3341 snprintf (buf
, size
, "%s", opnd
->cond
->names
[0]);
3342 num_conds
= ARRAY_SIZE (opnd
->cond
->names
);
3343 for (i
= 1; i
< num_conds
&& opnd
->cond
->names
[i
]; ++i
)
3345 size_t len
= strlen (buf
);
3347 snprintf (buf
+ len
, size
- len
, " // %s = %s",
3348 opnd
->cond
->names
[0], opnd
->cond
->names
[i
]);
3350 snprintf (buf
+ len
, size
- len
, ", %s",
3351 opnd
->cond
->names
[i
]);
3355 case AARCH64_OPND_ADDR_ADRP
:
3356 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
3362 /* This is not necessary during the disassembling, as print_address_func
3363 in the disassemble_info will take care of the printing. But some
3364 other callers may be still interested in getting the string in *STR,
3365 so here we do snprintf regardless. */
3366 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3369 case AARCH64_OPND_ADDR_PCREL14
:
3370 case AARCH64_OPND_ADDR_PCREL19
:
3371 case AARCH64_OPND_ADDR_PCREL21
:
3372 case AARCH64_OPND_ADDR_PCREL26
:
3373 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
3378 /* This is not necessary during the disassembling, as print_address_func
3379 in the disassemble_info will take care of the printing. But some
3380 other callers may be still interested in getting the string in *STR,
3381 so here we do snprintf regardless. */
3382 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3385 case AARCH64_OPND_ADDR_SIMPLE
:
3386 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
3387 case AARCH64_OPND_SIMD_ADDR_POST
:
3388 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3389 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
3391 if (opnd
->addr
.offset
.is_reg
)
3392 snprintf (buf
, size
, "[%s], x%d", name
, opnd
->addr
.offset
.regno
);
3394 snprintf (buf
, size
, "[%s], #%d", name
, opnd
->addr
.offset
.imm
);
3397 snprintf (buf
, size
, "[%s]", name
);
3400 case AARCH64_OPND_ADDR_REGOFF
:
3401 case AARCH64_OPND_SVE_ADDR_RR
:
3402 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
3403 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
3404 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
3405 case AARCH64_OPND_SVE_ADDR_RX
:
3406 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
3407 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
3408 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
3409 print_register_offset_address
3410 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3411 get_offset_int_reg_name (opnd
));
3414 case AARCH64_OPND_SVE_ADDR_RZ
:
3415 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
3416 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
3417 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
3418 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
3419 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
3420 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
3421 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
3422 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
3423 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
3424 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
3425 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
3426 print_register_offset_address
3427 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3428 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3431 case AARCH64_OPND_ADDR_SIMM7
:
3432 case AARCH64_OPND_ADDR_SIMM9
:
3433 case AARCH64_OPND_ADDR_SIMM9_2
:
3434 case AARCH64_OPND_ADDR_SIMM10
:
3435 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
3436 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
3437 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
3438 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
3439 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
3440 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
3441 case AARCH64_OPND_SVE_ADDR_RI_U6
:
3442 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
3443 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
3444 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
3445 print_immediate_offset_address
3446 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1));
3449 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
3450 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
3451 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
3452 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
3453 print_immediate_offset_address
3455 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
));
3458 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
3459 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
3460 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
3461 print_register_offset_address
3463 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
3464 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3467 case AARCH64_OPND_ADDR_UIMM12
:
3468 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3469 if (opnd
->addr
.offset
.imm
)
3470 snprintf (buf
, size
, "[%s, #%d]", name
, opnd
->addr
.offset
.imm
);
3472 snprintf (buf
, size
, "[%s]", name
);
3475 case AARCH64_OPND_SYSREG
:
3476 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
3477 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
3478 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs
[i
]))
3480 if (aarch64_sys_regs
[i
].name
)
3481 snprintf (buf
, size
, "%s", aarch64_sys_regs
[i
].name
);
3484 /* Implementation defined system register. */
3485 unsigned int value
= opnd
->sysreg
;
3486 snprintf (buf
, size
, "s%u_%u_c%u_c%u_%u", (value
>> 14) & 0x3,
3487 (value
>> 11) & 0x7, (value
>> 7) & 0xf, (value
>> 3) & 0xf,
3492 case AARCH64_OPND_PSTATEFIELD
:
3493 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
3494 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
3496 assert (aarch64_pstatefields
[i
].name
);
3497 snprintf (buf
, size
, "%s", aarch64_pstatefields
[i
].name
);
3500 case AARCH64_OPND_SYSREG_AT
:
3501 case AARCH64_OPND_SYSREG_DC
:
3502 case AARCH64_OPND_SYSREG_IC
:
3503 case AARCH64_OPND_SYSREG_TLBI
:
3504 snprintf (buf
, size
, "%s", opnd
->sysins_op
->name
);
3507 case AARCH64_OPND_BARRIER
:
3508 snprintf (buf
, size
, "%s", opnd
->barrier
->name
);
3511 case AARCH64_OPND_BARRIER_ISB
:
3512 /* Operand can be omitted, e.g. in DCPS1. */
3513 if (! optional_operand_p (opcode
, idx
)
3514 || (opnd
->barrier
->value
3515 != get_optional_operand_default_value (opcode
)))
3516 snprintf (buf
, size
, "#0x%x", opnd
->barrier
->value
);
3519 case AARCH64_OPND_PRFOP
:
3520 if (opnd
->prfop
->name
!= NULL
)
3521 snprintf (buf
, size
, "%s", opnd
->prfop
->name
);
3523 snprintf (buf
, size
, "#0x%02x", opnd
->prfop
->value
);
3526 case AARCH64_OPND_BARRIER_PSB
:
3527 snprintf (buf
, size
, "%s", opnd
->hint_option
->name
);
/* Pack a system-register/system-instruction encoding (op0:op1:CRn:CRm:op2)
   into the 19-bit value used throughout this table.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

#define F_DEPRECATED	0x1	/* Deprecated system register.  */
#define F_ARCHEXT	0x2	/* Architecture dependent system register.  */
#define F_HASXT		0x4	/* System instruction register <Xt>
				   operand.  */

/* TODO there are two more issues need to be resolved
   1. handle read-only and write-only system registers
   2. handle cpu-implementation-defined system registers.  */
3579 const aarch64_sys_reg aarch64_sys_regs
[] =
3581 { "spsr_el1", CPEN_(0,C0
,0), 0 }, /* = spsr_svc */
3582 { "spsr_el12", CPEN_ (5, C0
, 0), F_ARCHEXT
},
3583 { "elr_el1", CPEN_(0,C0
,1), 0 },
3584 { "elr_el12", CPEN_ (5, C0
, 1), F_ARCHEXT
},
3585 { "sp_el0", CPEN_(0,C1
,0), 0 },
3586 { "spsel", CPEN_(0,C2
,0), 0 },
3587 { "daif", CPEN_(3,C2
,1), 0 },
3588 { "currentel", CPEN_(0,C2
,2), 0 }, /* RO */
3589 { "pan", CPEN_(0,C2
,3), F_ARCHEXT
},
3590 { "uao", CPEN_ (0, C2
, 4), F_ARCHEXT
},
3591 { "nzcv", CPEN_(3,C2
,0), 0 },
3592 { "fpcr", CPEN_(3,C4
,0), 0 },
3593 { "fpsr", CPEN_(3,C4
,1), 0 },
3594 { "dspsr_el0", CPEN_(3,C5
,0), 0 },
3595 { "dlr_el0", CPEN_(3,C5
,1), 0 },
3596 { "spsr_el2", CPEN_(4,C0
,0), 0 }, /* = spsr_hyp */
3597 { "elr_el2", CPEN_(4,C0
,1), 0 },
3598 { "sp_el1", CPEN_(4,C1
,0), 0 },
3599 { "spsr_irq", CPEN_(4,C3
,0), 0 },
3600 { "spsr_abt", CPEN_(4,C3
,1), 0 },
3601 { "spsr_und", CPEN_(4,C3
,2), 0 },
3602 { "spsr_fiq", CPEN_(4,C3
,3), 0 },
3603 { "spsr_el3", CPEN_(6,C0
,0), 0 },
3604 { "elr_el3", CPEN_(6,C0
,1), 0 },
3605 { "sp_el2", CPEN_(6,C1
,0), 0 },
3606 { "spsr_svc", CPEN_(0,C0
,0), F_DEPRECATED
}, /* = spsr_el1 */
3607 { "spsr_hyp", CPEN_(4,C0
,0), F_DEPRECATED
}, /* = spsr_el2 */
3608 { "midr_el1", CPENC(3,0,C0
,C0
,0), 0 }, /* RO */
3609 { "ctr_el0", CPENC(3,3,C0
,C0
,1), 0 }, /* RO */
3610 { "mpidr_el1", CPENC(3,0,C0
,C0
,5), 0 }, /* RO */
3611 { "revidr_el1", CPENC(3,0,C0
,C0
,6), 0 }, /* RO */
3612 { "aidr_el1", CPENC(3,1,C0
,C0
,7), 0 }, /* RO */
3613 { "dczid_el0", CPENC(3,3,C0
,C0
,7), 0 }, /* RO */
3614 { "id_dfr0_el1", CPENC(3,0,C0
,C1
,2), 0 }, /* RO */
3615 { "id_pfr0_el1", CPENC(3,0,C0
,C1
,0), 0 }, /* RO */
3616 { "id_pfr1_el1", CPENC(3,0,C0
,C1
,1), 0 }, /* RO */
3617 { "id_afr0_el1", CPENC(3,0,C0
,C1
,3), 0 }, /* RO */
3618 { "id_mmfr0_el1", CPENC(3,0,C0
,C1
,4), 0 }, /* RO */
3619 { "id_mmfr1_el1", CPENC(3,0,C0
,C1
,5), 0 }, /* RO */
3620 { "id_mmfr2_el1", CPENC(3,0,C0
,C1
,6), 0 }, /* RO */
3621 { "id_mmfr3_el1", CPENC(3,0,C0
,C1
,7), 0 }, /* RO */
3622 { "id_mmfr4_el1", CPENC(3,0,C0
,C2
,6), 0 }, /* RO */
3623 { "id_isar0_el1", CPENC(3,0,C0
,C2
,0), 0 }, /* RO */
3624 { "id_isar1_el1", CPENC(3,0,C0
,C2
,1), 0 }, /* RO */
3625 { "id_isar2_el1", CPENC(3,0,C0
,C2
,2), 0 }, /* RO */
3626 { "id_isar3_el1", CPENC(3,0,C0
,C2
,3), 0 }, /* RO */
3627 { "id_isar4_el1", CPENC(3,0,C0
,C2
,4), 0 }, /* RO */
3628 { "id_isar5_el1", CPENC(3,0,C0
,C2
,5), 0 }, /* RO */
3629 { "mvfr0_el1", CPENC(3,0,C0
,C3
,0), 0 }, /* RO */
3630 { "mvfr1_el1", CPENC(3,0,C0
,C3
,1), 0 }, /* RO */
3631 { "mvfr2_el1", CPENC(3,0,C0
,C3
,2), 0 }, /* RO */
3632 { "ccsidr_el1", CPENC(3,1,C0
,C0
,0), 0 }, /* RO */
3633 { "id_aa64pfr0_el1", CPENC(3,0,C0
,C4
,0), 0 }, /* RO */
3634 { "id_aa64pfr1_el1", CPENC(3,0,C0
,C4
,1), 0 }, /* RO */
3635 { "id_aa64dfr0_el1", CPENC(3,0,C0
,C5
,0), 0 }, /* RO */
3636 { "id_aa64dfr1_el1", CPENC(3,0,C0
,C5
,1), 0 }, /* RO */
3637 { "id_aa64isar0_el1", CPENC(3,0,C0
,C6
,0), 0 }, /* RO */
3638 { "id_aa64isar1_el1", CPENC(3,0,C0
,C6
,1), 0 }, /* RO */
3639 { "id_aa64mmfr0_el1", CPENC(3,0,C0
,C7
,0), 0 }, /* RO */
3640 { "id_aa64mmfr1_el1", CPENC(3,0,C0
,C7
,1), 0 }, /* RO */
3641 { "id_aa64mmfr2_el1", CPENC (3, 0, C0
, C7
, 2), F_ARCHEXT
}, /* RO */
3642 { "id_aa64afr0_el1", CPENC(3,0,C0
,C5
,4), 0 }, /* RO */
3643 { "id_aa64afr1_el1", CPENC(3,0,C0
,C5
,5), 0 }, /* RO */
3644 { "clidr_el1", CPENC(3,1,C0
,C0
,1), 0 }, /* RO */
3645 { "csselr_el1", CPENC(3,2,C0
,C0
,0), 0 }, /* RO */
3646 { "vpidr_el2", CPENC(3,4,C0
,C0
,0), 0 },
3647 { "vmpidr_el2", CPENC(3,4,C0
,C0
,5), 0 },
3648 { "sctlr_el1", CPENC(3,0,C1
,C0
,0), 0 },
3649 { "sctlr_el2", CPENC(3,4,C1
,C0
,0), 0 },
3650 { "sctlr_el3", CPENC(3,6,C1
,C0
,0), 0 },
3651 { "sctlr_el12", CPENC (3, 5, C1
, C0
, 0), F_ARCHEXT
},
3652 { "actlr_el1", CPENC(3,0,C1
,C0
,1), 0 },
3653 { "actlr_el2", CPENC(3,4,C1
,C0
,1), 0 },
3654 { "actlr_el3", CPENC(3,6,C1
,C0
,1), 0 },
3655 { "cpacr_el1", CPENC(3,0,C1
,C0
,2), 0 },
3656 { "cpacr_el12", CPENC (3, 5, C1
, C0
, 2), F_ARCHEXT
},
3657 { "cptr_el2", CPENC(3,4,C1
,C1
,2), 0 },
3658 { "cptr_el3", CPENC(3,6,C1
,C1
,2), 0 },
3659 { "scr_el3", CPENC(3,6,C1
,C1
,0), 0 },
3660 { "hcr_el2", CPENC(3,4,C1
,C1
,0), 0 },
3661 { "mdcr_el2", CPENC(3,4,C1
,C1
,1), 0 },
3662 { "mdcr_el3", CPENC(3,6,C1
,C3
,1), 0 },
3663 { "hstr_el2", CPENC(3,4,C1
,C1
,3), 0 },
3664 { "hacr_el2", CPENC(3,4,C1
,C1
,7), 0 },
3665 { "ttbr0_el1", CPENC(3,0,C2
,C0
,0), 0 },
3666 { "ttbr1_el1", CPENC(3,0,C2
,C0
,1), 0 },
3667 { "ttbr0_el2", CPENC(3,4,C2
,C0
,0), 0 },
3668 { "ttbr1_el2", CPENC (3, 4, C2
, C0
, 1), F_ARCHEXT
},
3669 { "ttbr0_el3", CPENC(3,6,C2
,C0
,0), 0 },
3670 { "ttbr0_el12", CPENC (3, 5, C2
, C0
, 0), F_ARCHEXT
},
3671 { "ttbr1_el12", CPENC (3, 5, C2
, C0
, 1), F_ARCHEXT
},
3672 { "vttbr_el2", CPENC(3,4,C2
,C1
,0), 0 },
3673 { "tcr_el1", CPENC(3,0,C2
,C0
,2), 0 },
3674 { "tcr_el2", CPENC(3,4,C2
,C0
,2), 0 },
3675 { "tcr_el3", CPENC(3,6,C2
,C0
,2), 0 },
3676 { "tcr_el12", CPENC (3, 5, C2
, C0
, 2), F_ARCHEXT
},
3677 { "vtcr_el2", CPENC(3,4,C2
,C1
,2), 0 },
3678 { "apiakeylo_el1", CPENC (3, 0, C2
, C1
, 0), F_ARCHEXT
},
3679 { "apiakeyhi_el1", CPENC (3, 0, C2
, C1
, 1), F_ARCHEXT
},
3680 { "apibkeylo_el1", CPENC (3, 0, C2
, C1
, 2), F_ARCHEXT
},
3681 { "apibkeyhi_el1", CPENC (3, 0, C2
, C1
, 3), F_ARCHEXT
},
3682 { "apdakeylo_el1", CPENC (3, 0, C2
, C2
, 0), F_ARCHEXT
},
3683 { "apdakeyhi_el1", CPENC (3, 0, C2
, C2
, 1), F_ARCHEXT
},
3684 { "apdbkeylo_el1", CPENC (3, 0, C2
, C2
, 2), F_ARCHEXT
},
3685 { "apdbkeyhi_el1", CPENC (3, 0, C2
, C2
, 3), F_ARCHEXT
},
3686 { "apgakeylo_el1", CPENC (3, 0, C2
, C3
, 0), F_ARCHEXT
},
3687 { "apgakeyhi_el1", CPENC (3, 0, C2
, C3
, 1), F_ARCHEXT
},
3688 { "afsr0_el1", CPENC(3,0,C5
,C1
,0), 0 },
3689 { "afsr1_el1", CPENC(3,0,C5
,C1
,1), 0 },
3690 { "afsr0_el2", CPENC(3,4,C5
,C1
,0), 0 },
3691 { "afsr1_el2", CPENC(3,4,C5
,C1
,1), 0 },
3692 { "afsr0_el3", CPENC(3,6,C5
,C1
,0), 0 },
3693 { "afsr0_el12", CPENC (3, 5, C5
, C1
, 0), F_ARCHEXT
},
3694 { "afsr1_el3", CPENC(3,6,C5
,C1
,1), 0 },
3695 { "afsr1_el12", CPENC (3, 5, C5
, C1
, 1), F_ARCHEXT
},
3696 { "esr_el1", CPENC(3,0,C5
,C2
,0), 0 },
3697 { "esr_el2", CPENC(3,4,C5
,C2
,0), 0 },
3698 { "esr_el3", CPENC(3,6,C5
,C2
,0), 0 },
3699 { "esr_el12", CPENC (3, 5, C5
, C2
, 0), F_ARCHEXT
},
3700 { "vsesr_el2", CPENC (3, 4, C5
, C2
, 3), F_ARCHEXT
}, /* RO */
3701 { "fpexc32_el2", CPENC(3,4,C5
,C3
,0), 0 },
3702 { "erridr_el1", CPENC (3, 0, C5
, C3
, 0), F_ARCHEXT
}, /* RO */
3703 { "errselr_el1", CPENC (3, 0, C5
, C3
, 1), F_ARCHEXT
},
3704 { "erxfr_el1", CPENC (3, 0, C5
, C4
, 0), F_ARCHEXT
}, /* RO */
3705 { "erxctlr_el1", CPENC (3, 0, C5
, C4
, 1), F_ARCHEXT
},
3706 { "erxstatus_el1", CPENC (3, 0, C5
, C4
, 2), F_ARCHEXT
},
3707 { "erxaddr_el1", CPENC (3, 0, C5
, C4
, 3), F_ARCHEXT
},
3708 { "erxmisc0_el1", CPENC (3, 0, C5
, C5
, 0), F_ARCHEXT
},
3709 { "erxmisc1_el1", CPENC (3, 0, C5
, C5
, 1), F_ARCHEXT
},
3710 { "far_el1", CPENC(3,0,C6
,C0
,0), 0 },
3711 { "far_el2", CPENC(3,4,C6
,C0
,0), 0 },
3712 { "far_el3", CPENC(3,6,C6
,C0
,0), 0 },
3713 { "far_el12", CPENC (3, 5, C6
, C0
, 0), F_ARCHEXT
},
3714 { "hpfar_el2", CPENC(3,4,C6
,C0
,4), 0 },
3715 { "par_el1", CPENC(3,0,C7
,C4
,0), 0 },
3716 { "mair_el1", CPENC(3,0,C10
,C2
,0), 0 },
3717 { "mair_el2", CPENC(3,4,C10
,C2
,0), 0 },
3718 { "mair_el3", CPENC(3,6,C10
,C2
,0), 0 },
3719 { "mair_el12", CPENC (3, 5, C10
, C2
, 0), F_ARCHEXT
},
3720 { "amair_el1", CPENC(3,0,C10
,C3
,0), 0 },
3721 { "amair_el2", CPENC(3,4,C10
,C3
,0), 0 },
3722 { "amair_el3", CPENC(3,6,C10
,C3
,0), 0 },
3723 { "amair_el12", CPENC (3, 5, C10
, C3
, 0), F_ARCHEXT
},
3724 { "vbar_el1", CPENC(3,0,C12
,C0
,0), 0 },
3725 { "vbar_el2", CPENC(3,4,C12
,C0
,0), 0 },
3726 { "vbar_el3", CPENC(3,6,C12
,C0
,0), 0 },
3727 { "vbar_el12", CPENC (3, 5, C12
, C0
, 0), F_ARCHEXT
},
3728 { "rvbar_el1", CPENC(3,0,C12
,C0
,1), 0 }, /* RO */
3729 { "rvbar_el2", CPENC(3,4,C12
,C0
,1), 0 }, /* RO */
3730 { "rvbar_el3", CPENC(3,6,C12
,C0
,1), 0 }, /* RO */
3731 { "rmr_el1", CPENC(3,0,C12
,C0
,2), 0 },
3732 { "rmr_el2", CPENC(3,4,C12
,C0
,2), 0 },
3733 { "rmr_el3", CPENC(3,6,C12
,C0
,2), 0 },
3734 { "isr_el1", CPENC(3,0,C12
,C1
,0), 0 }, /* RO */
3735 { "disr_el1", CPENC (3, 0, C12
, C1
, 1), F_ARCHEXT
},
3736 { "vdisr_el2", CPENC (3, 4, C12
, C1
, 1), F_ARCHEXT
},
3737 { "contextidr_el1", CPENC(3,0,C13
,C0
,1), 0 },
3738 { "contextidr_el2", CPENC (3, 4, C13
, C0
, 1), F_ARCHEXT
},
3739 { "contextidr_el12", CPENC (3, 5, C13
, C0
, 1), F_ARCHEXT
},
3740 { "tpidr_el0", CPENC(3,3,C13
,C0
,2), 0 },
3741 { "tpidrro_el0", CPENC(3,3,C13
,C0
,3), 0 }, /* RO */
3742 { "tpidr_el1", CPENC(3,0,C13
,C0
,4), 0 },
3743 { "tpidr_el2", CPENC(3,4,C13
,C0
,2), 0 },
3744 { "tpidr_el3", CPENC(3,6,C13
,C0
,2), 0 },
3745 { "teecr32_el1", CPENC(2,2,C0
, C0
,0), 0 }, /* See section 3.9.7.1 */
3746 { "cntfrq_el0", CPENC(3,3,C14
,C0
,0), 0 }, /* RO */
3747 { "cntpct_el0", CPENC(3,3,C14
,C0
,1), 0 }, /* RO */
3748 { "cntvct_el0", CPENC(3,3,C14
,C0
,2), 0 }, /* RO */
3749 { "cntvoff_el2", CPENC(3,4,C14
,C0
,3), 0 },
3750 { "cntkctl_el1", CPENC(3,0,C14
,C1
,0), 0 },
3751 { "cntkctl_el12", CPENC (3, 5, C14
, C1
, 0), F_ARCHEXT
},
3752 { "cnthctl_el2", CPENC(3,4,C14
,C1
,0), 0 },
3753 { "cntp_tval_el0", CPENC(3,3,C14
,C2
,0), 0 },
3754 { "cntp_tval_el02", CPENC (3, 5, C14
, C2
, 0), F_ARCHEXT
},
3755 { "cntp_ctl_el0", CPENC(3,3,C14
,C2
,1), 0 },
3756 { "cntp_ctl_el02", CPENC (3, 5, C14
, C2
, 1), F_ARCHEXT
},
3757 { "cntp_cval_el0", CPENC(3,3,C14
,C2
,2), 0 },
3758 { "cntp_cval_el02", CPENC (3, 5, C14
, C2
, 2), F_ARCHEXT
},
3759 { "cntv_tval_el0", CPENC(3,3,C14
,C3
,0), 0 },
3760 { "cntv_tval_el02", CPENC (3, 5, C14
, C3
, 0), F_ARCHEXT
},
3761 { "cntv_ctl_el0", CPENC(3,3,C14
,C3
,1), 0 },
3762 { "cntv_ctl_el02", CPENC (3, 5, C14
, C3
, 1), F_ARCHEXT
},
3763 { "cntv_cval_el0", CPENC(3,3,C14
,C3
,2), 0 },
3764 { "cntv_cval_el02", CPENC (3, 5, C14
, C3
, 2), F_ARCHEXT
},
3765 { "cnthp_tval_el2", CPENC(3,4,C14
,C2
,0), 0 },
3766 { "cnthp_ctl_el2", CPENC(3,4,C14
,C2
,1), 0 },
3767 { "cnthp_cval_el2", CPENC(3,4,C14
,C2
,2), 0 },
3768 { "cntps_tval_el1", CPENC(3,7,C14
,C2
,0), 0 },
3769 { "cntps_ctl_el1", CPENC(3,7,C14
,C2
,1), 0 },
3770 { "cntps_cval_el1", CPENC(3,7,C14
,C2
,2), 0 },
3771 { "cnthv_tval_el2", CPENC (3, 4, C14
, C3
, 0), F_ARCHEXT
},
3772 { "cnthv_ctl_el2", CPENC (3, 4, C14
, C3
, 1), F_ARCHEXT
},
3773 { "cnthv_cval_el2", CPENC (3, 4, C14
, C3
, 2), F_ARCHEXT
},
3774 { "dacr32_el2", CPENC(3,4,C3
,C0
,0), 0 },
3775 { "ifsr32_el2", CPENC(3,4,C5
,C0
,1), 0 },
3776 { "teehbr32_el1", CPENC(2,2,C1
,C0
,0), 0 },
3777 { "sder32_el3", CPENC(3,6,C1
,C1
,1), 0 },
3778 { "mdscr_el1", CPENC(2,0,C0
, C2
, 2), 0 },
3779 { "mdccsr_el0", CPENC(2,3,C0
, C1
, 0), 0 }, /* r */
3780 { "mdccint_el1", CPENC(2,0,C0
, C2
, 0), 0 },
3781 { "dbgdtr_el0", CPENC(2,3,C0
, C4
, 0), 0 },
3782 { "dbgdtrrx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* r */
3783 { "dbgdtrtx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* w */
3784 { "osdtrrx_el1", CPENC(2,0,C0
, C0
, 2), 0 }, /* r */
3785 { "osdtrtx_el1", CPENC(2,0,C0
, C3
, 2), 0 }, /* w */
3786 { "oseccr_el1", CPENC(2,0,C0
, C6
, 2), 0 },
3787 { "dbgvcr32_el2", CPENC(2,4,C0
, C7
, 0), 0 },
3788 { "dbgbvr0_el1", CPENC(2,0,C0
, C0
, 4), 0 },
3789 { "dbgbvr1_el1", CPENC(2,0,C0
, C1
, 4), 0 },
3790 { "dbgbvr2_el1", CPENC(2,0,C0
, C2
, 4), 0 },
3791 { "dbgbvr3_el1", CPENC(2,0,C0
, C3
, 4), 0 },
3792 { "dbgbvr4_el1", CPENC(2,0,C0
, C4
, 4), 0 },
3793 { "dbgbvr5_el1", CPENC(2,0,C0
, C5
, 4), 0 },
3794 { "dbgbvr6_el1", CPENC(2,0,C0
, C6
, 4), 0 },
3795 { "dbgbvr7_el1", CPENC(2,0,C0
, C7
, 4), 0 },
3796 { "dbgbvr8_el1", CPENC(2,0,C0
, C8
, 4), 0 },
3797 { "dbgbvr9_el1", CPENC(2,0,C0
, C9
, 4), 0 },
3798 { "dbgbvr10_el1", CPENC(2,0,C0
, C10
,4), 0 },
3799 { "dbgbvr11_el1", CPENC(2,0,C0
, C11
,4), 0 },
3800 { "dbgbvr12_el1", CPENC(2,0,C0
, C12
,4), 0 },
3801 { "dbgbvr13_el1", CPENC(2,0,C0
, C13
,4), 0 },
3802 { "dbgbvr14_el1", CPENC(2,0,C0
, C14
,4), 0 },
3803 { "dbgbvr15_el1", CPENC(2,0,C0
, C15
,4), 0 },
3804 { "dbgbcr0_el1", CPENC(2,0,C0
, C0
, 5), 0 },
3805 { "dbgbcr1_el1", CPENC(2,0,C0
, C1
, 5), 0 },
3806 { "dbgbcr2_el1", CPENC(2,0,C0
, C2
, 5), 0 },
3807 { "dbgbcr3_el1", CPENC(2,0,C0
, C3
, 5), 0 },
3808 { "dbgbcr4_el1", CPENC(2,0,C0
, C4
, 5), 0 },
3809 { "dbgbcr5_el1", CPENC(2,0,C0
, C5
, 5), 0 },
3810 { "dbgbcr6_el1", CPENC(2,0,C0
, C6
, 5), 0 },
3811 { "dbgbcr7_el1", CPENC(2,0,C0
, C7
, 5), 0 },
3812 { "dbgbcr8_el1", CPENC(2,0,C0
, C8
, 5), 0 },
3813 { "dbgbcr9_el1", CPENC(2,0,C0
, C9
, 5), 0 },
3814 { "dbgbcr10_el1", CPENC(2,0,C0
, C10
,5), 0 },
3815 { "dbgbcr11_el1", CPENC(2,0,C0
, C11
,5), 0 },
3816 { "dbgbcr12_el1", CPENC(2,0,C0
, C12
,5), 0 },
3817 { "dbgbcr13_el1", CPENC(2,0,C0
, C13
,5), 0 },
3818 { "dbgbcr14_el1", CPENC(2,0,C0
, C14
,5), 0 },
3819 { "dbgbcr15_el1", CPENC(2,0,C0
, C15
,5), 0 },
3820 { "dbgwvr0_el1", CPENC(2,0,C0
, C0
, 6), 0 },
3821 { "dbgwvr1_el1", CPENC(2,0,C0
, C1
, 6), 0 },
3822 { "dbgwvr2_el1", CPENC(2,0,C0
, C2
, 6), 0 },
3823 { "dbgwvr3_el1", CPENC(2,0,C0
, C3
, 6), 0 },
3824 { "dbgwvr4_el1", CPENC(2,0,C0
, C4
, 6), 0 },
3825 { "dbgwvr5_el1", CPENC(2,0,C0
, C5
, 6), 0 },
3826 { "dbgwvr6_el1", CPENC(2,0,C0
, C6
, 6), 0 },
3827 { "dbgwvr7_el1", CPENC(2,0,C0
, C7
, 6), 0 },
3828 { "dbgwvr8_el1", CPENC(2,0,C0
, C8
, 6), 0 },
3829 { "dbgwvr9_el1", CPENC(2,0,C0
, C9
, 6), 0 },
3830 { "dbgwvr10_el1", CPENC(2,0,C0
, C10
,6), 0 },
3831 { "dbgwvr11_el1", CPENC(2,0,C0
, C11
,6), 0 },
3832 { "dbgwvr12_el1", CPENC(2,0,C0
, C12
,6), 0 },
3833 { "dbgwvr13_el1", CPENC(2,0,C0
, C13
,6), 0 },
3834 { "dbgwvr14_el1", CPENC(2,0,C0
, C14
,6), 0 },
3835 { "dbgwvr15_el1", CPENC(2,0,C0
, C15
,6), 0 },
3836 { "dbgwcr0_el1", CPENC(2,0,C0
, C0
, 7), 0 },
3837 { "dbgwcr1_el1", CPENC(2,0,C0
, C1
, 7), 0 },
3838 { "dbgwcr2_el1", CPENC(2,0,C0
, C2
, 7), 0 },
3839 { "dbgwcr3_el1", CPENC(2,0,C0
, C3
, 7), 0 },
3840 { "dbgwcr4_el1", CPENC(2,0,C0
, C4
, 7), 0 },
3841 { "dbgwcr5_el1", CPENC(2,0,C0
, C5
, 7), 0 },
3842 { "dbgwcr6_el1", CPENC(2,0,C0
, C6
, 7), 0 },
3843 { "dbgwcr7_el1", CPENC(2,0,C0
, C7
, 7), 0 },
3844 { "dbgwcr8_el1", CPENC(2,0,C0
, C8
, 7), 0 },
3845 { "dbgwcr9_el1", CPENC(2,0,C0
, C9
, 7), 0 },
3846 { "dbgwcr10_el1", CPENC(2,0,C0
, C10
,7), 0 },
3847 { "dbgwcr11_el1", CPENC(2,0,C0
, C11
,7), 0 },
3848 { "dbgwcr12_el1", CPENC(2,0,C0
, C12
,7), 0 },
3849 { "dbgwcr13_el1", CPENC(2,0,C0
, C13
,7), 0 },
3850 { "dbgwcr14_el1", CPENC(2,0,C0
, C14
,7), 0 },
3851 { "dbgwcr15_el1", CPENC(2,0,C0
, C15
,7), 0 },
3852 { "mdrar_el1", CPENC(2,0,C1
, C0
, 0), 0 }, /* r */
3853 { "oslar_el1", CPENC(2,0,C1
, C0
, 4), 0 }, /* w */
3854 { "oslsr_el1", CPENC(2,0,C1
, C1
, 4), 0 }, /* r */
3855 { "osdlr_el1", CPENC(2,0,C1
, C3
, 4), 0 },
3856 { "dbgprcr_el1", CPENC(2,0,C1
, C4
, 4), 0 },
3857 { "dbgclaimset_el1", CPENC(2,0,C7
, C8
, 6), 0 },
3858 { "dbgclaimclr_el1", CPENC(2,0,C7
, C9
, 6), 0 },
3859 { "dbgauthstatus_el1", CPENC(2,0,C7
, C14
,6), 0 }, /* r */
3860 { "pmblimitr_el1", CPENC (3, 0, C9
, C10
, 0), F_ARCHEXT
}, /* rw */
3861 { "pmbptr_el1", CPENC (3, 0, C9
, C10
, 1), F_ARCHEXT
}, /* rw */
3862 { "pmbsr_el1", CPENC (3, 0, C9
, C10
, 3), F_ARCHEXT
}, /* rw */
3863 { "pmbidr_el1", CPENC (3, 0, C9
, C10
, 7), F_ARCHEXT
}, /* ro */
3864 { "pmscr_el1", CPENC (3, 0, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3865 { "pmsicr_el1", CPENC (3, 0, C9
, C9
, 2), F_ARCHEXT
}, /* rw */
3866 { "pmsirr_el1", CPENC (3, 0, C9
, C9
, 3), F_ARCHEXT
}, /* rw */
3867 { "pmsfcr_el1", CPENC (3, 0, C9
, C9
, 4), F_ARCHEXT
}, /* rw */
3868 { "pmsevfr_el1", CPENC (3, 0, C9
, C9
, 5), F_ARCHEXT
}, /* rw */
3869 { "pmslatfr_el1", CPENC (3, 0, C9
, C9
, 6), F_ARCHEXT
}, /* rw */
3870 { "pmsidr_el1", CPENC (3, 0, C9
, C9
, 7), F_ARCHEXT
}, /* ro */
3871 { "pmscr_el2", CPENC (3, 4, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3872 { "pmscr_el12", CPENC (3, 5, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3873 { "pmcr_el0", CPENC(3,3,C9
,C12
, 0), 0 },
3874 { "pmcntenset_el0", CPENC(3,3,C9
,C12
, 1), 0 },
3875 { "pmcntenclr_el0", CPENC(3,3,C9
,C12
, 2), 0 },
3876 { "pmovsclr_el0", CPENC(3,3,C9
,C12
, 3), 0 },
3877 { "pmswinc_el0", CPENC(3,3,C9
,C12
, 4), 0 }, /* w */
3878 { "pmselr_el0", CPENC(3,3,C9
,C12
, 5), 0 },
3879 { "pmceid0_el0", CPENC(3,3,C9
,C12
, 6), 0 }, /* r */
3880 { "pmceid1_el0", CPENC(3,3,C9
,C12
, 7), 0 }, /* r */
3881 { "pmccntr_el0", CPENC(3,3,C9
,C13
, 0), 0 },
3882 { "pmxevtyper_el0", CPENC(3,3,C9
,C13
, 1), 0 },
3883 { "pmxevcntr_el0", CPENC(3,3,C9
,C13
, 2), 0 },
3884 { "pmuserenr_el0", CPENC(3,3,C9
,C14
, 0), 0 },
3885 { "pmintenset_el1", CPENC(3,0,C9
,C14
, 1), 0 },
3886 { "pmintenclr_el1", CPENC(3,0,C9
,C14
, 2), 0 },
3887 { "pmovsset_el0", CPENC(3,3,C9
,C14
, 3), 0 },
3888 { "pmevcntr0_el0", CPENC(3,3,C14
,C8
, 0), 0 },
3889 { "pmevcntr1_el0", CPENC(3,3,C14
,C8
, 1), 0 },
3890 { "pmevcntr2_el0", CPENC(3,3,C14
,C8
, 2), 0 },
3891 { "pmevcntr3_el0", CPENC(3,3,C14
,C8
, 3), 0 },
3892 { "pmevcntr4_el0", CPENC(3,3,C14
,C8
, 4), 0 },
3893 { "pmevcntr5_el0", CPENC(3,3,C14
,C8
, 5), 0 },
3894 { "pmevcntr6_el0", CPENC(3,3,C14
,C8
, 6), 0 },
3895 { "pmevcntr7_el0", CPENC(3,3,C14
,C8
, 7), 0 },
3896 { "pmevcntr8_el0", CPENC(3,3,C14
,C9
, 0), 0 },
3897 { "pmevcntr9_el0", CPENC(3,3,C14
,C9
, 1), 0 },
3898 { "pmevcntr10_el0", CPENC(3,3,C14
,C9
, 2), 0 },
3899 { "pmevcntr11_el0", CPENC(3,3,C14
,C9
, 3), 0 },
3900 { "pmevcntr12_el0", CPENC(3,3,C14
,C9
, 4), 0 },
3901 { "pmevcntr13_el0", CPENC(3,3,C14
,C9
, 5), 0 },
3902 { "pmevcntr14_el0", CPENC(3,3,C14
,C9
, 6), 0 },
3903 { "pmevcntr15_el0", CPENC(3,3,C14
,C9
, 7), 0 },
3904 { "pmevcntr16_el0", CPENC(3,3,C14
,C10
,0), 0 },
3905 { "pmevcntr17_el0", CPENC(3,3,C14
,C10
,1), 0 },
3906 { "pmevcntr18_el0", CPENC(3,3,C14
,C10
,2), 0 },
3907 { "pmevcntr19_el0", CPENC(3,3,C14
,C10
,3), 0 },
3908 { "pmevcntr20_el0", CPENC(3,3,C14
,C10
,4), 0 },
3909 { "pmevcntr21_el0", CPENC(3,3,C14
,C10
,5), 0 },
3910 { "pmevcntr22_el0", CPENC(3,3,C14
,C10
,6), 0 },
3911 { "pmevcntr23_el0", CPENC(3,3,C14
,C10
,7), 0 },
3912 { "pmevcntr24_el0", CPENC(3,3,C14
,C11
,0), 0 },
3913 { "pmevcntr25_el0", CPENC(3,3,C14
,C11
,1), 0 },
3914 { "pmevcntr26_el0", CPENC(3,3,C14
,C11
,2), 0 },
3915 { "pmevcntr27_el0", CPENC(3,3,C14
,C11
,3), 0 },
3916 { "pmevcntr28_el0", CPENC(3,3,C14
,C11
,4), 0 },
3917 { "pmevcntr29_el0", CPENC(3,3,C14
,C11
,5), 0 },
3918 { "pmevcntr30_el0", CPENC(3,3,C14
,C11
,6), 0 },
3919 { "pmevtyper0_el0", CPENC(3,3,C14
,C12
,0), 0 },
3920 { "pmevtyper1_el0", CPENC(3,3,C14
,C12
,1), 0 },
3921 { "pmevtyper2_el0", CPENC(3,3,C14
,C12
,2), 0 },
3922 { "pmevtyper3_el0", CPENC(3,3,C14
,C12
,3), 0 },
3923 { "pmevtyper4_el0", CPENC(3,3,C14
,C12
,4), 0 },
3924 { "pmevtyper5_el0", CPENC(3,3,C14
,C12
,5), 0 },
3925 { "pmevtyper6_el0", CPENC(3,3,C14
,C12
,6), 0 },
3926 { "pmevtyper7_el0", CPENC(3,3,C14
,C12
,7), 0 },
3927 { "pmevtyper8_el0", CPENC(3,3,C14
,C13
,0), 0 },
3928 { "pmevtyper9_el0", CPENC(3,3,C14
,C13
,1), 0 },
3929 { "pmevtyper10_el0", CPENC(3,3,C14
,C13
,2), 0 },
3930 { "pmevtyper11_el0", CPENC(3,3,C14
,C13
,3), 0 },
3931 { "pmevtyper12_el0", CPENC(3,3,C14
,C13
,4), 0 },
3932 { "pmevtyper13_el0", CPENC(3,3,C14
,C13
,5), 0 },
3933 { "pmevtyper14_el0", CPENC(3,3,C14
,C13
,6), 0 },
3934 { "pmevtyper15_el0", CPENC(3,3,C14
,C13
,7), 0 },
3935 { "pmevtyper16_el0", CPENC(3,3,C14
,C14
,0), 0 },
3936 { "pmevtyper17_el0", CPENC(3,3,C14
,C14
,1), 0 },
3937 { "pmevtyper18_el0", CPENC(3,3,C14
,C14
,2), 0 },
3938 { "pmevtyper19_el0", CPENC(3,3,C14
,C14
,3), 0 },
3939 { "pmevtyper20_el0", CPENC(3,3,C14
,C14
,4), 0 },
3940 { "pmevtyper21_el0", CPENC(3,3,C14
,C14
,5), 0 },
3941 { "pmevtyper22_el0", CPENC(3,3,C14
,C14
,6), 0 },
3942 { "pmevtyper23_el0", CPENC(3,3,C14
,C14
,7), 0 },
3943 { "pmevtyper24_el0", CPENC(3,3,C14
,C15
,0), 0 },
3944 { "pmevtyper25_el0", CPENC(3,3,C14
,C15
,1), 0 },
3945 { "pmevtyper26_el0", CPENC(3,3,C14
,C15
,2), 0 },
3946 { "pmevtyper27_el0", CPENC(3,3,C14
,C15
,3), 0 },
3947 { "pmevtyper28_el0", CPENC(3,3,C14
,C15
,4), 0 },
3948 { "pmevtyper29_el0", CPENC(3,3,C14
,C15
,5), 0 },
3949 { "pmevtyper30_el0", CPENC(3,3,C14
,C15
,6), 0 },
3950 { "pmccfiltr_el0", CPENC(3,3,C14
,C15
,7), 0 },
3951 { 0, CPENC(0,0,0,0,0), 0 },
3955 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg
*reg
)
3957 return (reg
->flags
& F_DEPRECATED
) != 0;
3961 aarch64_sys_reg_supported_p (const aarch64_feature_set features
,
3962 const aarch64_sys_reg
*reg
)
3964 if (!(reg
->flags
& F_ARCHEXT
))
3967 /* PAN. Values are from aarch64_sys_regs. */
3968 if (reg
->value
== CPEN_(0,C2
,3)
3969 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
3972 /* Virtualization host extensions: system registers. */
3973 if ((reg
->value
== CPENC (3, 4, C2
, C0
, 1)
3974 || reg
->value
== CPENC (3, 4, C13
, C0
, 1)
3975 || reg
->value
== CPENC (3, 4, C14
, C3
, 0)
3976 || reg
->value
== CPENC (3, 4, C14
, C3
, 1)
3977 || reg
->value
== CPENC (3, 4, C14
, C3
, 2))
3978 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3981 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3982 if ((reg
->value
== CPEN_ (5, C0
, 0)
3983 || reg
->value
== CPEN_ (5, C0
, 1)
3984 || reg
->value
== CPENC (3, 5, C1
, C0
, 0)
3985 || reg
->value
== CPENC (3, 5, C1
, C0
, 2)
3986 || reg
->value
== CPENC (3, 5, C2
, C0
, 0)
3987 || reg
->value
== CPENC (3, 5, C2
, C0
, 1)
3988 || reg
->value
== CPENC (3, 5, C2
, C0
, 2)
3989 || reg
->value
== CPENC (3, 5, C5
, C1
, 0)
3990 || reg
->value
== CPENC (3, 5, C5
, C1
, 1)
3991 || reg
->value
== CPENC (3, 5, C5
, C2
, 0)
3992 || reg
->value
== CPENC (3, 5, C6
, C0
, 0)
3993 || reg
->value
== CPENC (3, 5, C10
, C2
, 0)
3994 || reg
->value
== CPENC (3, 5, C10
, C3
, 0)
3995 || reg
->value
== CPENC (3, 5, C12
, C0
, 0)
3996 || reg
->value
== CPENC (3, 5, C13
, C0
, 1)
3997 || reg
->value
== CPENC (3, 5, C14
, C1
, 0))
3998 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4001 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
4002 if ((reg
->value
== CPENC (3, 5, C14
, C2
, 0)
4003 || reg
->value
== CPENC (3, 5, C14
, C2
, 1)
4004 || reg
->value
== CPENC (3, 5, C14
, C2
, 2)
4005 || reg
->value
== CPENC (3, 5, C14
, C3
, 0)
4006 || reg
->value
== CPENC (3, 5, C14
, C3
, 1)
4007 || reg
->value
== CPENC (3, 5, C14
, C3
, 2))
4008 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4011 /* ARMv8.2 features. */
4013 /* ID_AA64MMFR2_EL1. */
4014 if (reg
->value
== CPENC (3, 0, C0
, C7
, 2)
4015 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4019 if (reg
->value
== CPEN_ (0, C2
, 4)
4020 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4023 /* RAS extension. */
4025 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
4026 ERXMISC0_EL1 AND ERXMISC1_EL1. */
4027 if ((reg
->value
== CPENC (3, 0, C5
, C3
, 0)
4028 || reg
->value
== CPENC (3, 0, C5
, C3
, 1)
4029 || reg
->value
== CPENC (3, 0, C5
, C3
, 2)
4030 || reg
->value
== CPENC (3, 0, C5
, C3
, 3)
4031 || reg
->value
== CPENC (3, 0, C5
, C4
, 0)
4032 || reg
->value
== CPENC (3, 0, C5
, C4
, 1)
4033 || reg
->value
== CPENC (3, 0, C5
, C4
, 2)
4034 || reg
->value
== CPENC (3, 0, C5
, C4
, 3)
4035 || reg
->value
== CPENC (3, 0, C5
, C5
, 0)
4036 || reg
->value
== CPENC (3, 0, C5
, C5
, 1))
4037 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
4040 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
4041 if ((reg
->value
== CPENC (3, 4, C5
, C2
, 3)
4042 || reg
->value
== CPENC (3, 0, C12
, C1
, 1)
4043 || reg
->value
== CPENC (3, 4, C12
, C1
, 1))
4044 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
4047 /* Statistical Profiling extension. */
4048 if ((reg
->value
== CPENC (3, 0, C9
, C10
, 0)
4049 || reg
->value
== CPENC (3, 0, C9
, C10
, 1)
4050 || reg
->value
== CPENC (3, 0, C9
, C10
, 3)
4051 || reg
->value
== CPENC (3, 0, C9
, C10
, 7)
4052 || reg
->value
== CPENC (3, 0, C9
, C9
, 0)
4053 || reg
->value
== CPENC (3, 0, C9
, C9
, 2)
4054 || reg
->value
== CPENC (3, 0, C9
, C9
, 3)
4055 || reg
->value
== CPENC (3, 0, C9
, C9
, 4)
4056 || reg
->value
== CPENC (3, 0, C9
, C9
, 5)
4057 || reg
->value
== CPENC (3, 0, C9
, C9
, 6)
4058 || reg
->value
== CPENC (3, 0, C9
, C9
, 7)
4059 || reg
->value
== CPENC (3, 4, C9
, C9
, 0)
4060 || reg
->value
== CPENC (3, 5, C9
, C9
, 0))
4061 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PROFILE
))
4064 /* ARMv8.3 Pointer authentication keys. */
4065 if ((reg
->value
== CPENC (3, 0, C2
, C1
, 0)
4066 || reg
->value
== CPENC (3, 0, C2
, C1
, 1)
4067 || reg
->value
== CPENC (3, 0, C2
, C1
, 2)
4068 || reg
->value
== CPENC (3, 0, C2
, C1
, 3)
4069 || reg
->value
== CPENC (3, 0, C2
, C2
, 0)
4070 || reg
->value
== CPENC (3, 0, C2
, C2
, 1)
4071 || reg
->value
== CPENC (3, 0, C2
, C2
, 2)
4072 || reg
->value
== CPENC (3, 0, C2
, C2
, 3)
4073 || reg
->value
== CPENC (3, 0, C2
, C3
, 0)
4074 || reg
->value
== CPENC (3, 0, C2
, C3
, 1))
4075 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_3
))
4081 const aarch64_sys_reg aarch64_pstatefields
[] =
4083 { "spsel", 0x05, 0 },
4084 { "daifset", 0x1e, 0 },
4085 { "daifclr", 0x1f, 0 },
4086 { "pan", 0x04, F_ARCHEXT
},
4087 { "uao", 0x03, F_ARCHEXT
},
4088 { 0, CPENC(0,0,0,0,0), 0 },
4092 aarch64_pstatefield_supported_p (const aarch64_feature_set features
,
4093 const aarch64_sys_reg
*reg
)
4095 if (!(reg
->flags
& F_ARCHEXT
))
4098 /* PAN. Values are from aarch64_pstatefields. */
4099 if (reg
->value
== 0x04
4100 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
4103 /* UAO. Values are from aarch64_pstatefields. */
4104 if (reg
->value
== 0x03
4105 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4111 const aarch64_sys_ins_reg aarch64_sys_regs_ic
[] =
4113 { "ialluis", CPENS(0,C7
,C1
,0), 0 },
4114 { "iallu", CPENS(0,C7
,C5
,0), 0 },
4115 { "ivau", CPENS (3, C7
, C5
, 1), F_HASXT
},
4116 { 0, CPENS(0,0,0,0), 0 }
4119 const aarch64_sys_ins_reg aarch64_sys_regs_dc
[] =
4121 { "zva", CPENS (3, C7
, C4
, 1), F_HASXT
},
4122 { "ivac", CPENS (0, C7
, C6
, 1), F_HASXT
},
4123 { "isw", CPENS (0, C7
, C6
, 2), F_HASXT
},
4124 { "cvac", CPENS (3, C7
, C10
, 1), F_HASXT
},
4125 { "csw", CPENS (0, C7
, C10
, 2), F_HASXT
},
4126 { "cvau", CPENS (3, C7
, C11
, 1), F_HASXT
},
4127 { "cvap", CPENS (3, C7
, C12
, 1), F_HASXT
| F_ARCHEXT
},
4128 { "civac", CPENS (3, C7
, C14
, 1), F_HASXT
},
4129 { "cisw", CPENS (0, C7
, C14
, 2), F_HASXT
},
4130 { 0, CPENS(0,0,0,0), 0 }
4133 const aarch64_sys_ins_reg aarch64_sys_regs_at
[] =
4135 { "s1e1r", CPENS (0, C7
, C8
, 0), F_HASXT
},
4136 { "s1e1w", CPENS (0, C7
, C8
, 1), F_HASXT
},
4137 { "s1e0r", CPENS (0, C7
, C8
, 2), F_HASXT
},
4138 { "s1e0w", CPENS (0, C7
, C8
, 3), F_HASXT
},
4139 { "s12e1r", CPENS (4, C7
, C8
, 4), F_HASXT
},
4140 { "s12e1w", CPENS (4, C7
, C8
, 5), F_HASXT
},
4141 { "s12e0r", CPENS (4, C7
, C8
, 6), F_HASXT
},
4142 { "s12e0w", CPENS (4, C7
, C8
, 7), F_HASXT
},
4143 { "s1e2r", CPENS (4, C7
, C8
, 0), F_HASXT
},
4144 { "s1e2w", CPENS (4, C7
, C8
, 1), F_HASXT
},
4145 { "s1e3r", CPENS (6, C7
, C8
, 0), F_HASXT
},
4146 { "s1e3w", CPENS (6, C7
, C8
, 1), F_HASXT
},
4147 { "s1e1rp", CPENS (0, C7
, C9
, 0), F_HASXT
| F_ARCHEXT
},
4148 { "s1e1wp", CPENS (0, C7
, C9
, 1), F_HASXT
| F_ARCHEXT
},
4149 { 0, CPENS(0,0,0,0), 0 }
4152 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi
[] =
4154 { "vmalle1", CPENS(0,C8
,C7
,0), 0 },
4155 { "vae1", CPENS (0, C8
, C7
, 1), F_HASXT
},
4156 { "aside1", CPENS (0, C8
, C7
, 2), F_HASXT
},
4157 { "vaae1", CPENS (0, C8
, C7
, 3), F_HASXT
},
4158 { "vmalle1is", CPENS(0,C8
,C3
,0), 0 },
4159 { "vae1is", CPENS (0, C8
, C3
, 1), F_HASXT
},
4160 { "aside1is", CPENS (0, C8
, C3
, 2), F_HASXT
},
4161 { "vaae1is", CPENS (0, C8
, C3
, 3), F_HASXT
},
4162 { "ipas2e1is", CPENS (4, C8
, C0
, 1), F_HASXT
},
4163 { "ipas2le1is",CPENS (4, C8
, C0
, 5), F_HASXT
},
4164 { "ipas2e1", CPENS (4, C8
, C4
, 1), F_HASXT
},
4165 { "ipas2le1", CPENS (4, C8
, C4
, 5), F_HASXT
},
4166 { "vae2", CPENS (4, C8
, C7
, 1), F_HASXT
},
4167 { "vae2is", CPENS (4, C8
, C3
, 1), F_HASXT
},
4168 { "vmalls12e1",CPENS(4,C8
,C7
,6), 0 },
4169 { "vmalls12e1is",CPENS(4,C8
,C3
,6), 0 },
4170 { "vae3", CPENS (6, C8
, C7
, 1), F_HASXT
},
4171 { "vae3is", CPENS (6, C8
, C3
, 1), F_HASXT
},
4172 { "alle2", CPENS(4,C8
,C7
,0), 0 },
4173 { "alle2is", CPENS(4,C8
,C3
,0), 0 },
4174 { "alle1", CPENS(4,C8
,C7
,4), 0 },
4175 { "alle1is", CPENS(4,C8
,C3
,4), 0 },
4176 { "alle3", CPENS(6,C8
,C7
,0), 0 },
4177 { "alle3is", CPENS(6,C8
,C3
,0), 0 },
4178 { "vale1is", CPENS (0, C8
, C3
, 5), F_HASXT
},
4179 { "vale2is", CPENS (4, C8
, C3
, 5), F_HASXT
},
4180 { "vale3is", CPENS (6, C8
, C3
, 5), F_HASXT
},
4181 { "vaale1is", CPENS (0, C8
, C3
, 7), F_HASXT
},
4182 { "vale1", CPENS (0, C8
, C7
, 5), F_HASXT
},
4183 { "vale2", CPENS (4, C8
, C7
, 5), F_HASXT
},
4184 { "vale3", CPENS (6, C8
, C7
, 5), F_HASXT
},
4185 { "vaale1", CPENS (0, C8
, C7
, 7), F_HASXT
},
4186 { 0, CPENS(0,0,0,0), 0 }
4190 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg
*sys_ins_reg
)
4192 return (sys_ins_reg
->flags
& F_HASXT
) != 0;
4196 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features
,
4197 const aarch64_sys_ins_reg
*reg
)
4199 if (!(reg
->flags
& F_ARCHEXT
))
4202 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4203 if (reg
->value
== CPENS (3, C7
, C12
, 1)
4204 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4207 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4208 if ((reg
->value
== CPENS (0, C7
, C9
, 0)
4209 || reg
->value
== CPENS (0, C7
, C9
, 1))
4210 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
/* Extract bit BT of INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the (inclusive) bit field HI..LO of INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4237 verify_ldpsw (const struct aarch64_opcode
* opcode ATTRIBUTE_UNUSED
,
4238 const aarch64_insn insn
)
4240 int t
= BITS (insn
, 4, 0);
4241 int n
= BITS (insn
, 9, 5);
4242 int t2
= BITS (insn
, 14, 10);
4246 /* Write back enabled. */
4247 if ((t
== n
|| t2
== n
) && n
!= 31)
4261 /* Return true if VALUE cannot be moved into an SVE register using DUP
4262 (with any element size, not just ESIZE) and if using DUPM would
4263 therefore be OK. ESIZE is the number of bytes in the immediate. */
4266 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue
, int esize
)
4268 int64_t svalue
= uvalue
;
4269 uint64_t upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
4271 if ((uvalue
& ~upper
) != uvalue
&& (uvalue
| upper
) != uvalue
)
4273 if (esize
<= 4 || (uint32_t) uvalue
== (uint32_t) (uvalue
>> 32))
4275 svalue
= (int32_t) uvalue
;
4276 if (esize
<= 2 || (uint16_t) uvalue
== (uint16_t) (uvalue
>> 16))
4278 svalue
= (int16_t) uvalue
;
4279 if (esize
== 1 || (uint8_t) uvalue
== (uint8_t) (uvalue
>> 8))
4283 if ((svalue
& 0xff) == 0)
4285 return svalue
< -128 || svalue
>= 128;
/* Include the opcode description table as well as the operand
   description table.  */
#define VERIFIER(x) verify_##x
#include "aarch64-tbl.h"