1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
35 int debug_dump
= FALSE
;
36 #endif /* DEBUG_AARCH64 */
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array
[32] = {
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array
[16] = {
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
108 return ((qualifier
>= AARCH64_OPND_QLF_V_8B
109 && qualifier
<= AARCH64_OPND_QLF_V_1Q
) ? TRUE
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
116 return ((qualifier
>= AARCH64_OPND_QLF_S_B
117 && qualifier
<= AARCH64_OPND_QLF_S_Q
) ? TRUE
127 DP_VECTOR_ACROSS_LANES
,
130 static const char significant_operand_index
[] =
132 0, /* DP_UNKNOWN, by default using operand 0. */
133 0, /* DP_VECTOR_3SAME */
134 1, /* DP_VECTOR_LONG */
135 2, /* DP_VECTOR_WIDE */
136 1, /* DP_VECTOR_ACROSS_LANES */
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
147 if (vector_qualifier_p (qualifiers
[0]) == TRUE
)
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers
[0] == qualifiers
[1]
152 && vector_qualifier_p (qualifiers
[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers
[0])
154 == aarch64_get_qualifier_esize (qualifiers
[1]))
155 && (aarch64_get_qualifier_esize (qualifiers
[0])
156 == aarch64_get_qualifier_esize (qualifiers
[2])))
157 return DP_VECTOR_3SAME
;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
161 if (vector_qualifier_p (qualifiers
[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers
[0])
164 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
165 return DP_VECTOR_LONG
;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers
[0] == qualifiers
[1]
168 && vector_qualifier_p (qualifiers
[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers
[0])
171 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers
[0])
173 == aarch64_get_qualifier_esize (qualifiers
[1])))
174 return DP_VECTOR_WIDE
;
176 else if (fp_qualifier_p (qualifiers
[0]) == TRUE
)
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers
[1]) == TRUE
180 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
181 return DP_VECTOR_ACROSS_LANES
;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
199 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
202 const aarch64_field fields
[] =
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_2: in rmif instructions. */
244 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_2: in rmif instructions. */
246 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
247 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
248 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
249 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
250 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
251 { 5, 14 }, /* imm14: in test bit and branch instructions. */
252 { 5, 16 }, /* imm16: in exception instructions. */
253 { 0, 26 }, /* imm26: in unconditional branch instructions. */
254 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
255 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
256 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
257 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
258 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
259 { 22, 1 }, /* N: in logical (immediate) instructions. */
260 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
261 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
262 { 31, 1 }, /* sf: in integer data processing instructions. */
263 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
264 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
265 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
266 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
267 { 31, 1 }, /* b5: in the test bit and branch instructions. */
268 { 19, 5 }, /* b40: in the test bit and branch instructions. */
269 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
270 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
271 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
272 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
273 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
274 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
275 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
276 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
277 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
278 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
279 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
280 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
281 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
282 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
283 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
284 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
285 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
286 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
287 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
288 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
289 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
290 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
291 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
292 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
293 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
294 { 5, 1 }, /* SVE_i1: single-bit immediate. */
295 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
296 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
297 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
298 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
299 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
300 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
301 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
302 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
303 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
304 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
305 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
306 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
307 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
308 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
309 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
310 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
311 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
312 { 16, 4 }, /* SVE_tsz: triangular size select. */
313 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
314 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
315 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
316 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
317 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
318 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
319 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
320 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
321 { 12, 2 }, /* SM3: Indexed element SM3 2 bits index immediate. */
324 enum aarch64_operand_class
325 aarch64_get_operand_class (enum aarch64_opnd type
)
327 return aarch64_operands
[type
].op_class
;
331 aarch64_get_operand_name (enum aarch64_opnd type
)
333 return aarch64_operands
[type
].name
;
336 /* Get operand description string.
337 This is usually for the diagnosis purpose. */
339 aarch64_get_operand_desc (enum aarch64_opnd type
)
341 return aarch64_operands
[type
].desc
;
344 /* Table of all conditional affixes. */
345 const aarch64_cond aarch64_conds
[16] =
347 {{"eq", "none"}, 0x0},
348 {{"ne", "any"}, 0x1},
349 {{"cs", "hs", "nlast"}, 0x2},
350 {{"cc", "lo", "ul", "last"}, 0x3},
351 {{"mi", "first"}, 0x4},
352 {{"pl", "nfrst"}, 0x5},
355 {{"hi", "pmore"}, 0x8},
356 {{"ls", "plast"}, 0x9},
357 {{"ge", "tcont"}, 0xa},
358 {{"lt", "tstop"}, 0xb},
366 get_cond_from_value (aarch64_insn value
)
369 return &aarch64_conds
[(unsigned int) value
];
373 get_inverted_cond (const aarch64_cond
*cond
)
375 return &aarch64_conds
[cond
->value
^ 0x1];
378 /* Table describing the operand extension/shifting operators; indexed by
379 enum aarch64_modifier_kind.
381 The value column provides the most common values for encoding modifiers,
382 which enables table-driven encoding/decoding for the modifiers. */
383 const struct aarch64_name_value_pair aarch64_operand_modifiers
[] =
404 enum aarch64_modifier_kind
405 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
407 return desc
- aarch64_operand_modifiers
;
411 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
413 return aarch64_operand_modifiers
[kind
].value
;
416 enum aarch64_modifier_kind
417 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
418 bfd_boolean extend_p
)
420 if (extend_p
== TRUE
)
421 return AARCH64_MOD_UXTB
+ value
;
423 return AARCH64_MOD_LSL
- value
;
427 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
429 return (kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
)
433 static inline bfd_boolean
434 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
436 return (kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
)
440 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
460 /* Table describing the operands supported by the aliases of the HINT
463 The name column is the operand that is accepted for the alias. The value
464 column is the hint number of the alias. The list of operands is terminated
465 by NULL in the name column. */
467 const struct aarch64_name_value_pair aarch64_hint_options
[] =
469 { "csync", 0x11 }, /* PSB CSYNC. */
473 /* op -> op: load = 0 instruction = 1 store = 2
475 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
476 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
477 const struct aarch64_name_value_pair aarch64_prfops
[32] =
479 { "pldl1keep", B(0, 1, 0) },
480 { "pldl1strm", B(0, 1, 1) },
481 { "pldl2keep", B(0, 2, 0) },
482 { "pldl2strm", B(0, 2, 1) },
483 { "pldl3keep", B(0, 3, 0) },
484 { "pldl3strm", B(0, 3, 1) },
487 { "plil1keep", B(1, 1, 0) },
488 { "plil1strm", B(1, 1, 1) },
489 { "plil2keep", B(1, 2, 0) },
490 { "plil2strm", B(1, 2, 1) },
491 { "plil3keep", B(1, 3, 0) },
492 { "plil3strm", B(1, 3, 1) },
495 { "pstl1keep", B(2, 1, 0) },
496 { "pstl1strm", B(2, 1, 1) },
497 { "pstl2keep", B(2, 2, 0) },
498 { "pstl2strm", B(2, 2, 1) },
499 { "pstl3keep", B(2, 3, 0) },
500 { "pstl3strm", B(2, 3, 1) },
514 /* Utilities on value constraint. */
/* Return 1 if VALUE lies in the inclusive range [LOW, HIGH], 0
   otherwise.
   NOTE(review): "static int" return type reconstructed (declaration
   line missing from this copy) -- confirm against upstream.  */
static int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}
/* Return true if VALUE is a multiple of ALIGN.  ALIGN must be
   non-zero; no divide-by-zero guard is present in the original.
   NOTE(review): "static int" return type reconstructed (declaration
   line missing from this copy) -- confirm against upstream.  */
static int
value_aligned_p (int64_t value, int align)
{
  return (value % align) == 0;
}
529 /* A signed value fits in a field. */
531 value_fit_signed_field_p (int64_t value
, unsigned width
)
534 if (width
< sizeof (value
) * 8)
536 int64_t lim
= (int64_t)1 << (width
- 1);
537 if (value
>= -lim
&& value
< lim
)
543 /* An unsigned value fits in a field. */
545 value_fit_unsigned_field_p (int64_t value
, unsigned width
)
548 if (width
< sizeof (value
) * 8)
550 int64_t lim
= (int64_t)1 << width
;
551 if (value
>= 0 && value
< lim
)
557 /* Return 1 if OPERAND is SP or WSP. */
559 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
561 return ((aarch64_get_operand_class (operand
->type
)
562 == AARCH64_OPND_CLASS_INT_REG
)
563 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
564 && operand
->reg
.regno
== 31);
567 /* Return 1 if OPERAND is XZR or WZP. */
569 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
571 return ((aarch64_get_operand_class (operand
->type
)
572 == AARCH64_OPND_CLASS_INT_REG
)
573 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
574 && operand
->reg
.regno
== 31);
577 /* Return true if the operand *OPERAND that has the operand code
578 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
579 qualified by the qualifier TARGET. */
582 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
583 aarch64_opnd_qualifier_t target
)
585 switch (operand
->qualifier
)
587 case AARCH64_OPND_QLF_W
:
588 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
591 case AARCH64_OPND_QLF_X
:
592 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
595 case AARCH64_OPND_QLF_WSP
:
596 if (target
== AARCH64_OPND_QLF_W
597 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
600 case AARCH64_OPND_QLF_SP
:
601 if (target
== AARCH64_OPND_QLF_X
602 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
612 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
613 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
615 Return NIL if more than one expected qualifiers are found. */
617 aarch64_opnd_qualifier_t
618 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
620 const aarch64_opnd_qualifier_t known_qlf
,
627 When the known qualifier is NIL, we have to assume that there is only
628 one qualifier sequence in the *QSEQ_LIST and return the corresponding
629 qualifier directly. One scenario is that for instruction
630 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
631 which has only one possible valid qualifier sequence
633 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
634 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
636 Because the qualifier NIL has dual roles in the qualifier sequence:
637 it can mean no qualifier for the operand, or the qualifer sequence is
638 not in use (when all qualifiers in the sequence are NILs), we have to
639 handle this special case here. */
640 if (known_qlf
== AARCH64_OPND_NIL
)
642 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
643 return qseq_list
[0][idx
];
646 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
648 if (qseq_list
[i
][known_idx
] == known_qlf
)
651 /* More than one sequences are found to have KNOWN_QLF at
653 return AARCH64_OPND_NIL
;
658 return qseq_list
[saved_i
][idx
];
661 enum operand_qualifier_kind
669 /* Operand qualifier description. */
670 struct operand_qualifier_data
672 /* The usage of the three data fields depends on the qualifier kind. */
679 enum operand_qualifier_kind kind
;
682 /* Indexed by the operand qualifier enumerators. */
683 struct operand_qualifier_data aarch64_opnd_qualifiers
[] =
685 {0, 0, 0, "NIL", OQK_NIL
},
687 /* Operand variant qualifiers.
689 element size, number of elements and common value for encoding. */
691 {4, 1, 0x0, "w", OQK_OPD_VARIANT
},
692 {8, 1, 0x1, "x", OQK_OPD_VARIANT
},
693 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT
},
694 {8, 1, 0x1, "sp", OQK_OPD_VARIANT
},
696 {1, 1, 0x0, "b", OQK_OPD_VARIANT
},
697 {2, 1, 0x1, "h", OQK_OPD_VARIANT
},
698 {4, 1, 0x2, "s", OQK_OPD_VARIANT
},
699 {8, 1, 0x3, "d", OQK_OPD_VARIANT
},
700 {16, 1, 0x4, "q", OQK_OPD_VARIANT
},
702 {1, 4, 0x0, "4b", OQK_OPD_VARIANT
},
703 {1, 8, 0x0, "8b", OQK_OPD_VARIANT
},
704 {1, 16, 0x1, "16b", OQK_OPD_VARIANT
},
705 {2, 2, 0x0, "2h", OQK_OPD_VARIANT
},
706 {2, 4, 0x2, "4h", OQK_OPD_VARIANT
},
707 {2, 8, 0x3, "8h", OQK_OPD_VARIANT
},
708 {4, 2, 0x4, "2s", OQK_OPD_VARIANT
},
709 {4, 4, 0x5, "4s", OQK_OPD_VARIANT
},
710 {8, 1, 0x6, "1d", OQK_OPD_VARIANT
},
711 {8, 2, 0x7, "2d", OQK_OPD_VARIANT
},
712 {16, 1, 0x8, "1q", OQK_OPD_VARIANT
},
714 {0, 0, 0, "z", OQK_OPD_VARIANT
},
715 {0, 0, 0, "m", OQK_OPD_VARIANT
},
717 /* Qualifiers constraining the value range.
719 Lower bound, higher bound, unused. */
721 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE
},
722 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE
},
723 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE
},
724 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE
},
725 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE
},
726 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE
},
727 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE
},
729 /* Qualifiers for miscellaneous purpose.
731 unused, unused and unused. */
736 {0, 0, 0, "retrieving", 0},
739 static inline bfd_boolean
740 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
742 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
)
746 static inline bfd_boolean
747 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
749 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
)
754 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
756 return aarch64_opnd_qualifiers
[qualifier
].desc
;
759 /* Given an operand qualifier, return the expected data element size
760 of a qualified operand. */
762 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
764 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
765 return aarch64_opnd_qualifiers
[qualifier
].data0
;
769 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
771 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
772 return aarch64_opnd_qualifiers
[qualifier
].data1
;
776 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
778 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
779 return aarch64_opnd_qualifiers
[qualifier
].data2
;
783 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
785 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
786 return aarch64_opnd_qualifiers
[qualifier
].data0
;
790 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
792 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
793 return aarch64_opnd_qualifiers
[qualifier
].data1
;
798 aarch64_verbose (const char *str
, ...)
809 dump_qualifier_sequence (const aarch64_opnd_qualifier_t
*qualifier
)
813 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++qualifier
)
814 printf ("%s,", aarch64_get_qualifier_name (*qualifier
));
819 dump_match_qualifiers (const struct aarch64_opnd_info
*opnd
,
820 const aarch64_opnd_qualifier_t
*qualifier
)
823 aarch64_opnd_qualifier_t curr
[AARCH64_MAX_OPND_NUM
];
825 aarch64_verbose ("dump_match_qualifiers:");
826 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
827 curr
[i
] = opnd
[i
].qualifier
;
828 dump_qualifier_sequence (curr
);
829 aarch64_verbose ("against");
830 dump_qualifier_sequence (qualifier
);
832 #endif /* DEBUG_AARCH64 */
834 /* TODO improve this, we can have an extra field at the runtime to
835 store the number of operands rather than calculating it every time. */
838 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
841 const enum aarch64_opnd
*opnds
= opcode
->operands
;
842 while (opnds
[i
++] != AARCH64_OPND_NIL
)
845 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
849 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
850 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
852 N.B. on the entry, it is very likely that only some operands in *INST
853 have had their qualifiers been established.
855 If STOP_AT is not -1, the function will only try to match
856 the qualifier sequence for operands before and including the operand
857 of index STOP_AT; and on success *RET will only be filled with the first
858 (STOP_AT+1) qualifiers.
860 A couple examples of the matching algorithm:
868 Apart from serving the main encoding routine, this can also be called
869 during or after the operand decoding. */
872 aarch64_find_best_match (const aarch64_inst
*inst
,
873 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
874 int stop_at
, aarch64_opnd_qualifier_t
*ret
)
878 const aarch64_opnd_qualifier_t
*qualifiers
;
880 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
883 DEBUG_TRACE ("SUCCEED: no operand");
887 if (stop_at
< 0 || stop_at
>= num_opnds
)
888 stop_at
= num_opnds
- 1;
890 /* For each pattern. */
891 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
894 qualifiers
= *qualifiers_list
;
896 /* Start as positive. */
899 DEBUG_TRACE ("%d", i
);
902 dump_match_qualifiers (inst
->operands
, qualifiers
);
905 /* Most opcodes has much fewer patterns in the list.
906 First NIL qualifier indicates the end in the list. */
907 if (empty_qualifier_sequence_p (qualifiers
) == TRUE
)
909 DEBUG_TRACE_IF (i
== 0, "SUCCEED: empty qualifier list");
915 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
917 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
)
919 /* Either the operand does not have qualifier, or the qualifier
920 for the operand needs to be deduced from the qualifier
922 In the latter case, any constraint checking related with
923 the obtained qualifier should be done later in
924 operand_general_constraint_met_p. */
927 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
929 /* Unless the target qualifier can also qualify the operand
930 (which has already had a non-nil qualifier), non-equal
931 qualifiers are generally un-matched. */
932 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
941 continue; /* Equal qualifiers are certainly matched. */
944 /* Qualifiers established. */
951 /* Fill the result in *RET. */
953 qualifiers
= *qualifiers_list
;
955 DEBUG_TRACE ("complete qualifiers using list %d", i
);
958 dump_qualifier_sequence (qualifiers
);
961 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
962 ret
[j
] = *qualifiers
;
963 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
964 ret
[j
] = AARCH64_OPND_QLF_NIL
;
966 DEBUG_TRACE ("SUCCESS");
970 DEBUG_TRACE ("FAIL");
974 /* Operand qualifier matching and resolving.
976 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
977 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
979 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
983 match_operands_qualifier (aarch64_inst
*inst
, bfd_boolean update_p
)
986 aarch64_opnd_qualifier_seq_t qualifiers
;
988 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
991 DEBUG_TRACE ("matching FAIL");
995 if (inst
->opcode
->flags
& F_STRICT
)
997 /* Require an exact qualifier match, even for NIL qualifiers. */
998 nops
= aarch64_num_of_operands (inst
->opcode
);
999 for (i
= 0; i
< nops
; ++i
)
1000 if (inst
->operands
[i
].qualifier
!= qualifiers
[i
])
1004 /* Update the qualifiers. */
1005 if (update_p
== TRUE
)
1006 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1008 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
1010 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
1011 "update %s with %s for operand %d",
1012 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
1013 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
1014 inst
->operands
[i
].qualifier
= qualifiers
[i
];
1017 DEBUG_TRACE ("matching SUCCESS");
1021 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1024 IS32 indicates whether value is a 32-bit immediate or not.
1025 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1026 amount will be returned in *SHIFT_AMOUNT. */
1029 aarch64_wide_constant_p (int64_t value
, int is32
, unsigned int *shift_amount
)
1033 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1037 /* Allow all zeros or all ones in top 32-bits, so that
1038 32-bit constant expressions like ~0x80000000 are
1040 uint64_t ext
= value
;
1041 if (ext
>> 32 != 0 && ext
>> 32 != (uint64_t) 0xffffffff)
1042 /* Immediate out of range. */
1044 value
&= (int64_t) 0xffffffff;
1047 /* first, try movz then movn */
1049 if ((value
& ((int64_t) 0xffff << 0)) == value
)
1051 else if ((value
& ((int64_t) 0xffff << 16)) == value
)
1053 else if (!is32
&& (value
& ((int64_t) 0xffff << 32)) == value
)
1055 else if (!is32
&& (value
& ((int64_t) 0xffff << 48)) == value
)
1060 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1064 if (shift_amount
!= NULL
)
1065 *shift_amount
= amount
;
1067 DEBUG_TRACE ("exit TRUE with amount %d", amount
);
1072 /* Build the accepted values for immediate logical SIMD instructions.
1074 The standard encodings of the immediate value are:
1075 N imms immr SIMD size R S
1076 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1077 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1078 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1079 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1080 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1081 0 11110s 00000r 2 UInt(r) UInt(s)
1082 where all-ones value of S is reserved.
1084 Let's call E the SIMD size.
1086 The immediate value is: S+1 bits '1' rotated to the right by R.
1088 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1089 (remember S != E - 1). */
1091 #define TOTAL_IMM_NB 5334
1096 aarch64_insn encoding
;
1097 } simd_imm_encoding
;
1099 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
1102 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
1104 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
1105 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
1107 if (imm1
->imm
< imm2
->imm
)
1109 if (imm1
->imm
> imm2
->imm
)
1114 /* immediate bitfield standard encoding
1115 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1116 1 ssssss rrrrrr 64 rrrrrr ssssss
1117 0 0sssss 0rrrrr 32 rrrrr sssss
1118 0 10ssss 00rrrr 16 rrrr ssss
1119 0 110sss 000rrr 8 rrr sss
1120 0 1110ss 0000rr 4 rr ss
1121 0 11110s 00000r 2 r s */
/* Pack a logical-immediate bitfield encoding: bit 12 is the N bit
   (IS64), bits 11..6 are the rotate count R and bits 5..0 are S (the
   "number of set bits minus one" field, possibly merged with its size
   marker by the caller).
   NOTE(review): return type reconstructed (declaration line missing
   from this copy) -- confirm against upstream.  */
static inline uint32_t
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
1129 build_immediate_table (void)
1131 uint32_t log_e
, e
, s
, r
, s_mask
;
1137 for (log_e
= 1; log_e
<= 6; log_e
++)
1139 /* Get element size. */
1144 mask
= 0xffffffffffffffffull
;
1150 mask
= (1ull << e
) - 1;
1152 1 ((1 << 4) - 1) << 2 = 111100
1153 2 ((1 << 3) - 1) << 3 = 111000
1154 3 ((1 << 2) - 1) << 4 = 110000
1155 4 ((1 << 1) - 1) << 5 = 100000
1156 5 ((1 << 0) - 1) << 6 = 000000 */
1157 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1159 for (s
= 0; s
< e
- 1; s
++)
1160 for (r
= 0; r
< e
; r
++)
1162 /* s+1 consecutive bits to 1 (s < 63) */
1163 imm
= (1ull << (s
+ 1)) - 1;
1164 /* rotate right by r */
1166 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1167 /* replicate the constant depending on SIMD size */
1170 case 1: imm
= (imm
<< 2) | imm
;
1172 case 2: imm
= (imm
<< 4) | imm
;
1174 case 3: imm
= (imm
<< 8) | imm
;
1176 case 4: imm
= (imm
<< 16) | imm
;
1178 case 5: imm
= (imm
<< 32) | imm
;
1183 simd_immediates
[nb_imms
].imm
= imm
;
1184 simd_immediates
[nb_imms
].encoding
=
1185 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1189 assert (nb_imms
== TOTAL_IMM_NB
);
1190 qsort(simd_immediates
, nb_imms
,
1191 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1194 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1195 be accepted by logical (immediate) instructions
1196 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1198 ESIZE is the number of bytes in the decoded immediate value.
1199 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1200 VALUE will be returned in *ENCODING. */
1203 aarch64_logical_immediate_p (uint64_t value
, int esize
, aarch64_insn
*encoding
)
1205 simd_imm_encoding imm_enc
;
1206 const simd_imm_encoding
*imm_encoding
;
1207 static bfd_boolean initialized
= FALSE
;
1211 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), esize: %d", value
,
1216 build_immediate_table ();
1220 /* Allow all zeros or all ones in top bits, so that
1221 constant expressions like ~1 are permitted. */
1222 upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
1223 if ((value
& ~upper
) != value
&& (value
| upper
) != value
)
1226 /* Replicate to a full 64-bit value. */
1228 for (i
= esize
* 8; i
< 64; i
*= 2)
1229 value
|= (value
<< i
);
1231 imm_enc
.imm
= value
;
1232 imm_encoding
= (const simd_imm_encoding
*)
1233 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1234 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1235 if (imm_encoding
== NULL
)
1237 DEBUG_TRACE ("exit with FALSE");
1240 if (encoding
!= NULL
)
1241 *encoding
= imm_encoding
->encoding
;
1242 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */

int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int byte_idx;
  int imm8 = 0;
  uint32_t byte;

  /* Walk the eight bytes from least to most significant; each byte must
     be either all-ones (contributes a 1 bit) or all-zeros (a 0 bit).  */
  for (byte_idx = 0; byte_idx < 8; byte_idx++)
    {
      byte = (imm >> (8 * byte_idx)) & 0xff;
      if (byte == 0xff)
	imm8 |= 1 << byte_idx;
      else if (byte != 0x00)
	/* A mixed byte cannot come from an expanded 8-bit immediate.  */
	return -1;
    }

  return imm8;
}
1268 /* Utility inline functions for operand_general_constraint_met_p. */
1271 set_error (aarch64_operand_error
*mismatch_detail
,
1272 enum aarch64_operand_error_kind kind
, int idx
,
1275 if (mismatch_detail
== NULL
)
1277 mismatch_detail
->kind
= kind
;
1278 mismatch_detail
->index
= idx
;
1279 mismatch_detail
->error
= error
;
1283 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1286 if (mismatch_detail
== NULL
)
1288 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1292 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1293 int idx
, int lower_bound
, int upper_bound
,
1296 if (mismatch_detail
== NULL
)
1298 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1299 mismatch_detail
->data
[0] = lower_bound
;
1300 mismatch_detail
->data
[1] = upper_bound
;
1304 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1305 int idx
, int lower_bound
, int upper_bound
)
1307 if (mismatch_detail
== NULL
)
1309 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1310 _("immediate value"));
1314 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1315 int idx
, int lower_bound
, int upper_bound
)
1317 if (mismatch_detail
== NULL
)
1319 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1320 _("immediate offset"));
1324 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1325 int idx
, int lower_bound
, int upper_bound
)
1327 if (mismatch_detail
== NULL
)
1329 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1330 _("register number"));
1334 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1335 int idx
, int lower_bound
, int upper_bound
)
1337 if (mismatch_detail
== NULL
)
1339 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1340 _("register element index"));
1344 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1345 int idx
, int lower_bound
, int upper_bound
)
1347 if (mismatch_detail
== NULL
)
1349 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1353 /* Report that the MUL modifier in operand IDX should be in the range
1354 [LOWER_BOUND, UPPER_BOUND]. */
1356 set_multiplier_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1357 int idx
, int lower_bound
, int upper_bound
)
1359 if (mismatch_detail
== NULL
)
1361 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1366 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1369 if (mismatch_detail
== NULL
)
1371 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1372 mismatch_detail
->data
[0] = alignment
;
1376 set_reg_list_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1379 if (mismatch_detail
== NULL
)
1381 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST
, idx
, NULL
);
1382 mismatch_detail
->data
[0] = expected_num
;
1386 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1389 if (mismatch_detail
== NULL
)
1391 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
1394 /* General constraint checking based on operand code.
1396 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1397 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1399 This function has to be called after the qualifiers for all operands
1402 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1403 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1404 of error message during the disassembling where error message is not
1405 wanted. We avoid the dynamic construction of strings of error messages
1406 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1407 use a combination of error code, static string and some integer data to
1408 represent an error. */
1411 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1412 enum aarch64_opnd type
,
1413 const aarch64_opcode
*opcode
,
1414 aarch64_operand_error
*mismatch_detail
)
1416 unsigned num
, modifiers
, shift
;
1418 int64_t imm
, min_value
, max_value
;
1419 uint64_t uvalue
, mask
;
1420 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1421 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1423 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1425 switch (aarch64_operands
[type
].op_class
)
1427 case AARCH64_OPND_CLASS_INT_REG
:
1428 /* Check pair reg constraints for cas* instructions. */
1429 if (type
== AARCH64_OPND_PAIRREG
)
1431 assert (idx
== 1 || idx
== 3);
1432 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1434 set_syntax_error (mismatch_detail
, idx
- 1,
1435 _("reg pair must start from even reg"));
1438 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1440 set_syntax_error (mismatch_detail
, idx
,
1441 _("reg pair must be contiguous"));
1447 /* <Xt> may be optional in some IC and TLBI instructions. */
1448 if (type
== AARCH64_OPND_Rt_SYS
)
1450 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1451 == AARCH64_OPND_CLASS_SYSTEM
));
1452 if (opnds
[1].present
1453 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1455 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1458 if (!opnds
[1].present
1459 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1461 set_other_error (mismatch_detail
, idx
, _("missing register"));
1467 case AARCH64_OPND_QLF_WSP
:
1468 case AARCH64_OPND_QLF_SP
:
1469 if (!aarch64_stack_pointer_p (opnd
))
1471 set_other_error (mismatch_detail
, idx
,
1472 _("stack pointer register expected"));
1481 case AARCH64_OPND_CLASS_SVE_REG
:
1484 case AARCH64_OPND_SVE_Zm3_INDEX
:
1485 case AARCH64_OPND_SVE_Zm3_22_INDEX
:
1486 case AARCH64_OPND_SVE_Zm4_INDEX
:
1487 size
= get_operand_fields_width (get_operand_from_code (type
));
1488 shift
= get_operand_specific_data (&aarch64_operands
[type
]);
1489 mask
= (1 << shift
) - 1;
1490 if (opnd
->reg
.regno
> mask
)
1492 assert (mask
== 7 || mask
== 15);
1493 set_other_error (mismatch_detail
, idx
,
1495 ? _("z0-z15 expected")
1496 : _("z0-z7 expected"));
1499 mask
= (1 << (size
- shift
)) - 1;
1500 if (!value_in_range_p (opnd
->reglane
.index
, 0, mask
))
1502 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, mask
);
1507 case AARCH64_OPND_SVE_Zn_INDEX
:
1508 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1509 if (!value_in_range_p (opnd
->reglane
.index
, 0, 64 / size
- 1))
1511 set_elem_idx_out_of_range_error (mismatch_detail
, idx
,
1517 case AARCH64_OPND_SVE_ZnxN
:
1518 case AARCH64_OPND_SVE_ZtxN
:
1519 if (opnd
->reglist
.num_regs
!= get_opcode_dependent_value (opcode
))
1521 set_other_error (mismatch_detail
, idx
,
1522 _("invalid register list"));
1532 case AARCH64_OPND_CLASS_PRED_REG
:
1533 if (opnd
->reg
.regno
>= 8
1534 && get_operand_fields_width (get_operand_from_code (type
)) == 3)
1536 set_other_error (mismatch_detail
, idx
, _("p0-p7 expected"));
1541 case AARCH64_OPND_CLASS_COND
:
1542 if (type
== AARCH64_OPND_COND1
1543 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
1545 /* Not allow AL or NV. */
1546 set_syntax_error (mismatch_detail
, idx
, NULL
);
1550 case AARCH64_OPND_CLASS_ADDRESS
:
1551 /* Check writeback. */
1552 switch (opcode
->iclass
)
1556 case ldstnapair_offs
:
1559 if (opnd
->addr
.writeback
== 1)
1561 set_syntax_error (mismatch_detail
, idx
,
1562 _("unexpected address writeback"));
1567 if (opnd
->addr
.writeback
== 1 && opnd
->addr
.preind
!= 1)
1569 set_syntax_error (mismatch_detail
, idx
,
1570 _("unexpected address writeback"));
1575 case ldstpair_indexed
:
1578 if (opnd
->addr
.writeback
== 0)
1580 set_syntax_error (mismatch_detail
, idx
,
1581 _("address writeback expected"));
1586 assert (opnd
->addr
.writeback
== 0);
1591 case AARCH64_OPND_ADDR_SIMM7
:
1592 /* Scaled signed 7 bits immediate offset. */
1593 /* Get the size of the data element that is accessed, which may be
1594 different from that of the source register size,
1595 e.g. in strb/ldrb. */
1596 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1597 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
1599 set_offset_out_of_range_error (mismatch_detail
, idx
,
1600 -64 * size
, 63 * size
);
1603 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1605 set_unaligned_error (mismatch_detail
, idx
, size
);
1609 case AARCH64_OPND_ADDR_OFFSET
:
1610 case AARCH64_OPND_ADDR_SIMM9
:
1611 /* Unscaled signed 9 bits immediate offset. */
1612 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
1614 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
1619 case AARCH64_OPND_ADDR_SIMM9_2
:
1620 /* Unscaled signed 9 bits immediate offset, which has to be negative
1622 size
= aarch64_get_qualifier_esize (qualifier
);
1623 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
1624 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1625 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
1627 set_other_error (mismatch_detail
, idx
,
1628 _("negative or unaligned offset expected"));
1631 case AARCH64_OPND_ADDR_SIMM10
:
1632 /* Scaled signed 10 bits immediate offset. */
1633 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -4096, 4088))
1635 set_offset_out_of_range_error (mismatch_detail
, idx
, -4096, 4088);
1638 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 8))
1640 set_unaligned_error (mismatch_detail
, idx
, 8);
1645 case AARCH64_OPND_SIMD_ADDR_POST
:
1646 /* AdvSIMD load/store multiple structures, post-index. */
1648 if (opnd
->addr
.offset
.is_reg
)
1650 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
1654 set_other_error (mismatch_detail
, idx
,
1655 _("invalid register offset"));
1661 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
1662 unsigned num_bytes
; /* total number of bytes transferred. */
1663 /* The opcode dependent area stores the number of elements in
1664 each structure to be loaded/stored. */
1665 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
1666 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
1667 /* Special handling of loading single structure to all lane. */
1668 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
1669 * aarch64_get_qualifier_esize (prev
->qualifier
);
1671 num_bytes
= prev
->reglist
.num_regs
1672 * aarch64_get_qualifier_esize (prev
->qualifier
)
1673 * aarch64_get_qualifier_nelem (prev
->qualifier
);
1674 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
1676 set_other_error (mismatch_detail
, idx
,
1677 _("invalid post-increment amount"));
1683 case AARCH64_OPND_ADDR_REGOFF
:
1684 /* Get the size of the data element that is accessed, which may be
1685 different from that of the source register size,
1686 e.g. in strb/ldrb. */
1687 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1688 /* It is either no shift or shift by the binary logarithm of SIZE. */
1689 if (opnd
->shifter
.amount
!= 0
1690 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
1692 set_other_error (mismatch_detail
, idx
,
1693 _("invalid shift amount"));
1696 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1698 switch (opnd
->shifter
.kind
)
1700 case AARCH64_MOD_UXTW
:
1701 case AARCH64_MOD_LSL
:
1702 case AARCH64_MOD_SXTW
:
1703 case AARCH64_MOD_SXTX
: break;
1705 set_other_error (mismatch_detail
, idx
,
1706 _("invalid extend/shift operator"));
1711 case AARCH64_OPND_ADDR_UIMM12
:
1712 imm
= opnd
->addr
.offset
.imm
;
1713 /* Get the size of the data element that is accessed, which may be
1714 different from that of the source register size,
1715 e.g. in strb/ldrb. */
1716 size
= aarch64_get_qualifier_esize (qualifier
);
1717 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
1719 set_offset_out_of_range_error (mismatch_detail
, idx
,
1723 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1725 set_unaligned_error (mismatch_detail
, idx
, size
);
1730 case AARCH64_OPND_ADDR_PCREL14
:
1731 case AARCH64_OPND_ADDR_PCREL19
:
1732 case AARCH64_OPND_ADDR_PCREL21
:
1733 case AARCH64_OPND_ADDR_PCREL26
:
1734 imm
= opnd
->imm
.value
;
1735 if (operand_need_shift_by_two (get_operand_from_code (type
)))
1737 /* The offset value in a PC-relative branch instruction is alway
1738 4-byte aligned and is encoded without the lowest 2 bits. */
1739 if (!value_aligned_p (imm
, 4))
1741 set_unaligned_error (mismatch_detail
, idx
, 4);
1744 /* Right shift by 2 so that we can carry out the following check
1748 size
= get_operand_fields_width (get_operand_from_code (type
));
1749 if (!value_fit_signed_field_p (imm
, size
))
1751 set_other_error (mismatch_detail
, idx
,
1752 _("immediate out of range"));
1757 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
1758 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
1759 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
1760 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
1764 assert (!opnd
->addr
.offset
.is_reg
);
1765 assert (opnd
->addr
.preind
);
1766 num
= 1 + get_operand_specific_data (&aarch64_operands
[type
]);
1769 if ((opnd
->addr
.offset
.imm
!= 0 && !opnd
->shifter
.operator_present
)
1770 || (opnd
->shifter
.operator_present
1771 && opnd
->shifter
.kind
!= AARCH64_MOD_MUL_VL
))
1773 set_other_error (mismatch_detail
, idx
,
1774 _("invalid addressing mode"));
1777 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1779 set_offset_out_of_range_error (mismatch_detail
, idx
,
1780 min_value
, max_value
);
1783 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1785 set_unaligned_error (mismatch_detail
, idx
, num
);
1790 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
1793 goto sve_imm_offset_vl
;
1795 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
1798 goto sve_imm_offset_vl
;
1800 case AARCH64_OPND_SVE_ADDR_RI_U6
:
1801 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
1802 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
1803 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
1807 assert (!opnd
->addr
.offset
.is_reg
);
1808 assert (opnd
->addr
.preind
);
1809 num
= 1 << get_operand_specific_data (&aarch64_operands
[type
]);
1812 if (opnd
->shifter
.operator_present
1813 || opnd
->shifter
.amount_present
)
1815 set_other_error (mismatch_detail
, idx
,
1816 _("invalid addressing mode"));
1819 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1821 set_offset_out_of_range_error (mismatch_detail
, idx
,
1822 min_value
, max_value
);
1825 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1827 set_unaligned_error (mismatch_detail
, idx
, num
);
1832 case AARCH64_OPND_SVE_ADDR_RI_S4x16
:
1835 goto sve_imm_offset
;
1837 case AARCH64_OPND_SVE_ADDR_RR
:
1838 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
1839 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
1840 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
1841 case AARCH64_OPND_SVE_ADDR_RX
:
1842 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
1843 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
1844 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
1845 case AARCH64_OPND_SVE_ADDR_RZ
:
1846 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
1847 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
1848 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
1849 modifiers
= 1 << AARCH64_MOD_LSL
;
1851 assert (opnd
->addr
.offset
.is_reg
);
1852 assert (opnd
->addr
.preind
);
1853 if ((aarch64_operands
[type
].flags
& OPD_F_NO_ZR
) != 0
1854 && opnd
->addr
.offset
.regno
== 31)
1856 set_other_error (mismatch_detail
, idx
,
1857 _("index register xzr is not allowed"));
1860 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1861 || (opnd
->shifter
.amount
1862 != get_operand_specific_data (&aarch64_operands
[type
])))
1864 set_other_error (mismatch_detail
, idx
,
1865 _("invalid addressing mode"));
1870 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
1871 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
1872 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
1873 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
1874 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
1875 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
1876 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
1877 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
1878 modifiers
= (1 << AARCH64_MOD_SXTW
) | (1 << AARCH64_MOD_UXTW
);
1879 goto sve_rr_operand
;
1881 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
1882 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
1883 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
1884 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
1887 goto sve_imm_offset
;
1889 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
1890 modifiers
= 1 << AARCH64_MOD_LSL
;
1892 assert (opnd
->addr
.offset
.is_reg
);
1893 assert (opnd
->addr
.preind
);
1894 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1895 || opnd
->shifter
.amount
< 0
1896 || opnd
->shifter
.amount
> 3)
1898 set_other_error (mismatch_detail
, idx
,
1899 _("invalid addressing mode"));
1904 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
1905 modifiers
= (1 << AARCH64_MOD_SXTW
);
1906 goto sve_zz_operand
;
1908 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
1909 modifiers
= 1 << AARCH64_MOD_UXTW
;
1910 goto sve_zz_operand
;
1917 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
1918 if (type
== AARCH64_OPND_LEt
)
1920 /* Get the upper bound for the element index. */
1921 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1922 if (!value_in_range_p (opnd
->reglist
.index
, 0, num
))
1924 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1928 /* The opcode dependent area stores the number of elements in
1929 each structure to be loaded/stored. */
1930 num
= get_opcode_dependent_value (opcode
);
1933 case AARCH64_OPND_LVt
:
1934 assert (num
>= 1 && num
<= 4);
1935 /* Unless LD1/ST1, the number of registers should be equal to that
1936 of the structure elements. */
1937 if (num
!= 1 && opnd
->reglist
.num_regs
!= num
)
1939 set_reg_list_error (mismatch_detail
, idx
, num
);
1943 case AARCH64_OPND_LVt_AL
:
1944 case AARCH64_OPND_LEt
:
1945 assert (num
>= 1 && num
<= 4);
1946 /* The number of registers should be equal to that of the structure
1948 if (opnd
->reglist
.num_regs
!= num
)
1950 set_reg_list_error (mismatch_detail
, idx
, num
);
1959 case AARCH64_OPND_CLASS_IMMEDIATE
:
1960 /* Constraint check on immediate operand. */
1961 imm
= opnd
->imm
.value
;
1962 /* E.g. imm_0_31 constrains value to be 0..31. */
1963 if (qualifier_value_in_range_constraint_p (qualifier
)
1964 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
1965 get_upper_bound (qualifier
)))
1967 set_imm_out_of_range_error (mismatch_detail
, idx
,
1968 get_lower_bound (qualifier
),
1969 get_upper_bound (qualifier
));
1975 case AARCH64_OPND_AIMM
:
1976 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1978 set_other_error (mismatch_detail
, idx
,
1979 _("invalid shift operator"));
1982 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
1984 set_other_error (mismatch_detail
, idx
,
1985 _("shift amount must be 0 or 12"));
1988 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
1990 set_other_error (mismatch_detail
, idx
,
1991 _("immediate out of range"));
1996 case AARCH64_OPND_HALF
:
1997 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
1998 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2000 set_other_error (mismatch_detail
, idx
,
2001 _("invalid shift operator"));
2004 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2005 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
2007 set_other_error (mismatch_detail
, idx
,
2008 _("shift amount must be a multiple of 16"));
2011 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
2013 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
2017 if (opnd
->imm
.value
< 0)
2019 set_other_error (mismatch_detail
, idx
,
2020 _("negative immediate value not allowed"));
2023 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
2025 set_other_error (mismatch_detail
, idx
,
2026 _("immediate out of range"));
2031 case AARCH64_OPND_IMM_MOV
:
2033 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2034 imm
= opnd
->imm
.value
;
2038 case OP_MOV_IMM_WIDEN
:
2041 case OP_MOV_IMM_WIDE
:
2042 if (!aarch64_wide_constant_p (imm
, esize
== 4, NULL
))
2044 set_other_error (mismatch_detail
, idx
,
2045 _("immediate out of range"));
2049 case OP_MOV_IMM_LOG
:
2050 if (!aarch64_logical_immediate_p (imm
, esize
, NULL
))
2052 set_other_error (mismatch_detail
, idx
,
2053 _("immediate out of range"));
2064 case AARCH64_OPND_NZCV
:
2065 case AARCH64_OPND_CCMP_IMM
:
2066 case AARCH64_OPND_EXCEPTION
:
2067 case AARCH64_OPND_UIMM4
:
2068 case AARCH64_OPND_UIMM7
:
2069 case AARCH64_OPND_UIMM3_OP1
:
2070 case AARCH64_OPND_UIMM3_OP2
:
2071 case AARCH64_OPND_SVE_UIMM3
:
2072 case AARCH64_OPND_SVE_UIMM7
:
2073 case AARCH64_OPND_SVE_UIMM8
:
2074 case AARCH64_OPND_SVE_UIMM8_53
:
2075 size
= get_operand_fields_width (get_operand_from_code (type
));
2077 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
2079 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2085 case AARCH64_OPND_SIMM5
:
2086 case AARCH64_OPND_SVE_SIMM5
:
2087 case AARCH64_OPND_SVE_SIMM5B
:
2088 case AARCH64_OPND_SVE_SIMM6
:
2089 case AARCH64_OPND_SVE_SIMM8
:
2090 size
= get_operand_fields_width (get_operand_from_code (type
));
2092 if (!value_fit_signed_field_p (opnd
->imm
.value
, size
))
2094 set_imm_out_of_range_error (mismatch_detail
, idx
,
2096 (1 << (size
- 1)) - 1);
2101 case AARCH64_OPND_WIDTH
:
2102 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
2103 && opnds
[0].type
== AARCH64_OPND_Rd
);
2104 size
= get_upper_bound (qualifier
);
2105 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
2106 /* lsb+width <= reg.size */
2108 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
2109 size
- opnds
[idx
-1].imm
.value
);
2114 case AARCH64_OPND_LIMM
:
2115 case AARCH64_OPND_SVE_LIMM
:
2117 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2118 uint64_t uimm
= opnd
->imm
.value
;
2119 if (opcode
->op
== OP_BIC
)
2121 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2123 set_other_error (mismatch_detail
, idx
,
2124 _("immediate out of range"));
2130 case AARCH64_OPND_IMM0
:
2131 case AARCH64_OPND_FPIMM0
:
2132 if (opnd
->imm
.value
!= 0)
2134 set_other_error (mismatch_detail
, idx
,
2135 _("immediate zero expected"));
2140 case AARCH64_OPND_IMM_ROT1
:
2141 case AARCH64_OPND_IMM_ROT2
:
2142 case AARCH64_OPND_SVE_IMM_ROT2
:
2143 if (opnd
->imm
.value
!= 0
2144 && opnd
->imm
.value
!= 90
2145 && opnd
->imm
.value
!= 180
2146 && opnd
->imm
.value
!= 270)
2148 set_other_error (mismatch_detail
, idx
,
2149 _("rotate expected to be 0, 90, 180 or 270"));
2154 case AARCH64_OPND_IMM_ROT3
:
2155 case AARCH64_OPND_SVE_IMM_ROT1
:
2156 if (opnd
->imm
.value
!= 90 && opnd
->imm
.value
!= 270)
2158 set_other_error (mismatch_detail
, idx
,
2159 _("rotate expected to be 90 or 270"));
2164 case AARCH64_OPND_SHLL_IMM
:
2166 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2167 if (opnd
->imm
.value
!= size
)
2169 set_other_error (mismatch_detail
, idx
,
2170 _("invalid shift amount"));
2175 case AARCH64_OPND_IMM_VLSL
:
2176 size
= aarch64_get_qualifier_esize (qualifier
);
2177 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
2179 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2185 case AARCH64_OPND_IMM_VLSR
:
2186 size
= aarch64_get_qualifier_esize (qualifier
);
2187 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
2189 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
2194 case AARCH64_OPND_SIMD_IMM
:
2195 case AARCH64_OPND_SIMD_IMM_SFT
:
2196 /* Qualifier check. */
2199 case AARCH64_OPND_QLF_LSL
:
2200 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2202 set_other_error (mismatch_detail
, idx
,
2203 _("invalid shift operator"));
2207 case AARCH64_OPND_QLF_MSL
:
2208 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
2210 set_other_error (mismatch_detail
, idx
,
2211 _("invalid shift operator"));
2215 case AARCH64_OPND_QLF_NIL
:
2216 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2218 set_other_error (mismatch_detail
, idx
,
2219 _("shift is not permitted"));
2227 /* Is the immediate valid? */
2229 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
2231 /* uimm8 or simm8 */
2232 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
2234 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
2238 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
2241 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2242 ffffffffgggggggghhhhhhhh'. */
2243 set_other_error (mismatch_detail
, idx
,
2244 _("invalid value for immediate"));
2247 /* Is the shift amount valid? */
2248 switch (opnd
->shifter
.kind
)
2250 case AARCH64_MOD_LSL
:
2251 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2252 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
2254 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
2258 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
2260 set_unaligned_error (mismatch_detail
, idx
, 8);
2264 case AARCH64_MOD_MSL
:
2265 /* Only 8 and 16 are valid shift amount. */
2266 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
2268 set_other_error (mismatch_detail
, idx
,
2269 _("shift amount must be 0 or 16"));
2274 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2276 set_other_error (mismatch_detail
, idx
,
2277 _("invalid shift operator"));
2284 case AARCH64_OPND_FPIMM
:
2285 case AARCH64_OPND_SIMD_FPIMM
:
2286 case AARCH64_OPND_SVE_FPIMM8
:
2287 if (opnd
->imm
.is_fp
== 0)
2289 set_other_error (mismatch_detail
, idx
,
2290 _("floating-point immediate expected"));
2293 /* The value is expected to be an 8-bit floating-point constant with
2294 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2295 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2297 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
2299 set_other_error (mismatch_detail
, idx
,
2300 _("immediate out of range"));
2303 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2305 set_other_error (mismatch_detail
, idx
,
2306 _("invalid shift operator"));
2311 case AARCH64_OPND_SVE_AIMM
:
2314 assert (opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2315 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2316 mask
= ~((uint64_t) -1 << (size
* 4) << (size
* 4));
2317 uvalue
= opnd
->imm
.value
;
2318 shift
= opnd
->shifter
.amount
;
2323 set_other_error (mismatch_detail
, idx
,
2324 _("no shift amount allowed for"
2325 " 8-bit constants"));
2331 if (shift
!= 0 && shift
!= 8)
2333 set_other_error (mismatch_detail
, idx
,
2334 _("shift amount must be 0 or 8"));
2337 if (shift
== 0 && (uvalue
& 0xff) == 0)
2340 uvalue
= (int64_t) uvalue
/ 256;
2344 if ((uvalue
& mask
) != uvalue
&& (uvalue
| ~mask
) != uvalue
)
2346 set_other_error (mismatch_detail
, idx
,
2347 _("immediate too big for element size"));
2350 uvalue
= (uvalue
- min_value
) & mask
;
2353 set_other_error (mismatch_detail
, idx
,
2354 _("invalid arithmetic immediate"));
2359 case AARCH64_OPND_SVE_ASIMM
:
2363 case AARCH64_OPND_SVE_I1_HALF_ONE
:
2364 assert (opnd
->imm
.is_fp
);
2365 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x3f800000)
2367 set_other_error (mismatch_detail
, idx
,
2368 _("floating-point value must be 0.5 or 1.0"));
2373 case AARCH64_OPND_SVE_I1_HALF_TWO
:
2374 assert (opnd
->imm
.is_fp
);
2375 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x40000000)
2377 set_other_error (mismatch_detail
, idx
,
2378 _("floating-point value must be 0.5 or 2.0"));
2383 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
2384 assert (opnd
->imm
.is_fp
);
2385 if (opnd
->imm
.value
!= 0 && opnd
->imm
.value
!= 0x3f800000)
2387 set_other_error (mismatch_detail
, idx
,
2388 _("floating-point value must be 0.0 or 1.0"));
2393 case AARCH64_OPND_SVE_INV_LIMM
:
2395 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2396 uint64_t uimm
= ~opnd
->imm
.value
;
2397 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2399 set_other_error (mismatch_detail
, idx
,
2400 _("immediate out of range"));
2406 case AARCH64_OPND_SVE_LIMM_MOV
:
2408 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2409 uint64_t uimm
= opnd
->imm
.value
;
2410 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2412 set_other_error (mismatch_detail
, idx
,
2413 _("immediate out of range"));
2416 if (!aarch64_sve_dupm_mov_immediate_p (uimm
, esize
))
2418 set_other_error (mismatch_detail
, idx
,
2419 _("invalid replicated MOV immediate"));
2425 case AARCH64_OPND_SVE_PATTERN_SCALED
:
2426 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL
);
2427 if (!value_in_range_p (opnd
->shifter
.amount
, 1, 16))
2429 set_multiplier_out_of_range_error (mismatch_detail
, idx
, 1, 16);
2434 case AARCH64_OPND_SVE_SHLIMM_PRED
:
2435 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
2436 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2437 if (!value_in_range_p (opnd
->imm
.value
, 0, 8 * size
- 1))
2439 set_imm_out_of_range_error (mismatch_detail
, idx
,
2445 case AARCH64_OPND_SVE_SHRIMM_PRED
:
2446 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
2447 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2448 if (!value_in_range_p (opnd
->imm
.value
, 1, 8 * size
))
2450 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, 8 * size
);
2460 case AARCH64_OPND_CLASS_SYSTEM
:
2463 case AARCH64_OPND_PSTATEFIELD
:
2464 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
2467 The immediate must be #0 or #1. */
2468 if ((opnd
->pstatefield
== 0x03 /* UAO. */
2469 || opnd
->pstatefield
== 0x04 /* PAN. */
2470 || opnd
->pstatefield
== 0x1a) /* DIT. */
2471 && opnds
[1].imm
.value
> 1)
2473 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2476 /* MSR SPSel, #uimm4
2477 Uses uimm4 as a control value to select the stack pointer: if
2478 bit 0 is set it selects the current exception level's stack
2479 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2480 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2481 if (opnd
->pstatefield
== 0x05 /* spsel */ && opnds
[1].imm
.value
> 1)
2483 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2492 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
2493 /* Get the upper bound for the element index. */
2494 if (opcode
->op
== OP_FCMLA_ELEM
)
2495 /* FCMLA index range depends on the vector size of other operands
2496 and is halfed because complex numbers take two elements. */
2497 num
= aarch64_get_qualifier_nelem (opnds
[0].qualifier
)
2498 * aarch64_get_qualifier_esize (opnds
[0].qualifier
) / 2;
2501 num
= num
/ aarch64_get_qualifier_esize (qualifier
) - 1;
2503 /* Index out-of-range. */
2504 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
2506 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2509 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2510 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2511 number is encoded in "size:M:Rm":
2517 if (type
== AARCH64_OPND_Em
&& qualifier
== AARCH64_OPND_QLF_S_H
2518 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
2520 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2525 case AARCH64_OPND_CLASS_MODIFIED_REG
:
2526 assert (idx
== 1 || idx
== 2);
2529 case AARCH64_OPND_Rm_EXT
:
2530 if (!aarch64_extend_operator_p (opnd
->shifter
.kind
)
2531 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2533 set_other_error (mismatch_detail
, idx
,
2534 _("extend operator expected"));
2537 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2538 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2539 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2541 if (!aarch64_stack_pointer_p (opnds
+ 0)
2542 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
2544 if (!opnd
->shifter
.operator_present
)
2546 set_other_error (mismatch_detail
, idx
,
2547 _("missing extend operator"));
2550 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2552 set_other_error (mismatch_detail
, idx
,
2553 _("'LSL' operator not allowed"));
2557 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
2558 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2559 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
2561 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
2564 /* In the 64-bit form, the final register operand is written as Wm
2565 for all but the (possibly omitted) UXTX/LSL and SXTX
2567 N.B. GAS allows X register to be used with any operator as a
2568 programming convenience. */
2569 if (qualifier
== AARCH64_OPND_QLF_X
2570 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
2571 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
2572 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
2574 set_other_error (mismatch_detail
, idx
, _("W register expected"));
2579 case AARCH64_OPND_Rm_SFT
:
2580 /* ROR is not available to the shifted register operand in
2581 arithmetic instructions. */
2582 if (!aarch64_shift_operator_p (opnd
->shifter
.kind
))
2584 set_other_error (mismatch_detail
, idx
,
2585 _("shift operator expected"));
2588 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
2589 && opcode
->iclass
!= log_shift
)
2591 set_other_error (mismatch_detail
, idx
,
2592 _("'ROR' operator not allowed"));
2595 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
2596 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
2598 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2615 /* Main entrypoint for the operand constraint checking.
2617 Return 1 if operands of *INST meet the constraint applied by the operand
2618 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2619 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2620 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2621 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2622 error kind when it is notified that an instruction does not pass the check).
2624 Un-determined operand qualifiers may get established during the process. */
2627 aarch64_match_operands_constraint (aarch64_inst
*inst
,
2628 aarch64_operand_error
*mismatch_detail
)
2632 DEBUG_TRACE ("enter");
2634 /* Check for cases where a source register needs to be the same as the
2635 destination register. Do this before matching qualifiers since if
2636 an instruction has both invalid tying and invalid qualifiers,
2637 the error about qualifiers would suggest several alternative
2638 instructions that also have invalid tying. */
2639 i
= inst
->opcode
->tied_operand
;
2640 if (i
> 0 && (inst
->operands
[0].reg
.regno
!= inst
->operands
[i
].reg
.regno
))
2642 if (mismatch_detail
)
2644 mismatch_detail
->kind
= AARCH64_OPDE_UNTIED_OPERAND
;
2645 mismatch_detail
->index
= i
;
2646 mismatch_detail
->error
= NULL
;
2651 /* Match operands' qualifier.
2652 *INST has already had qualifier establish for some, if not all, of
2653 its operands; we need to find out whether these established
2654 qualifiers match one of the qualifier sequence in
2655 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2656 with the corresponding qualifier in such a sequence.
2657 Only basic operand constraint checking is done here; the more thorough
2658 constraint checking will carried out by operand_general_constraint_met_p,
2659 which has be to called after this in order to get all of the operands'
2660 qualifiers established. */
2661 if (match_operands_qualifier (inst
, TRUE
/* update_p */) == 0)
2663 DEBUG_TRACE ("FAIL on operand qualifier matching");
2664 if (mismatch_detail
)
2666 /* Return an error type to indicate that it is the qualifier
2667 matching failure; we don't care about which operand as there
2668 are enough information in the opcode table to reproduce it. */
2669 mismatch_detail
->kind
= AARCH64_OPDE_INVALID_VARIANT
;
2670 mismatch_detail
->index
= -1;
2671 mismatch_detail
->error
= NULL
;
2676 /* Match operands' constraint. */
2677 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2679 enum aarch64_opnd type
= inst
->opcode
->operands
[i
];
2680 if (type
== AARCH64_OPND_NIL
)
2682 if (inst
->operands
[i
].skip
)
2684 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2687 if (operand_general_constraint_met_p (inst
->operands
, i
, type
,
2688 inst
->opcode
, mismatch_detail
) == 0)
2690 DEBUG_TRACE ("FAIL on operand %d", i
);
2695 DEBUG_TRACE ("PASS");
2700 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2701 Also updates the TYPE of each INST->OPERANDS with the corresponding
2702 value of OPCODE->OPERANDS.
2704 Note that some operand qualifiers may need to be manually cleared by
2705 the caller before it further calls the aarch64_opcode_encode; by
2706 doing this, it helps the qualifier matching facilities work
2709 const aarch64_opcode
*
2710 aarch64_replace_opcode (aarch64_inst
*inst
, const aarch64_opcode
*opcode
)
2713 const aarch64_opcode
*old
= inst
->opcode
;
2715 inst
->opcode
= opcode
;
2717 /* Update the operand types. */
2718 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2720 inst
->operands
[i
].type
= opcode
->operands
[i
];
2721 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
2725 DEBUG_TRACE ("replace %s with %s", old
->name
, opcode
->name
);
2731 aarch64_operand_index (const enum aarch64_opnd
*operands
, enum aarch64_opnd operand
)
2734 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2735 if (operands
[i
] == operand
)
2737 else if (operands
[i
] == AARCH64_OPND_NIL
)
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7),  \
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* Integer register name table, indexed as:
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R32
#undef R64
};
2761 /* Names of the SVE vector registers, first with .S suffixes,
2762 then with .D suffixes. */
2764 static const char *sve_reg
[2][32] = {
2765 #define ZS(X) "z" #X ".s"
2766 #define ZD(X) "z" #X ".d"
2767 BANK (ZS
, ZS (31)), BANK (ZD
, ZD (31))
2773 /* Return the integer register name.
2774 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2776 static inline const char *
2777 get_int_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
, int sp_reg_p
)
2779 const int has_zr
= sp_reg_p
? 0 : 1;
2780 const int is_64
= aarch64_get_qualifier_esize (qualifier
) == 4 ? 0 : 1;
2781 return int_reg
[has_zr
][is_64
][regno
];
2784 /* Like get_int_reg_name, but IS_64 is always 1. */
2786 static inline const char *
2787 get_64bit_int_reg_name (int regno
, int sp_reg_p
)
2789 const int has_zr
= sp_reg_p
? 0 : 1;
2790 return int_reg
[has_zr
][1][regno
];
2793 /* Get the name of the integer offset register in OPND, using the shift type
2794 to decide whether it's a word or doubleword. */
2796 static inline const char *
2797 get_offset_int_reg_name (const aarch64_opnd_info
*opnd
)
2799 switch (opnd
->shifter
.kind
)
2801 case AARCH64_MOD_UXTW
:
2802 case AARCH64_MOD_SXTW
:
2803 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_W
, 0);
2805 case AARCH64_MOD_LSL
:
2806 case AARCH64_MOD_SXTX
:
2807 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_X
, 0);
2814 /* Get the name of the SVE vector offset register in OPND, using the operand
2815 qualifier to decide whether the suffix should be .S or .D. */
2817 static inline const char *
2818 get_addr_sve_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
)
2820 assert (qualifier
== AARCH64_OPND_QLF_S_S
2821 || qualifier
== AARCH64_OPND_QLF_S_D
);
2822 return sve_reg
[qualifier
== AARCH64_OPND_QLF_S_D
][regno
];
2825 /* Types for expanding an encoded 8-bit value to a floating-point value. */
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      /* Build the high 32 bits of the double, then shift into place.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6)	*/
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7> */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>) */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2889 /* Produce the string representation of the register list operand *OPND
2890 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2891 the register name that comes before the register number, such as "v". */
2893 print_register_list (char *buf
, size_t size
, const aarch64_opnd_info
*opnd
,
2896 const int num_regs
= opnd
->reglist
.num_regs
;
2897 const int first_reg
= opnd
->reglist
.first_regno
;
2898 const int last_reg
= (first_reg
+ num_regs
- 1) & 0x1f;
2899 const char *qlf_name
= aarch64_get_qualifier_name (opnd
->qualifier
);
2900 char tb
[8]; /* Temporary buffer. */
2902 assert (opnd
->type
!= AARCH64_OPND_LEt
|| opnd
->reglist
.has_index
);
2903 assert (num_regs
>= 1 && num_regs
<= 4);
2905 /* Prepare the index if any. */
2906 if (opnd
->reglist
.has_index
)
2907 /* PR 21096: The %100 is to silence a warning about possible truncation. */
2908 snprintf (tb
, 8, "[%" PRIi64
"]", (opnd
->reglist
.index
% 100));
2912 /* The hyphenated form is preferred for disassembly if there are
2913 more than two registers in the list, and the register numbers
2914 are monotonically increasing in increments of one. */
2915 if (num_regs
> 2 && last_reg
> first_reg
)
2916 snprintf (buf
, size
, "{%s%d.%s-%s%d.%s}%s", prefix
, first_reg
, qlf_name
,
2917 prefix
, last_reg
, qlf_name
, tb
);
2920 const int reg0
= first_reg
;
2921 const int reg1
= (first_reg
+ 1) & 0x1f;
2922 const int reg2
= (first_reg
+ 2) & 0x1f;
2923 const int reg3
= (first_reg
+ 3) & 0x1f;
2928 snprintf (buf
, size
, "{%s%d.%s}%s", prefix
, reg0
, qlf_name
, tb
);
2931 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s}%s", prefix
, reg0
, qlf_name
,
2932 prefix
, reg1
, qlf_name
, tb
);
2935 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2936 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
2937 prefix
, reg2
, qlf_name
, tb
);
2940 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2941 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
2942 prefix
, reg2
, qlf_name
, prefix
, reg3
, qlf_name
, tb
);
2948 /* Print the register+immediate address in OPND to BUF, which has SIZE
2949 characters. BASE is the name of the base register. */
2952 print_immediate_offset_address (char *buf
, size_t size
,
2953 const aarch64_opnd_info
*opnd
,
2956 if (opnd
->addr
.writeback
)
2958 if (opnd
->addr
.preind
)
2959 snprintf (buf
, size
, "[%s, #%d]!", base
, opnd
->addr
.offset
.imm
);
2961 snprintf (buf
, size
, "[%s], #%d", base
, opnd
->addr
.offset
.imm
);
2965 if (opnd
->shifter
.operator_present
)
2967 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL_VL
);
2968 snprintf (buf
, size
, "[%s, #%d, mul vl]",
2969 base
, opnd
->addr
.offset
.imm
);
2971 else if (opnd
->addr
.offset
.imm
)
2972 snprintf (buf
, size
, "[%s, #%d]", base
, opnd
->addr
.offset
.imm
);
2974 snprintf (buf
, size
, "[%s]", base
);
2978 /* Produce the string representation of the register offset address operand
2979 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2980 the names of the base and offset registers. */
2982 print_register_offset_address (char *buf
, size_t size
,
2983 const aarch64_opnd_info
*opnd
,
2984 const char *base
, const char *offset
)
2986 char tb
[16]; /* Temporary buffer. */
2987 bfd_boolean print_extend_p
= TRUE
;
2988 bfd_boolean print_amount_p
= TRUE
;
2989 const char *shift_name
= aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
2991 if (!opnd
->shifter
.amount
&& (opnd
->qualifier
!= AARCH64_OPND_QLF_S_B
2992 || !opnd
->shifter
.amount_present
))
2994 /* Not print the shift/extend amount when the amount is zero and
2995 when it is not the special case of 8-bit load/store instruction. */
2996 print_amount_p
= FALSE
;
2997 /* Likewise, no need to print the shift operator LSL in such a
2999 if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3000 print_extend_p
= FALSE
;
3003 /* Prepare for the extend/shift. */
3007 snprintf (tb
, sizeof (tb
), ", %s #%" PRIi64
, shift_name
,
3008 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3009 (opnd
->shifter
.amount
% 100));
3011 snprintf (tb
, sizeof (tb
), ", %s", shift_name
);
3016 snprintf (buf
, size
, "[%s, %s%s]", base
, offset
, tb
);
3019 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3020 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3021 PC, PCREL_P and ADDRESS are used to pass in and return information about
3022 the PC-relative address calculation, where the PC value is passed in
3023 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3024 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3025 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3027 The function serves both the disassembler and the assembler diagnostics
3028 issuer, which is the reason why it lives in this file. */
3031 aarch64_print_operand (char *buf
, size_t size
, bfd_vma pc
,
3032 const aarch64_opcode
*opcode
,
3033 const aarch64_opnd_info
*opnds
, int idx
, int *pcrel_p
,
3036 unsigned int i
, num_conds
;
3037 const char *name
= NULL
;
3038 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
3039 enum aarch64_modifier_kind kind
;
3040 uint64_t addr
, enum_value
;
3048 case AARCH64_OPND_Rd
:
3049 case AARCH64_OPND_Rn
:
3050 case AARCH64_OPND_Rm
:
3051 case AARCH64_OPND_Rt
:
3052 case AARCH64_OPND_Rt2
:
3053 case AARCH64_OPND_Rs
:
3054 case AARCH64_OPND_Ra
:
3055 case AARCH64_OPND_Rt_SYS
:
3056 case AARCH64_OPND_PAIRREG
:
3057 case AARCH64_OPND_SVE_Rm
:
3058 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3059 the <ic_op>, therefore we use opnd->present to override the
3060 generic optional-ness information. */
3061 if (opnd
->type
== AARCH64_OPND_Rt_SYS
)
3066 /* Omit the operand, e.g. RET. */
3067 else if (optional_operand_p (opcode
, idx
)
3069 == get_optional_operand_default_value (opcode
)))
3071 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3072 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3073 snprintf (buf
, size
, "%s",
3074 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3077 case AARCH64_OPND_Rd_SP
:
3078 case AARCH64_OPND_Rn_SP
:
3079 case AARCH64_OPND_SVE_Rn_SP
:
3080 case AARCH64_OPND_Rm_SP
:
3081 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3082 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
3083 || opnd
->qualifier
== AARCH64_OPND_QLF_X
3084 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
3085 snprintf (buf
, size
, "%s",
3086 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 1));
3089 case AARCH64_OPND_Rm_EXT
:
3090 kind
= opnd
->shifter
.kind
;
3091 assert (idx
== 1 || idx
== 2);
3092 if ((aarch64_stack_pointer_p (opnds
)
3093 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
3094 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
3095 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
3096 && kind
== AARCH64_MOD_UXTW
)
3097 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
3098 && kind
== AARCH64_MOD_UXTX
)))
3100 /* 'LSL' is the preferred form in this case. */
3101 kind
= AARCH64_MOD_LSL
;
3102 if (opnd
->shifter
.amount
== 0)
3104 /* Shifter omitted. */
3105 snprintf (buf
, size
, "%s",
3106 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3110 if (opnd
->shifter
.amount
)
3111 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3112 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3113 aarch64_operand_modifiers
[kind
].name
,
3114 opnd
->shifter
.amount
);
3116 snprintf (buf
, size
, "%s, %s",
3117 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3118 aarch64_operand_modifiers
[kind
].name
);
3121 case AARCH64_OPND_Rm_SFT
:
3122 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3123 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3124 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3125 snprintf (buf
, size
, "%s",
3126 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3128 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3129 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3130 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3131 opnd
->shifter
.amount
);
3134 case AARCH64_OPND_Fd
:
3135 case AARCH64_OPND_Fn
:
3136 case AARCH64_OPND_Fm
:
3137 case AARCH64_OPND_Fa
:
3138 case AARCH64_OPND_Ft
:
3139 case AARCH64_OPND_Ft2
:
3140 case AARCH64_OPND_Sd
:
3141 case AARCH64_OPND_Sn
:
3142 case AARCH64_OPND_Sm
:
3143 case AARCH64_OPND_SVE_VZn
:
3144 case AARCH64_OPND_SVE_Vd
:
3145 case AARCH64_OPND_SVE_Vm
:
3146 case AARCH64_OPND_SVE_Vn
:
3147 snprintf (buf
, size
, "%s%d", aarch64_get_qualifier_name (opnd
->qualifier
),
3151 case AARCH64_OPND_Va
:
3152 case AARCH64_OPND_Vd
:
3153 case AARCH64_OPND_Vn
:
3154 case AARCH64_OPND_Vm
:
3155 snprintf (buf
, size
, "v%d.%s", opnd
->reg
.regno
,
3156 aarch64_get_qualifier_name (opnd
->qualifier
));
3159 case AARCH64_OPND_Ed
:
3160 case AARCH64_OPND_En
:
3161 case AARCH64_OPND_Em
:
3162 case AARCH64_OPND_SM3_IMM2
:
3163 snprintf (buf
, size
, "v%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3164 aarch64_get_qualifier_name (opnd
->qualifier
),
3165 opnd
->reglane
.index
);
3168 case AARCH64_OPND_VdD1
:
3169 case AARCH64_OPND_VnD1
:
3170 snprintf (buf
, size
, "v%d.d[1]", opnd
->reg
.regno
);
3173 case AARCH64_OPND_LVn
:
3174 case AARCH64_OPND_LVt
:
3175 case AARCH64_OPND_LVt_AL
:
3176 case AARCH64_OPND_LEt
:
3177 print_register_list (buf
, size
, opnd
, "v");
3180 case AARCH64_OPND_SVE_Pd
:
3181 case AARCH64_OPND_SVE_Pg3
:
3182 case AARCH64_OPND_SVE_Pg4_5
:
3183 case AARCH64_OPND_SVE_Pg4_10
:
3184 case AARCH64_OPND_SVE_Pg4_16
:
3185 case AARCH64_OPND_SVE_Pm
:
3186 case AARCH64_OPND_SVE_Pn
:
3187 case AARCH64_OPND_SVE_Pt
:
3188 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3189 snprintf (buf
, size
, "p%d", opnd
->reg
.regno
);
3190 else if (opnd
->qualifier
== AARCH64_OPND_QLF_P_Z
3191 || opnd
->qualifier
== AARCH64_OPND_QLF_P_M
)
3192 snprintf (buf
, size
, "p%d/%s", opnd
->reg
.regno
,
3193 aarch64_get_qualifier_name (opnd
->qualifier
));
3195 snprintf (buf
, size
, "p%d.%s", opnd
->reg
.regno
,
3196 aarch64_get_qualifier_name (opnd
->qualifier
));
3199 case AARCH64_OPND_SVE_Za_5
:
3200 case AARCH64_OPND_SVE_Za_16
:
3201 case AARCH64_OPND_SVE_Zd
:
3202 case AARCH64_OPND_SVE_Zm_5
:
3203 case AARCH64_OPND_SVE_Zm_16
:
3204 case AARCH64_OPND_SVE_Zn
:
3205 case AARCH64_OPND_SVE_Zt
:
3206 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3207 snprintf (buf
, size
, "z%d", opnd
->reg
.regno
);
3209 snprintf (buf
, size
, "z%d.%s", opnd
->reg
.regno
,
3210 aarch64_get_qualifier_name (opnd
->qualifier
));
3213 case AARCH64_OPND_SVE_ZnxN
:
3214 case AARCH64_OPND_SVE_ZtxN
:
3215 print_register_list (buf
, size
, opnd
, "z");
3218 case AARCH64_OPND_SVE_Zm3_INDEX
:
3219 case AARCH64_OPND_SVE_Zm3_22_INDEX
:
3220 case AARCH64_OPND_SVE_Zm4_INDEX
:
3221 case AARCH64_OPND_SVE_Zn_INDEX
:
3222 snprintf (buf
, size
, "z%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3223 aarch64_get_qualifier_name (opnd
->qualifier
),
3224 opnd
->reglane
.index
);
3227 case AARCH64_OPND_CRn
:
3228 case AARCH64_OPND_CRm
:
3229 snprintf (buf
, size
, "C%" PRIi64
, opnd
->imm
.value
);
3232 case AARCH64_OPND_IDX
:
3233 case AARCH64_OPND_MASK
:
3234 case AARCH64_OPND_IMM
:
3235 case AARCH64_OPND_IMM_2
:
3236 case AARCH64_OPND_WIDTH
:
3237 case AARCH64_OPND_UIMM3_OP1
:
3238 case AARCH64_OPND_UIMM3_OP2
:
3239 case AARCH64_OPND_BIT_NUM
:
3240 case AARCH64_OPND_IMM_VLSL
:
3241 case AARCH64_OPND_IMM_VLSR
:
3242 case AARCH64_OPND_SHLL_IMM
:
3243 case AARCH64_OPND_IMM0
:
3244 case AARCH64_OPND_IMMR
:
3245 case AARCH64_OPND_IMMS
:
3246 case AARCH64_OPND_FBITS
:
3247 case AARCH64_OPND_SIMM5
:
3248 case AARCH64_OPND_SVE_SHLIMM_PRED
:
3249 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
3250 case AARCH64_OPND_SVE_SHRIMM_PRED
:
3251 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
3252 case AARCH64_OPND_SVE_SIMM5
:
3253 case AARCH64_OPND_SVE_SIMM5B
:
3254 case AARCH64_OPND_SVE_SIMM6
:
3255 case AARCH64_OPND_SVE_SIMM8
:
3256 case AARCH64_OPND_SVE_UIMM3
:
3257 case AARCH64_OPND_SVE_UIMM7
:
3258 case AARCH64_OPND_SVE_UIMM8
:
3259 case AARCH64_OPND_SVE_UIMM8_53
:
3260 case AARCH64_OPND_IMM_ROT1
:
3261 case AARCH64_OPND_IMM_ROT2
:
3262 case AARCH64_OPND_IMM_ROT3
:
3263 case AARCH64_OPND_SVE_IMM_ROT1
:
3264 case AARCH64_OPND_SVE_IMM_ROT2
:
3265 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3268 case AARCH64_OPND_SVE_I1_HALF_ONE
:
3269 case AARCH64_OPND_SVE_I1_HALF_TWO
:
3270 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
3273 c
.i
= opnd
->imm
.value
;
3274 snprintf (buf
, size
, "#%.1f", c
.f
);
3278 case AARCH64_OPND_SVE_PATTERN
:
3279 if (optional_operand_p (opcode
, idx
)
3280 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3282 enum_value
= opnd
->imm
.value
;
3283 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3284 if (aarch64_sve_pattern_array
[enum_value
])
3285 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[enum_value
]);
3287 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3290 case AARCH64_OPND_SVE_PATTERN_SCALED
:
3291 if (optional_operand_p (opcode
, idx
)
3292 && !opnd
->shifter
.operator_present
3293 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3295 enum_value
= opnd
->imm
.value
;
3296 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3297 if (aarch64_sve_pattern_array
[opnd
->imm
.value
])
3298 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[opnd
->imm
.value
]);
3300 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3301 if (opnd
->shifter
.operator_present
)
3303 size_t len
= strlen (buf
);
3304 snprintf (buf
+ len
, size
- len
, ", %s #%" PRIi64
,
3305 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3306 opnd
->shifter
.amount
);
3310 case AARCH64_OPND_SVE_PRFOP
:
3311 enum_value
= opnd
->imm
.value
;
3312 assert (enum_value
< ARRAY_SIZE (aarch64_sve_prfop_array
));
3313 if (aarch64_sve_prfop_array
[enum_value
])
3314 snprintf (buf
, size
, "%s", aarch64_sve_prfop_array
[enum_value
]);
3316 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3319 case AARCH64_OPND_IMM_MOV
:
3320 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3322 case 4: /* e.g. MOV Wd, #<imm32>. */
3324 int imm32
= opnd
->imm
.value
;
3325 snprintf (buf
, size
, "#0x%-20x\t// #%d", imm32
, imm32
);
3328 case 8: /* e.g. MOV Xd, #<imm64>. */
3329 snprintf (buf
, size
, "#0x%-20" PRIx64
"\t// #%" PRIi64
,
3330 opnd
->imm
.value
, opnd
->imm
.value
);
3332 default: assert (0);
3336 case AARCH64_OPND_FPIMM0
:
3337 snprintf (buf
, size
, "#0.0");
3340 case AARCH64_OPND_LIMM
:
3341 case AARCH64_OPND_AIMM
:
3342 case AARCH64_OPND_HALF
:
3343 case AARCH64_OPND_SVE_INV_LIMM
:
3344 case AARCH64_OPND_SVE_LIMM
:
3345 case AARCH64_OPND_SVE_LIMM_MOV
:
3346 if (opnd
->shifter
.amount
)
3347 snprintf (buf
, size
, "#0x%" PRIx64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3348 opnd
->shifter
.amount
);
3350 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3353 case AARCH64_OPND_SIMD_IMM
:
3354 case AARCH64_OPND_SIMD_IMM_SFT
:
3355 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3356 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
3357 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3359 snprintf (buf
, size
, "#0x%" PRIx64
", %s #%" PRIi64
, opnd
->imm
.value
,
3360 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3361 opnd
->shifter
.amount
);
3364 case AARCH64_OPND_SVE_AIMM
:
3365 case AARCH64_OPND_SVE_ASIMM
:
3366 if (opnd
->shifter
.amount
)
3367 snprintf (buf
, size
, "#%" PRIi64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3368 opnd
->shifter
.amount
);
3370 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3373 case AARCH64_OPND_FPIMM
:
3374 case AARCH64_OPND_SIMD_FPIMM
:
3375 case AARCH64_OPND_SVE_FPIMM8
:
3376 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3378 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3381 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
3382 snprintf (buf
, size
, "#%.18e", c
.f
);
3385 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3388 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
3389 snprintf (buf
, size
, "#%.18e", c
.f
);
3392 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3395 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
3396 snprintf (buf
, size
, "#%.18e", c
.d
);
3399 default: assert (0);
3403 case AARCH64_OPND_CCMP_IMM
:
3404 case AARCH64_OPND_NZCV
:
3405 case AARCH64_OPND_EXCEPTION
:
3406 case AARCH64_OPND_UIMM4
:
3407 case AARCH64_OPND_UIMM7
:
3408 if (optional_operand_p (opcode
, idx
) == TRUE
3409 && (opnd
->imm
.value
==
3410 (int64_t) get_optional_operand_default_value (opcode
)))
3411 /* Omit the operand, e.g. DCPS1. */
3413 snprintf (buf
, size
, "#0x%x", (unsigned int)opnd
->imm
.value
);
3416 case AARCH64_OPND_COND
:
3417 case AARCH64_OPND_COND1
:
3418 snprintf (buf
, size
, "%s", opnd
->cond
->names
[0]);
3419 num_conds
= ARRAY_SIZE (opnd
->cond
->names
);
3420 for (i
= 1; i
< num_conds
&& opnd
->cond
->names
[i
]; ++i
)
3422 size_t len
= strlen (buf
);
3424 snprintf (buf
+ len
, size
- len
, " // %s = %s",
3425 opnd
->cond
->names
[0], opnd
->cond
->names
[i
]);
3427 snprintf (buf
+ len
, size
- len
, ", %s",
3428 opnd
->cond
->names
[i
]);
3432 case AARCH64_OPND_ADDR_ADRP
:
3433 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
3439 /* This is not necessary during the disassembling, as print_address_func
3440 in the disassemble_info will take care of the printing. But some
3441 other callers may be still interested in getting the string in *STR,
3442 so here we do snprintf regardless. */
3443 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3446 case AARCH64_OPND_ADDR_PCREL14
:
3447 case AARCH64_OPND_ADDR_PCREL19
:
3448 case AARCH64_OPND_ADDR_PCREL21
:
3449 case AARCH64_OPND_ADDR_PCREL26
:
3450 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
3455 /* This is not necessary during the disassembling, as print_address_func
3456 in the disassemble_info will take care of the printing. But some
3457 other callers may be still interested in getting the string in *STR,
3458 so here we do snprintf regardless. */
3459 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3462 case AARCH64_OPND_ADDR_SIMPLE
:
3463 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
3464 case AARCH64_OPND_SIMD_ADDR_POST
:
3465 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3466 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
3468 if (opnd
->addr
.offset
.is_reg
)
3469 snprintf (buf
, size
, "[%s], x%d", name
, opnd
->addr
.offset
.regno
);
3471 snprintf (buf
, size
, "[%s], #%d", name
, opnd
->addr
.offset
.imm
);
3474 snprintf (buf
, size
, "[%s]", name
);
3477 case AARCH64_OPND_ADDR_REGOFF
:
3478 case AARCH64_OPND_SVE_ADDR_RR
:
3479 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
3480 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
3481 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
3482 case AARCH64_OPND_SVE_ADDR_RX
:
3483 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
3484 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
3485 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
3486 print_register_offset_address
3487 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3488 get_offset_int_reg_name (opnd
));
3491 case AARCH64_OPND_SVE_ADDR_RZ
:
3492 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
3493 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
3494 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
3495 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
3496 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
3497 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
3498 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
3499 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
3500 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
3501 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
3502 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
3503 print_register_offset_address
3504 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3505 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3508 case AARCH64_OPND_ADDR_SIMM7
:
3509 case AARCH64_OPND_ADDR_SIMM9
:
3510 case AARCH64_OPND_ADDR_SIMM9_2
:
3511 case AARCH64_OPND_ADDR_SIMM10
:
3512 case AARCH64_OPND_ADDR_OFFSET
:
3513 case AARCH64_OPND_SVE_ADDR_RI_S4x16
:
3514 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
3515 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
3516 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
3517 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
3518 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
3519 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
3520 case AARCH64_OPND_SVE_ADDR_RI_U6
:
3521 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
3522 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
3523 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
3524 print_immediate_offset_address
3525 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1));
3528 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
3529 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
3530 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
3531 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
3532 print_immediate_offset_address
3534 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
));
3537 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
3538 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
3539 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
3540 print_register_offset_address
3542 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
3543 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3546 case AARCH64_OPND_ADDR_UIMM12
:
3547 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3548 if (opnd
->addr
.offset
.imm
)
3549 snprintf (buf
, size
, "[%s, #%d]", name
, opnd
->addr
.offset
.imm
);
3551 snprintf (buf
, size
, "[%s]", name
);
3554 case AARCH64_OPND_SYSREG
:
3555 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
3556 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
3557 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs
[i
]))
3559 if (aarch64_sys_regs
[i
].name
)
3560 snprintf (buf
, size
, "%s", aarch64_sys_regs
[i
].name
);
3563 /* Implementation defined system register. */
3564 unsigned int value
= opnd
->sysreg
;
3565 snprintf (buf
, size
, "s%u_%u_c%u_c%u_%u", (value
>> 14) & 0x3,
3566 (value
>> 11) & 0x7, (value
>> 7) & 0xf, (value
>> 3) & 0xf,
3571 case AARCH64_OPND_PSTATEFIELD
:
3572 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
3573 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
3575 assert (aarch64_pstatefields
[i
].name
);
3576 snprintf (buf
, size
, "%s", aarch64_pstatefields
[i
].name
);
3579 case AARCH64_OPND_SYSREG_AT
:
3580 case AARCH64_OPND_SYSREG_DC
:
3581 case AARCH64_OPND_SYSREG_IC
:
3582 case AARCH64_OPND_SYSREG_TLBI
:
3583 snprintf (buf
, size
, "%s", opnd
->sysins_op
->name
);
3586 case AARCH64_OPND_BARRIER
:
3587 snprintf (buf
, size
, "%s", opnd
->barrier
->name
);
3590 case AARCH64_OPND_BARRIER_ISB
:
3591 /* Operand can be omitted, e.g. in DCPS1. */
3592 if (! optional_operand_p (opcode
, idx
)
3593 || (opnd
->barrier
->value
3594 != get_optional_operand_default_value (opcode
)))
3595 snprintf (buf
, size
, "#0x%x", opnd
->barrier
->value
);
3598 case AARCH64_OPND_PRFOP
:
3599 if (opnd
->prfop
->name
!= NULL
)
3600 snprintf (buf
, size
, "%s", opnd
->prfop
->name
);
3602 snprintf (buf
, size
, "#0x%02x", opnd
->prfop
->value
);
3605 case AARCH64_OPND_BARRIER_PSB
:
3606 snprintf (buf
, size
, "%s", opnd
->hint_option
->name
);
/* Encode a system-register operand.  The five MSR/MRS fields
   op0:op1:CRn:CRm:op2 are packed contiguously and right-aligned into a
   14-bit value (the intermediate << / >> 5 keeps the fields in their
   architectural bit positions before normalising).  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Shorthands for the CRn/CRm field values used in the register tables
   below.  NOTE(review): these defines were lost when this file was
   re-flowed; without them every C0..C15 reference is undefined.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

#define F_DEPRECATED 0x1 /* Deprecated system register.  */
#define F_ARCHEXT 0x2	 /* Architecture dependent system register.  */
#define F_HASXT 0x4	 /* System instruction register <Xt>
			    operand.  */
/* TODO there are two more issues need to be resolved
   1. handle read-only and write-only system registers
   2. handle cpu-implementation-defined system registers.  */
3658 const aarch64_sys_reg aarch64_sys_regs
[] =
3660 { "spsr_el1", CPEN_(0,C0
,0), 0 }, /* = spsr_svc */
3661 { "spsr_el12", CPEN_ (5, C0
, 0), F_ARCHEXT
},
3662 { "elr_el1", CPEN_(0,C0
,1), 0 },
3663 { "elr_el12", CPEN_ (5, C0
, 1), F_ARCHEXT
},
3664 { "sp_el0", CPEN_(0,C1
,0), 0 },
3665 { "spsel", CPEN_(0,C2
,0), 0 },
3666 { "daif", CPEN_(3,C2
,1), 0 },
3667 { "currentel", CPEN_(0,C2
,2), 0 }, /* RO */
3668 { "pan", CPEN_(0,C2
,3), F_ARCHEXT
},
3669 { "uao", CPEN_ (0, C2
, 4), F_ARCHEXT
},
3670 { "nzcv", CPEN_(3,C2
,0), 0 },
3671 { "fpcr", CPEN_(3,C4
,0), 0 },
3672 { "fpsr", CPEN_(3,C4
,1), 0 },
3673 { "dspsr_el0", CPEN_(3,C5
,0), 0 },
3674 { "dlr_el0", CPEN_(3,C5
,1), 0 },
3675 { "spsr_el2", CPEN_(4,C0
,0), 0 }, /* = spsr_hyp */
3676 { "elr_el2", CPEN_(4,C0
,1), 0 },
3677 { "sp_el1", CPEN_(4,C1
,0), 0 },
3678 { "spsr_irq", CPEN_(4,C3
,0), 0 },
3679 { "spsr_abt", CPEN_(4,C3
,1), 0 },
3680 { "spsr_und", CPEN_(4,C3
,2), 0 },
3681 { "spsr_fiq", CPEN_(4,C3
,3), 0 },
3682 { "spsr_el3", CPEN_(6,C0
,0), 0 },
3683 { "elr_el3", CPEN_(6,C0
,1), 0 },
3684 { "sp_el2", CPEN_(6,C1
,0), 0 },
3685 { "spsr_svc", CPEN_(0,C0
,0), F_DEPRECATED
}, /* = spsr_el1 */
3686 { "spsr_hyp", CPEN_(4,C0
,0), F_DEPRECATED
}, /* = spsr_el2 */
3687 { "midr_el1", CPENC(3,0,C0
,C0
,0), 0 }, /* RO */
3688 { "ctr_el0", CPENC(3,3,C0
,C0
,1), 0 }, /* RO */
3689 { "mpidr_el1", CPENC(3,0,C0
,C0
,5), 0 }, /* RO */
3690 { "revidr_el1", CPENC(3,0,C0
,C0
,6), 0 }, /* RO */
3691 { "aidr_el1", CPENC(3,1,C0
,C0
,7), 0 }, /* RO */
3692 { "dczid_el0", CPENC(3,3,C0
,C0
,7), 0 }, /* RO */
3693 { "id_dfr0_el1", CPENC(3,0,C0
,C1
,2), 0 }, /* RO */
3694 { "id_pfr0_el1", CPENC(3,0,C0
,C1
,0), 0 }, /* RO */
3695 { "id_pfr1_el1", CPENC(3,0,C0
,C1
,1), 0 }, /* RO */
3696 { "id_afr0_el1", CPENC(3,0,C0
,C1
,3), 0 }, /* RO */
3697 { "id_mmfr0_el1", CPENC(3,0,C0
,C1
,4), 0 }, /* RO */
3698 { "id_mmfr1_el1", CPENC(3,0,C0
,C1
,5), 0 }, /* RO */
3699 { "id_mmfr2_el1", CPENC(3,0,C0
,C1
,6), 0 }, /* RO */
3700 { "id_mmfr3_el1", CPENC(3,0,C0
,C1
,7), 0 }, /* RO */
3701 { "id_mmfr4_el1", CPENC(3,0,C0
,C2
,6), 0 }, /* RO */
3702 { "id_isar0_el1", CPENC(3,0,C0
,C2
,0), 0 }, /* RO */
3703 { "id_isar1_el1", CPENC(3,0,C0
,C2
,1), 0 }, /* RO */
3704 { "id_isar2_el1", CPENC(3,0,C0
,C2
,2), 0 }, /* RO */
3705 { "id_isar3_el1", CPENC(3,0,C0
,C2
,3), 0 }, /* RO */
3706 { "id_isar4_el1", CPENC(3,0,C0
,C2
,4), 0 }, /* RO */
3707 { "id_isar5_el1", CPENC(3,0,C0
,C2
,5), 0 }, /* RO */
3708 { "mvfr0_el1", CPENC(3,0,C0
,C3
,0), 0 }, /* RO */
3709 { "mvfr1_el1", CPENC(3,0,C0
,C3
,1), 0 }, /* RO */
3710 { "mvfr2_el1", CPENC(3,0,C0
,C3
,2), 0 }, /* RO */
3711 { "ccsidr_el1", CPENC(3,1,C0
,C0
,0), 0 }, /* RO */
3712 { "id_aa64pfr0_el1", CPENC(3,0,C0
,C4
,0), 0 }, /* RO */
3713 { "id_aa64pfr1_el1", CPENC(3,0,C0
,C4
,1), 0 }, /* RO */
3714 { "id_aa64dfr0_el1", CPENC(3,0,C0
,C5
,0), 0 }, /* RO */
3715 { "id_aa64dfr1_el1", CPENC(3,0,C0
,C5
,1), 0 }, /* RO */
3716 { "id_aa64isar0_el1", CPENC(3,0,C0
,C6
,0), 0 }, /* RO */
3717 { "id_aa64isar1_el1", CPENC(3,0,C0
,C6
,1), 0 }, /* RO */
3718 { "id_aa64mmfr0_el1", CPENC(3,0,C0
,C7
,0), 0 }, /* RO */
3719 { "id_aa64mmfr1_el1", CPENC(3,0,C0
,C7
,1), 0 }, /* RO */
3720 { "id_aa64mmfr2_el1", CPENC (3, 0, C0
, C7
, 2), F_ARCHEXT
}, /* RO */
3721 { "id_aa64afr0_el1", CPENC(3,0,C0
,C5
,4), 0 }, /* RO */
3722 { "id_aa64afr1_el1", CPENC(3,0,C0
,C5
,5), 0 }, /* RO */
3723 { "id_aa64zfr0_el1", CPENC (3, 0, C0
, C4
, 4), F_ARCHEXT
}, /* RO */
3724 { "clidr_el1", CPENC(3,1,C0
,C0
,1), 0 }, /* RO */
3725 { "csselr_el1", CPENC(3,2,C0
,C0
,0), 0 }, /* RO */
3726 { "vpidr_el2", CPENC(3,4,C0
,C0
,0), 0 },
3727 { "vmpidr_el2", CPENC(3,4,C0
,C0
,5), 0 },
3728 { "sctlr_el1", CPENC(3,0,C1
,C0
,0), 0 },
3729 { "sctlr_el2", CPENC(3,4,C1
,C0
,0), 0 },
3730 { "sctlr_el3", CPENC(3,6,C1
,C0
,0), 0 },
3731 { "sctlr_el12", CPENC (3, 5, C1
, C0
, 0), F_ARCHEXT
},
3732 { "actlr_el1", CPENC(3,0,C1
,C0
,1), 0 },
3733 { "actlr_el2", CPENC(3,4,C1
,C0
,1), 0 },
3734 { "actlr_el3", CPENC(3,6,C1
,C0
,1), 0 },
3735 { "cpacr_el1", CPENC(3,0,C1
,C0
,2), 0 },
3736 { "cpacr_el12", CPENC (3, 5, C1
, C0
, 2), F_ARCHEXT
},
3737 { "cptr_el2", CPENC(3,4,C1
,C1
,2), 0 },
3738 { "cptr_el3", CPENC(3,6,C1
,C1
,2), 0 },
3739 { "scr_el3", CPENC(3,6,C1
,C1
,0), 0 },
3740 { "hcr_el2", CPENC(3,4,C1
,C1
,0), 0 },
3741 { "mdcr_el2", CPENC(3,4,C1
,C1
,1), 0 },
3742 { "mdcr_el3", CPENC(3,6,C1
,C3
,1), 0 },
3743 { "hstr_el2", CPENC(3,4,C1
,C1
,3), 0 },
3744 { "hacr_el2", CPENC(3,4,C1
,C1
,7), 0 },
3745 { "zcr_el1", CPENC (3, 0, C1
, C2
, 0), F_ARCHEXT
},
3746 { "zcr_el12", CPENC (3, 5, C1
, C2
, 0), F_ARCHEXT
},
3747 { "zcr_el2", CPENC (3, 4, C1
, C2
, 0), F_ARCHEXT
},
3748 { "zcr_el3", CPENC (3, 6, C1
, C2
, 0), F_ARCHEXT
},
3749 { "zidr_el1", CPENC (3, 0, C0
, C0
, 7), F_ARCHEXT
},
3750 { "ttbr0_el1", CPENC(3,0,C2
,C0
,0), 0 },
3751 { "ttbr1_el1", CPENC(3,0,C2
,C0
,1), 0 },
3752 { "ttbr0_el2", CPENC(3,4,C2
,C0
,0), 0 },
3753 { "ttbr1_el2", CPENC (3, 4, C2
, C0
, 1), F_ARCHEXT
},
3754 { "ttbr0_el3", CPENC(3,6,C2
,C0
,0), 0 },
3755 { "ttbr0_el12", CPENC (3, 5, C2
, C0
, 0), F_ARCHEXT
},
3756 { "ttbr1_el12", CPENC (3, 5, C2
, C0
, 1), F_ARCHEXT
},
3757 { "vttbr_el2", CPENC(3,4,C2
,C1
,0), 0 },
3758 { "tcr_el1", CPENC(3,0,C2
,C0
,2), 0 },
3759 { "tcr_el2", CPENC(3,4,C2
,C0
,2), 0 },
3760 { "tcr_el3", CPENC(3,6,C2
,C0
,2), 0 },
3761 { "tcr_el12", CPENC (3, 5, C2
, C0
, 2), F_ARCHEXT
},
3762 { "vtcr_el2", CPENC(3,4,C2
,C1
,2), 0 },
3763 { "apiakeylo_el1", CPENC (3, 0, C2
, C1
, 0), F_ARCHEXT
},
3764 { "apiakeyhi_el1", CPENC (3, 0, C2
, C1
, 1), F_ARCHEXT
},
3765 { "apibkeylo_el1", CPENC (3, 0, C2
, C1
, 2), F_ARCHEXT
},
3766 { "apibkeyhi_el1", CPENC (3, 0, C2
, C1
, 3), F_ARCHEXT
},
3767 { "apdakeylo_el1", CPENC (3, 0, C2
, C2
, 0), F_ARCHEXT
},
3768 { "apdakeyhi_el1", CPENC (3, 0, C2
, C2
, 1), F_ARCHEXT
},
3769 { "apdbkeylo_el1", CPENC (3, 0, C2
, C2
, 2), F_ARCHEXT
},
3770 { "apdbkeyhi_el1", CPENC (3, 0, C2
, C2
, 3), F_ARCHEXT
},
3771 { "apgakeylo_el1", CPENC (3, 0, C2
, C3
, 0), F_ARCHEXT
},
3772 { "apgakeyhi_el1", CPENC (3, 0, C2
, C3
, 1), F_ARCHEXT
},
3773 { "afsr0_el1", CPENC(3,0,C5
,C1
,0), 0 },
3774 { "afsr1_el1", CPENC(3,0,C5
,C1
,1), 0 },
3775 { "afsr0_el2", CPENC(3,4,C5
,C1
,0), 0 },
3776 { "afsr1_el2", CPENC(3,4,C5
,C1
,1), 0 },
3777 { "afsr0_el3", CPENC(3,6,C5
,C1
,0), 0 },
3778 { "afsr0_el12", CPENC (3, 5, C5
, C1
, 0), F_ARCHEXT
},
3779 { "afsr1_el3", CPENC(3,6,C5
,C1
,1), 0 },
3780 { "afsr1_el12", CPENC (3, 5, C5
, C1
, 1), F_ARCHEXT
},
3781 { "esr_el1", CPENC(3,0,C5
,C2
,0), 0 },
3782 { "esr_el2", CPENC(3,4,C5
,C2
,0), 0 },
3783 { "esr_el3", CPENC(3,6,C5
,C2
,0), 0 },
3784 { "esr_el12", CPENC (3, 5, C5
, C2
, 0), F_ARCHEXT
},
3785 { "vsesr_el2", CPENC (3, 4, C5
, C2
, 3), F_ARCHEXT
}, /* RO */
3786 { "fpexc32_el2", CPENC(3,4,C5
,C3
,0), 0 },
3787 { "erridr_el1", CPENC (3, 0, C5
, C3
, 0), F_ARCHEXT
}, /* RO */
3788 { "errselr_el1", CPENC (3, 0, C5
, C3
, 1), F_ARCHEXT
},
3789 { "erxfr_el1", CPENC (3, 0, C5
, C4
, 0), F_ARCHEXT
}, /* RO */
3790 { "erxctlr_el1", CPENC (3, 0, C5
, C4
, 1), F_ARCHEXT
},
3791 { "erxstatus_el1", CPENC (3, 0, C5
, C4
, 2), F_ARCHEXT
},
3792 { "erxaddr_el1", CPENC (3, 0, C5
, C4
, 3), F_ARCHEXT
},
3793 { "erxmisc0_el1", CPENC (3, 0, C5
, C5
, 0), F_ARCHEXT
},
3794 { "erxmisc1_el1", CPENC (3, 0, C5
, C5
, 1), F_ARCHEXT
},
3795 { "far_el1", CPENC(3,0,C6
,C0
,0), 0 },
3796 { "far_el2", CPENC(3,4,C6
,C0
,0), 0 },
3797 { "far_el3", CPENC(3,6,C6
,C0
,0), 0 },
3798 { "far_el12", CPENC (3, 5, C6
, C0
, 0), F_ARCHEXT
},
3799 { "hpfar_el2", CPENC(3,4,C6
,C0
,4), 0 },
3800 { "par_el1", CPENC(3,0,C7
,C4
,0), 0 },
3801 { "mair_el1", CPENC(3,0,C10
,C2
,0), 0 },
3802 { "mair_el2", CPENC(3,4,C10
,C2
,0), 0 },
3803 { "mair_el3", CPENC(3,6,C10
,C2
,0), 0 },
3804 { "mair_el12", CPENC (3, 5, C10
, C2
, 0), F_ARCHEXT
},
3805 { "amair_el1", CPENC(3,0,C10
,C3
,0), 0 },
3806 { "amair_el2", CPENC(3,4,C10
,C3
,0), 0 },
3807 { "amair_el3", CPENC(3,6,C10
,C3
,0), 0 },
3808 { "amair_el12", CPENC (3, 5, C10
, C3
, 0), F_ARCHEXT
},
3809 { "vbar_el1", CPENC(3,0,C12
,C0
,0), 0 },
3810 { "vbar_el2", CPENC(3,4,C12
,C0
,0), 0 },
3811 { "vbar_el3", CPENC(3,6,C12
,C0
,0), 0 },
3812 { "vbar_el12", CPENC (3, 5, C12
, C0
, 0), F_ARCHEXT
},
3813 { "rvbar_el1", CPENC(3,0,C12
,C0
,1), 0 }, /* RO */
3814 { "rvbar_el2", CPENC(3,4,C12
,C0
,1), 0 }, /* RO */
3815 { "rvbar_el3", CPENC(3,6,C12
,C0
,1), 0 }, /* RO */
3816 { "rmr_el1", CPENC(3,0,C12
,C0
,2), 0 },
3817 { "rmr_el2", CPENC(3,4,C12
,C0
,2), 0 },
3818 { "rmr_el3", CPENC(3,6,C12
,C0
,2), 0 },
3819 { "isr_el1", CPENC(3,0,C12
,C1
,0), 0 }, /* RO */
3820 { "disr_el1", CPENC (3, 0, C12
, C1
, 1), F_ARCHEXT
},
3821 { "vdisr_el2", CPENC (3, 4, C12
, C1
, 1), F_ARCHEXT
},
3822 { "contextidr_el1", CPENC(3,0,C13
,C0
,1), 0 },
3823 { "contextidr_el2", CPENC (3, 4, C13
, C0
, 1), F_ARCHEXT
},
3824 { "contextidr_el12", CPENC (3, 5, C13
, C0
, 1), F_ARCHEXT
},
3825 { "tpidr_el0", CPENC(3,3,C13
,C0
,2), 0 },
3826 { "tpidrro_el0", CPENC(3,3,C13
,C0
,3), 0 }, /* RO */
3827 { "tpidr_el1", CPENC(3,0,C13
,C0
,4), 0 },
3828 { "tpidr_el2", CPENC(3,4,C13
,C0
,2), 0 },
3829 { "tpidr_el3", CPENC(3,6,C13
,C0
,2), 0 },
3830 { "teecr32_el1", CPENC(2,2,C0
, C0
,0), 0 }, /* See section 3.9.7.1 */
3831 { "cntfrq_el0", CPENC(3,3,C14
,C0
,0), 0 }, /* RO */
3832 { "cntpct_el0", CPENC(3,3,C14
,C0
,1), 0 }, /* RO */
3833 { "cntvct_el0", CPENC(3,3,C14
,C0
,2), 0 }, /* RO */
3834 { "cntvoff_el2", CPENC(3,4,C14
,C0
,3), 0 },
3835 { "cntkctl_el1", CPENC(3,0,C14
,C1
,0), 0 },
3836 { "cntkctl_el12", CPENC (3, 5, C14
, C1
, 0), F_ARCHEXT
},
3837 { "cnthctl_el2", CPENC(3,4,C14
,C1
,0), 0 },
3838 { "cntp_tval_el0", CPENC(3,3,C14
,C2
,0), 0 },
3839 { "cntp_tval_el02", CPENC (3, 5, C14
, C2
, 0), F_ARCHEXT
},
3840 { "cntp_ctl_el0", CPENC(3,3,C14
,C2
,1), 0 },
3841 { "cntp_ctl_el02", CPENC (3, 5, C14
, C2
, 1), F_ARCHEXT
},
3842 { "cntp_cval_el0", CPENC(3,3,C14
,C2
,2), 0 },
3843 { "cntp_cval_el02", CPENC (3, 5, C14
, C2
, 2), F_ARCHEXT
},
3844 { "cntv_tval_el0", CPENC(3,3,C14
,C3
,0), 0 },
3845 { "cntv_tval_el02", CPENC (3, 5, C14
, C3
, 0), F_ARCHEXT
},
3846 { "cntv_ctl_el0", CPENC(3,3,C14
,C3
,1), 0 },
3847 { "cntv_ctl_el02", CPENC (3, 5, C14
, C3
, 1), F_ARCHEXT
},
3848 { "cntv_cval_el0", CPENC(3,3,C14
,C3
,2), 0 },
3849 { "cntv_cval_el02", CPENC (3, 5, C14
, C3
, 2), F_ARCHEXT
},
3850 { "cnthp_tval_el2", CPENC(3,4,C14
,C2
,0), 0 },
3851 { "cnthp_ctl_el2", CPENC(3,4,C14
,C2
,1), 0 },
3852 { "cnthp_cval_el2", CPENC(3,4,C14
,C2
,2), 0 },
3853 { "cntps_tval_el1", CPENC(3,7,C14
,C2
,0), 0 },
3854 { "cntps_ctl_el1", CPENC(3,7,C14
,C2
,1), 0 },
3855 { "cntps_cval_el1", CPENC(3,7,C14
,C2
,2), 0 },
3856 { "cnthv_tval_el2", CPENC (3, 4, C14
, C3
, 0), F_ARCHEXT
},
3857 { "cnthv_ctl_el2", CPENC (3, 4, C14
, C3
, 1), F_ARCHEXT
},
3858 { "cnthv_cval_el2", CPENC (3, 4, C14
, C3
, 2), F_ARCHEXT
},
3859 { "dacr32_el2", CPENC(3,4,C3
,C0
,0), 0 },
3860 { "ifsr32_el2", CPENC(3,4,C5
,C0
,1), 0 },
3861 { "teehbr32_el1", CPENC(2,2,C1
,C0
,0), 0 },
3862 { "sder32_el3", CPENC(3,6,C1
,C1
,1), 0 },
3863 { "mdscr_el1", CPENC(2,0,C0
, C2
, 2), 0 },
3864 { "mdccsr_el0", CPENC(2,3,C0
, C1
, 0), 0 }, /* r */
3865 { "mdccint_el1", CPENC(2,0,C0
, C2
, 0), 0 },
3866 { "dbgdtr_el0", CPENC(2,3,C0
, C4
, 0), 0 },
3867 { "dbgdtrrx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* r */
3868 { "dbgdtrtx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* w */
3869 { "osdtrrx_el1", CPENC(2,0,C0
, C0
, 2), 0 }, /* r */
3870 { "osdtrtx_el1", CPENC(2,0,C0
, C3
, 2), 0 }, /* w */
3871 { "oseccr_el1", CPENC(2,0,C0
, C6
, 2), 0 },
3872 { "dbgvcr32_el2", CPENC(2,4,C0
, C7
, 0), 0 },
3873 { "dbgbvr0_el1", CPENC(2,0,C0
, C0
, 4), 0 },
3874 { "dbgbvr1_el1", CPENC(2,0,C0
, C1
, 4), 0 },
3875 { "dbgbvr2_el1", CPENC(2,0,C0
, C2
, 4), 0 },
3876 { "dbgbvr3_el1", CPENC(2,0,C0
, C3
, 4), 0 },
3877 { "dbgbvr4_el1", CPENC(2,0,C0
, C4
, 4), 0 },
3878 { "dbgbvr5_el1", CPENC(2,0,C0
, C5
, 4), 0 },
3879 { "dbgbvr6_el1", CPENC(2,0,C0
, C6
, 4), 0 },
3880 { "dbgbvr7_el1", CPENC(2,0,C0
, C7
, 4), 0 },
3881 { "dbgbvr8_el1", CPENC(2,0,C0
, C8
, 4), 0 },
3882 { "dbgbvr9_el1", CPENC(2,0,C0
, C9
, 4), 0 },
3883 { "dbgbvr10_el1", CPENC(2,0,C0
, C10
,4), 0 },
3884 { "dbgbvr11_el1", CPENC(2,0,C0
, C11
,4), 0 },
3885 { "dbgbvr12_el1", CPENC(2,0,C0
, C12
,4), 0 },
3886 { "dbgbvr13_el1", CPENC(2,0,C0
, C13
,4), 0 },
3887 { "dbgbvr14_el1", CPENC(2,0,C0
, C14
,4), 0 },
3888 { "dbgbvr15_el1", CPENC(2,0,C0
, C15
,4), 0 },
3889 { "dbgbcr0_el1", CPENC(2,0,C0
, C0
, 5), 0 },
3890 { "dbgbcr1_el1", CPENC(2,0,C0
, C1
, 5), 0 },
3891 { "dbgbcr2_el1", CPENC(2,0,C0
, C2
, 5), 0 },
3892 { "dbgbcr3_el1", CPENC(2,0,C0
, C3
, 5), 0 },
3893 { "dbgbcr4_el1", CPENC(2,0,C0
, C4
, 5), 0 },
3894 { "dbgbcr5_el1", CPENC(2,0,C0
, C5
, 5), 0 },
3895 { "dbgbcr6_el1", CPENC(2,0,C0
, C6
, 5), 0 },
3896 { "dbgbcr7_el1", CPENC(2,0,C0
, C7
, 5), 0 },
3897 { "dbgbcr8_el1", CPENC(2,0,C0
, C8
, 5), 0 },
3898 { "dbgbcr9_el1", CPENC(2,0,C0
, C9
, 5), 0 },
3899 { "dbgbcr10_el1", CPENC(2,0,C0
, C10
,5), 0 },
3900 { "dbgbcr11_el1", CPENC(2,0,C0
, C11
,5), 0 },
3901 { "dbgbcr12_el1", CPENC(2,0,C0
, C12
,5), 0 },
3902 { "dbgbcr13_el1", CPENC(2,0,C0
, C13
,5), 0 },
3903 { "dbgbcr14_el1", CPENC(2,0,C0
, C14
,5), 0 },
3904 { "dbgbcr15_el1", CPENC(2,0,C0
, C15
,5), 0 },
3905 { "dbgwvr0_el1", CPENC(2,0,C0
, C0
, 6), 0 },
3906 { "dbgwvr1_el1", CPENC(2,0,C0
, C1
, 6), 0 },
3907 { "dbgwvr2_el1", CPENC(2,0,C0
, C2
, 6), 0 },
3908 { "dbgwvr3_el1", CPENC(2,0,C0
, C3
, 6), 0 },
3909 { "dbgwvr4_el1", CPENC(2,0,C0
, C4
, 6), 0 },
3910 { "dbgwvr5_el1", CPENC(2,0,C0
, C5
, 6), 0 },
3911 { "dbgwvr6_el1", CPENC(2,0,C0
, C6
, 6), 0 },
3912 { "dbgwvr7_el1", CPENC(2,0,C0
, C7
, 6), 0 },
3913 { "dbgwvr8_el1", CPENC(2,0,C0
, C8
, 6), 0 },
3914 { "dbgwvr9_el1", CPENC(2,0,C0
, C9
, 6), 0 },
3915 { "dbgwvr10_el1", CPENC(2,0,C0
, C10
,6), 0 },
3916 { "dbgwvr11_el1", CPENC(2,0,C0
, C11
,6), 0 },
3917 { "dbgwvr12_el1", CPENC(2,0,C0
, C12
,6), 0 },
3918 { "dbgwvr13_el1", CPENC(2,0,C0
, C13
,6), 0 },
3919 { "dbgwvr14_el1", CPENC(2,0,C0
, C14
,6), 0 },
3920 { "dbgwvr15_el1", CPENC(2,0,C0
, C15
,6), 0 },
3921 { "dbgwcr0_el1", CPENC(2,0,C0
, C0
, 7), 0 },
3922 { "dbgwcr1_el1", CPENC(2,0,C0
, C1
, 7), 0 },
3923 { "dbgwcr2_el1", CPENC(2,0,C0
, C2
, 7), 0 },
3924 { "dbgwcr3_el1", CPENC(2,0,C0
, C3
, 7), 0 },
3925 { "dbgwcr4_el1", CPENC(2,0,C0
, C4
, 7), 0 },
3926 { "dbgwcr5_el1", CPENC(2,0,C0
, C5
, 7), 0 },
3927 { "dbgwcr6_el1", CPENC(2,0,C0
, C6
, 7), 0 },
3928 { "dbgwcr7_el1", CPENC(2,0,C0
, C7
, 7), 0 },
3929 { "dbgwcr8_el1", CPENC(2,0,C0
, C8
, 7), 0 },
3930 { "dbgwcr9_el1", CPENC(2,0,C0
, C9
, 7), 0 },
3931 { "dbgwcr10_el1", CPENC(2,0,C0
, C10
,7), 0 },
3932 { "dbgwcr11_el1", CPENC(2,0,C0
, C11
,7), 0 },
3933 { "dbgwcr12_el1", CPENC(2,0,C0
, C12
,7), 0 },
3934 { "dbgwcr13_el1", CPENC(2,0,C0
, C13
,7), 0 },
3935 { "dbgwcr14_el1", CPENC(2,0,C0
, C14
,7), 0 },
3936 { "dbgwcr15_el1", CPENC(2,0,C0
, C15
,7), 0 },
3937 { "mdrar_el1", CPENC(2,0,C1
, C0
, 0), 0 }, /* r */
3938 { "oslar_el1", CPENC(2,0,C1
, C0
, 4), 0 }, /* w */
3939 { "oslsr_el1", CPENC(2,0,C1
, C1
, 4), 0 }, /* r */
3940 { "osdlr_el1", CPENC(2,0,C1
, C3
, 4), 0 },
3941 { "dbgprcr_el1", CPENC(2,0,C1
, C4
, 4), 0 },
3942 { "dbgclaimset_el1", CPENC(2,0,C7
, C8
, 6), 0 },
3943 { "dbgclaimclr_el1", CPENC(2,0,C7
, C9
, 6), 0 },
3944 { "dbgauthstatus_el1", CPENC(2,0,C7
, C14
,6), 0 }, /* r */
3945 { "pmblimitr_el1", CPENC (3, 0, C9
, C10
, 0), F_ARCHEXT
}, /* rw */
3946 { "pmbptr_el1", CPENC (3, 0, C9
, C10
, 1), F_ARCHEXT
}, /* rw */
3947 { "pmbsr_el1", CPENC (3, 0, C9
, C10
, 3), F_ARCHEXT
}, /* rw */
3948 { "pmbidr_el1", CPENC (3, 0, C9
, C10
, 7), F_ARCHEXT
}, /* ro */
3949 { "pmscr_el1", CPENC (3, 0, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3950 { "pmsicr_el1", CPENC (3, 0, C9
, C9
, 2), F_ARCHEXT
}, /* rw */
3951 { "pmsirr_el1", CPENC (3, 0, C9
, C9
, 3), F_ARCHEXT
}, /* rw */
3952 { "pmsfcr_el1", CPENC (3, 0, C9
, C9
, 4), F_ARCHEXT
}, /* rw */
3953 { "pmsevfr_el1", CPENC (3, 0, C9
, C9
, 5), F_ARCHEXT
}, /* rw */
3954 { "pmslatfr_el1", CPENC (3, 0, C9
, C9
, 6), F_ARCHEXT
}, /* rw */
3955 { "pmsidr_el1", CPENC (3, 0, C9
, C9
, 7), F_ARCHEXT
}, /* ro */
3956 { "pmscr_el2", CPENC (3, 4, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3957 { "pmscr_el12", CPENC (3, 5, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3958 { "pmcr_el0", CPENC(3,3,C9
,C12
, 0), 0 },
3959 { "pmcntenset_el0", CPENC(3,3,C9
,C12
, 1), 0 },
3960 { "pmcntenclr_el0", CPENC(3,3,C9
,C12
, 2), 0 },
3961 { "pmovsclr_el0", CPENC(3,3,C9
,C12
, 3), 0 },
3962 { "pmswinc_el0", CPENC(3,3,C9
,C12
, 4), 0 }, /* w */
3963 { "pmselr_el0", CPENC(3,3,C9
,C12
, 5), 0 },
3964 { "pmceid0_el0", CPENC(3,3,C9
,C12
, 6), 0 }, /* r */
3965 { "pmceid1_el0", CPENC(3,3,C9
,C12
, 7), 0 }, /* r */
3966 { "pmccntr_el0", CPENC(3,3,C9
,C13
, 0), 0 },
3967 { "pmxevtyper_el0", CPENC(3,3,C9
,C13
, 1), 0 },
3968 { "pmxevcntr_el0", CPENC(3,3,C9
,C13
, 2), 0 },
3969 { "pmuserenr_el0", CPENC(3,3,C9
,C14
, 0), 0 },
3970 { "pmintenset_el1", CPENC(3,0,C9
,C14
, 1), 0 },
3971 { "pmintenclr_el1", CPENC(3,0,C9
,C14
, 2), 0 },
3972 { "pmovsset_el0", CPENC(3,3,C9
,C14
, 3), 0 },
3973 { "pmevcntr0_el0", CPENC(3,3,C14
,C8
, 0), 0 },
3974 { "pmevcntr1_el0", CPENC(3,3,C14
,C8
, 1), 0 },
3975 { "pmevcntr2_el0", CPENC(3,3,C14
,C8
, 2), 0 },
3976 { "pmevcntr3_el0", CPENC(3,3,C14
,C8
, 3), 0 },
3977 { "pmevcntr4_el0", CPENC(3,3,C14
,C8
, 4), 0 },
3978 { "pmevcntr5_el0", CPENC(3,3,C14
,C8
, 5), 0 },
3979 { "pmevcntr6_el0", CPENC(3,3,C14
,C8
, 6), 0 },
3980 { "pmevcntr7_el0", CPENC(3,3,C14
,C8
, 7), 0 },
3981 { "pmevcntr8_el0", CPENC(3,3,C14
,C9
, 0), 0 },
3982 { "pmevcntr9_el0", CPENC(3,3,C14
,C9
, 1), 0 },
3983 { "pmevcntr10_el0", CPENC(3,3,C14
,C9
, 2), 0 },
3984 { "pmevcntr11_el0", CPENC(3,3,C14
,C9
, 3), 0 },
3985 { "pmevcntr12_el0", CPENC(3,3,C14
,C9
, 4), 0 },
3986 { "pmevcntr13_el0", CPENC(3,3,C14
,C9
, 5), 0 },
3987 { "pmevcntr14_el0", CPENC(3,3,C14
,C9
, 6), 0 },
3988 { "pmevcntr15_el0", CPENC(3,3,C14
,C9
, 7), 0 },
3989 { "pmevcntr16_el0", CPENC(3,3,C14
,C10
,0), 0 },
3990 { "pmevcntr17_el0", CPENC(3,3,C14
,C10
,1), 0 },
3991 { "pmevcntr18_el0", CPENC(3,3,C14
,C10
,2), 0 },
3992 { "pmevcntr19_el0", CPENC(3,3,C14
,C10
,3), 0 },
3993 { "pmevcntr20_el0", CPENC(3,3,C14
,C10
,4), 0 },
3994 { "pmevcntr21_el0", CPENC(3,3,C14
,C10
,5), 0 },
3995 { "pmevcntr22_el0", CPENC(3,3,C14
,C10
,6), 0 },
3996 { "pmevcntr23_el0", CPENC(3,3,C14
,C10
,7), 0 },
3997 { "pmevcntr24_el0", CPENC(3,3,C14
,C11
,0), 0 },
3998 { "pmevcntr25_el0", CPENC(3,3,C14
,C11
,1), 0 },
3999 { "pmevcntr26_el0", CPENC(3,3,C14
,C11
,2), 0 },
4000 { "pmevcntr27_el0", CPENC(3,3,C14
,C11
,3), 0 },
4001 { "pmevcntr28_el0", CPENC(3,3,C14
,C11
,4), 0 },
4002 { "pmevcntr29_el0", CPENC(3,3,C14
,C11
,5), 0 },
4003 { "pmevcntr30_el0", CPENC(3,3,C14
,C11
,6), 0 },
4004 { "pmevtyper0_el0", CPENC(3,3,C14
,C12
,0), 0 },
4005 { "pmevtyper1_el0", CPENC(3,3,C14
,C12
,1), 0 },
4006 { "pmevtyper2_el0", CPENC(3,3,C14
,C12
,2), 0 },
4007 { "pmevtyper3_el0", CPENC(3,3,C14
,C12
,3), 0 },
4008 { "pmevtyper4_el0", CPENC(3,3,C14
,C12
,4), 0 },
4009 { "pmevtyper5_el0", CPENC(3,3,C14
,C12
,5), 0 },
4010 { "pmevtyper6_el0", CPENC(3,3,C14
,C12
,6), 0 },
4011 { "pmevtyper7_el0", CPENC(3,3,C14
,C12
,7), 0 },
4012 { "pmevtyper8_el0", CPENC(3,3,C14
,C13
,0), 0 },
4013 { "pmevtyper9_el0", CPENC(3,3,C14
,C13
,1), 0 },
4014 { "pmevtyper10_el0", CPENC(3,3,C14
,C13
,2), 0 },
4015 { "pmevtyper11_el0", CPENC(3,3,C14
,C13
,3), 0 },
4016 { "pmevtyper12_el0", CPENC(3,3,C14
,C13
,4), 0 },
4017 { "pmevtyper13_el0", CPENC(3,3,C14
,C13
,5), 0 },
4018 { "pmevtyper14_el0", CPENC(3,3,C14
,C13
,6), 0 },
4019 { "pmevtyper15_el0", CPENC(3,3,C14
,C13
,7), 0 },
4020 { "pmevtyper16_el0", CPENC(3,3,C14
,C14
,0), 0 },
4021 { "pmevtyper17_el0", CPENC(3,3,C14
,C14
,1), 0 },
4022 { "pmevtyper18_el0", CPENC(3,3,C14
,C14
,2), 0 },
4023 { "pmevtyper19_el0", CPENC(3,3,C14
,C14
,3), 0 },
4024 { "pmevtyper20_el0", CPENC(3,3,C14
,C14
,4), 0 },
4025 { "pmevtyper21_el0", CPENC(3,3,C14
,C14
,5), 0 },
4026 { "pmevtyper22_el0", CPENC(3,3,C14
,C14
,6), 0 },
4027 { "pmevtyper23_el0", CPENC(3,3,C14
,C14
,7), 0 },
4028 { "pmevtyper24_el0", CPENC(3,3,C14
,C15
,0), 0 },
4029 { "pmevtyper25_el0", CPENC(3,3,C14
,C15
,1), 0 },
4030 { "pmevtyper26_el0", CPENC(3,3,C14
,C15
,2), 0 },
4031 { "pmevtyper27_el0", CPENC(3,3,C14
,C15
,3), 0 },
4032 { "pmevtyper28_el0", CPENC(3,3,C14
,C15
,4), 0 },
4033 { "pmevtyper29_el0", CPENC(3,3,C14
,C15
,5), 0 },
4034 { "pmevtyper30_el0", CPENC(3,3,C14
,C15
,6), 0 },
4035 { "pmccfiltr_el0", CPENC(3,3,C14
,C15
,7), 0 },
4037 { "dit", CPEN_ (3, C2
, 5), F_ARCHEXT
},
4038 { "vstcr_el2", CPENC(3, 4, C2
, C6
, 2), F_ARCHEXT
},
4039 { "vsttbr_el2", CPENC(3, 4, C2
, C6
, 0), F_ARCHEXT
},
4040 { "cnthvs_tval_el2", CPENC(3, 4, C14
, C4
, 0), F_ARCHEXT
},
4041 { "cnthvs_cval_el2", CPENC(3, 4, C14
, C4
, 2), F_ARCHEXT
},
4042 { "cnthvs_ctl_el2", CPENC(3, 4, C14
, C4
, 1), F_ARCHEXT
},
4043 { "cnthps_tval_el2", CPENC(3, 4, C14
, C5
, 0), F_ARCHEXT
},
4044 { "cnthps_cval_el2", CPENC(3, 4, C14
, C5
, 2), F_ARCHEXT
},
4045 { "cnthps_ctl_el2", CPENC(3, 4, C14
, C5
, 1), F_ARCHEXT
},
4046 { "sder32_el2", CPENC(3, 4, C1
, C3
, 1), F_ARCHEXT
},
4047 { "vncr_el2", CPENC(3, 4, C2
, C2
, 0), F_ARCHEXT
},
4048 { 0, CPENC(0,0,0,0,0), 0 },
4052 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg
*reg
)
4054 return (reg
->flags
& F_DEPRECATED
) != 0;
4058 aarch64_sys_reg_supported_p (const aarch64_feature_set features
,
4059 const aarch64_sys_reg
*reg
)
4061 if (!(reg
->flags
& F_ARCHEXT
))
4064 /* PAN. Values are from aarch64_sys_regs. */
4065 if (reg
->value
== CPEN_(0,C2
,3)
4066 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
4069 /* Virtualization host extensions: system registers. */
4070 if ((reg
->value
== CPENC (3, 4, C2
, C0
, 1)
4071 || reg
->value
== CPENC (3, 4, C13
, C0
, 1)
4072 || reg
->value
== CPENC (3, 4, C14
, C3
, 0)
4073 || reg
->value
== CPENC (3, 4, C14
, C3
, 1)
4074 || reg
->value
== CPENC (3, 4, C14
, C3
, 2))
4075 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4078 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
4079 if ((reg
->value
== CPEN_ (5, C0
, 0)
4080 || reg
->value
== CPEN_ (5, C0
, 1)
4081 || reg
->value
== CPENC (3, 5, C1
, C0
, 0)
4082 || reg
->value
== CPENC (3, 5, C1
, C0
, 2)
4083 || reg
->value
== CPENC (3, 5, C2
, C0
, 0)
4084 || reg
->value
== CPENC (3, 5, C2
, C0
, 1)
4085 || reg
->value
== CPENC (3, 5, C2
, C0
, 2)
4086 || reg
->value
== CPENC (3, 5, C5
, C1
, 0)
4087 || reg
->value
== CPENC (3, 5, C5
, C1
, 1)
4088 || reg
->value
== CPENC (3, 5, C5
, C2
, 0)
4089 || reg
->value
== CPENC (3, 5, C6
, C0
, 0)
4090 || reg
->value
== CPENC (3, 5, C10
, C2
, 0)
4091 || reg
->value
== CPENC (3, 5, C10
, C3
, 0)
4092 || reg
->value
== CPENC (3, 5, C12
, C0
, 0)
4093 || reg
->value
== CPENC (3, 5, C13
, C0
, 1)
4094 || reg
->value
== CPENC (3, 5, C14
, C1
, 0))
4095 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4098 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
4099 if ((reg
->value
== CPENC (3, 5, C14
, C2
, 0)
4100 || reg
->value
== CPENC (3, 5, C14
, C2
, 1)
4101 || reg
->value
== CPENC (3, 5, C14
, C2
, 2)
4102 || reg
->value
== CPENC (3, 5, C14
, C3
, 0)
4103 || reg
->value
== CPENC (3, 5, C14
, C3
, 1)
4104 || reg
->value
== CPENC (3, 5, C14
, C3
, 2))
4105 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4108 /* ARMv8.2 features. */
4110 /* ID_AA64MMFR2_EL1. */
4111 if (reg
->value
== CPENC (3, 0, C0
, C7
, 2)
4112 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4116 if (reg
->value
== CPEN_ (0, C2
, 4)
4117 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4120 /* RAS extension. */
4122 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
4123 ERXMISC0_EL1 AND ERXMISC1_EL1. */
4124 if ((reg
->value
== CPENC (3, 0, C5
, C3
, 0)
4125 || reg
->value
== CPENC (3, 0, C5
, C3
, 1)
4126 || reg
->value
== CPENC (3, 0, C5
, C3
, 2)
4127 || reg
->value
== CPENC (3, 0, C5
, C3
, 3)
4128 || reg
->value
== CPENC (3, 0, C5
, C4
, 0)
4129 || reg
->value
== CPENC (3, 0, C5
, C4
, 1)
4130 || reg
->value
== CPENC (3, 0, C5
, C4
, 2)
4131 || reg
->value
== CPENC (3, 0, C5
, C4
, 3)
4132 || reg
->value
== CPENC (3, 0, C5
, C5
, 0)
4133 || reg
->value
== CPENC (3, 0, C5
, C5
, 1))
4134 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
4137 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
4138 if ((reg
->value
== CPENC (3, 4, C5
, C2
, 3)
4139 || reg
->value
== CPENC (3, 0, C12
, C1
, 1)
4140 || reg
->value
== CPENC (3, 4, C12
, C1
, 1))
4141 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
4144 /* Statistical Profiling extension. */
4145 if ((reg
->value
== CPENC (3, 0, C9
, C10
, 0)
4146 || reg
->value
== CPENC (3, 0, C9
, C10
, 1)
4147 || reg
->value
== CPENC (3, 0, C9
, C10
, 3)
4148 || reg
->value
== CPENC (3, 0, C9
, C10
, 7)
4149 || reg
->value
== CPENC (3, 0, C9
, C9
, 0)
4150 || reg
->value
== CPENC (3, 0, C9
, C9
, 2)
4151 || reg
->value
== CPENC (3, 0, C9
, C9
, 3)
4152 || reg
->value
== CPENC (3, 0, C9
, C9
, 4)
4153 || reg
->value
== CPENC (3, 0, C9
, C9
, 5)
4154 || reg
->value
== CPENC (3, 0, C9
, C9
, 6)
4155 || reg
->value
== CPENC (3, 0, C9
, C9
, 7)
4156 || reg
->value
== CPENC (3, 4, C9
, C9
, 0)
4157 || reg
->value
== CPENC (3, 5, C9
, C9
, 0))
4158 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PROFILE
))
4161 /* ARMv8.3 Pointer authentication keys. */
4162 if ((reg
->value
== CPENC (3, 0, C2
, C1
, 0)
4163 || reg
->value
== CPENC (3, 0, C2
, C1
, 1)
4164 || reg
->value
== CPENC (3, 0, C2
, C1
, 2)
4165 || reg
->value
== CPENC (3, 0, C2
, C1
, 3)
4166 || reg
->value
== CPENC (3, 0, C2
, C2
, 0)
4167 || reg
->value
== CPENC (3, 0, C2
, C2
, 1)
4168 || reg
->value
== CPENC (3, 0, C2
, C2
, 2)
4169 || reg
->value
== CPENC (3, 0, C2
, C2
, 3)
4170 || reg
->value
== CPENC (3, 0, C2
, C3
, 0)
4171 || reg
->value
== CPENC (3, 0, C2
, C3
, 1))
4172 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_3
))
4176 if ((reg
->value
== CPENC (3, 0, C0
, C4
, 4)
4177 || reg
->value
== CPENC (3, 0, C1
, C2
, 0)
4178 || reg
->value
== CPENC (3, 4, C1
, C2
, 0)
4179 || reg
->value
== CPENC (3, 6, C1
, C2
, 0)
4180 || reg
->value
== CPENC (3, 5, C1
, C2
, 0)
4181 || reg
->value
== CPENC (3, 0, C0
, C0
, 7))
4182 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_SVE
))
4185 /* ARMv8.4 features. */
4188 if (reg
->value
== CPEN_ (3, C2
, 5)
4189 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
4192 /* Virtualization extensions. */
4193 if ((reg
->value
== CPENC(3, 4, C2
, C6
, 2)
4194 || reg
->value
== CPENC(3, 4, C2
, C6
, 0)
4195 || reg
->value
== CPENC(3, 4, C14
, C4
, 0)
4196 || reg
->value
== CPENC(3, 4, C14
, C4
, 2)
4197 || reg
->value
== CPENC(3, 4, C14
, C4
, 1)
4198 || reg
->value
== CPENC(3, 4, C14
, C5
, 0)
4199 || reg
->value
== CPENC(3, 4, C14
, C5
, 2)
4200 || reg
->value
== CPENC(3, 4, C14
, C5
, 1)
4201 || reg
->value
== CPENC(3, 4, C1
, C3
, 1)
4202 || reg
->value
== CPENC(3, 4, C2
, C2
, 0))
4203 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
4206 /* ARMv8.4 TLB instructions. */
4207 if ((reg
->value
== CPENS (0, C8
, C1
, 0)
4208 || reg
->value
== CPENS (0, C8
, C1
, 1)
4209 || reg
->value
== CPENS (0, C8
, C1
, 2)
4210 || reg
->value
== CPENS (0, C8
, C1
, 3)
4211 || reg
->value
== CPENS (0, C8
, C1
, 5)
4212 || reg
->value
== CPENS (0, C8
, C1
, 7)
4213 || reg
->value
== CPENS (4, C8
, C4
, 0)
4214 || reg
->value
== CPENS (4, C8
, C4
, 4)
4215 || reg
->value
== CPENS (4, C8
, C1
, 1)
4216 || reg
->value
== CPENS (4, C8
, C1
, 5)
4217 || reg
->value
== CPENS (4, C8
, C1
, 6)
4218 || reg
->value
== CPENS (6, C8
, C1
, 1)
4219 || reg
->value
== CPENS (6, C8
, C1
, 5)
4220 || reg
->value
== CPENS (4, C8
, C1
, 0)
4221 || reg
->value
== CPENS (4, C8
, C1
, 4)
4222 || reg
->value
== CPENS (6, C8
, C1
, 0)
4223 || reg
->value
== CPENS (0, C8
, C6
, 1)
4224 || reg
->value
== CPENS (0, C8
, C6
, 3)
4225 || reg
->value
== CPENS (0, C8
, C6
, 5)
4226 || reg
->value
== CPENS (0, C8
, C6
, 7)
4227 || reg
->value
== CPENS (0, C8
, C2
, 1)
4228 || reg
->value
== CPENS (0, C8
, C2
, 3)
4229 || reg
->value
== CPENS (0, C8
, C2
, 5)
4230 || reg
->value
== CPENS (0, C8
, C2
, 7)
4231 || reg
->value
== CPENS (0, C8
, C5
, 1)
4232 || reg
->value
== CPENS (0, C8
, C5
, 3)
4233 || reg
->value
== CPENS (0, C8
, C5
, 5)
4234 || reg
->value
== CPENS (0, C8
, C5
, 7)
4235 || reg
->value
== CPENS (4, C8
, C0
, 2)
4236 || reg
->value
== CPENS (4, C8
, C0
, 6)
4237 || reg
->value
== CPENS (4, C8
, C4
, 2)
4238 || reg
->value
== CPENS (4, C8
, C4
, 6)
4239 || reg
->value
== CPENS (4, C8
, C4
, 3)
4240 || reg
->value
== CPENS (4, C8
, C4
, 7)
4241 || reg
->value
== CPENS (4, C8
, C6
, 1)
4242 || reg
->value
== CPENS (4, C8
, C6
, 5)
4243 || reg
->value
== CPENS (4, C8
, C2
, 1)
4244 || reg
->value
== CPENS (4, C8
, C2
, 5)
4245 || reg
->value
== CPENS (4, C8
, C5
, 1)
4246 || reg
->value
== CPENS (4, C8
, C5
, 5)
4247 || reg
->value
== CPENS (6, C8
, C6
, 1)
4248 || reg
->value
== CPENS (6, C8
, C6
, 5)
4249 || reg
->value
== CPENS (6, C8
, C2
, 1)
4250 || reg
->value
== CPENS (6, C8
, C2
, 5)
4251 || reg
->value
== CPENS (6, C8
, C5
, 1)
4252 || reg
->value
== CPENS (6, C8
, C5
, 5))
4253 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
4259 /* The CPENC below is fairly misleading, the fields
4260 here are not in CPENC form. They are in op2op1 form. The fields are encoded
4261 by ins_pstatefield, which just shifts the value by the width of the fields
4262 in a loop. So if you CPENC them only the first value will be set, the rest
4263 are masked out to 0. As an example. op2 = 3, op1=2. CPENC would produce a
4264 value of 0b110000000001000000 (0x30040) while what you want is
4266 const aarch64_sys_reg aarch64_pstatefields
[] =
4268 { "spsel", 0x05, 0 },
4269 { "daifset", 0x1e, 0 },
4270 { "daifclr", 0x1f, 0 },
4271 { "pan", 0x04, F_ARCHEXT
},
4272 { "uao", 0x03, F_ARCHEXT
},
4273 { "dit", 0x1a, F_ARCHEXT
},
4274 { 0, CPENC(0,0,0,0,0), 0 },
4278 aarch64_pstatefield_supported_p (const aarch64_feature_set features
,
4279 const aarch64_sys_reg
*reg
)
4281 if (!(reg
->flags
& F_ARCHEXT
))
4284 /* PAN. Values are from aarch64_pstatefields. */
4285 if (reg
->value
== 0x04
4286 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
4289 /* UAO. Values are from aarch64_pstatefields. */
4290 if (reg
->value
== 0x03
4291 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4294 /* DIT. Values are from aarch64_pstatefields. */
4295 if (reg
->value
== 0x1a
4296 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
/* Operands accepted by the IC (instruction-cache maintenance) system
   instruction.  CPENS packs (op1, CRn=C7, CRm, op2); entries flagged
   F_HASXT take an Xt register operand.  Terminated by the all-zero
   entry.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
/* Operands accepted by the DC (data-cache maintenance) system
   instruction.  All take an Xt operand (F_HASXT); "cvap" additionally
   requires an architecture extension (checked in
   aarch64_sys_ins_reg_supported_p).  Terminated by the all-zero
   entry.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",   CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",  CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",   CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",  CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",   CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",  CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",  CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "civac", CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",  CPENS (0, C7, C14, 2), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
/* Operands accepted by the AT (address-translation) system instruction.
   All take an Xt operand.  "s1e1rp"/"s1e1wp" additionally require an
   architecture extension (checked in aarch64_sys_ins_reg_supported_p).
   Terminated by the all-zero entry.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",  CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",  CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",  CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",  CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",  CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",  CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",  CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",  CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0, CPENS(0,0,0,0), 0 }
};
/* Operands accepted by the TLBI (TLB-invalidate) system instruction.
   CPENS packs (op1, CRn=C8, CRm, op2).  F_HASXT entries take an Xt
   operand; F_ARCHEXT entries (the *os outer-shareable and r* range
   forms) additionally require an architecture extension -- the feature
   checks live in the supported_p predicate, keyed by these CPENS
   values.  Terminated by the all-zero entry.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",      CPENS(0,C8,C7,0), 0 },
    { "vae1",         CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",       CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",        CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is",    CPENS(0,C8,C3,0), 0 },
    { "vae1is",       CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",     CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",      CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is",    CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",   CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",      CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",     CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",         CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",       CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",   CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is", CPENS(4,C8,C3,6), 0 },
    { "vae3",         CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",       CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",        CPENS(4,C8,C7,0), 0 },
    { "alle2is",      CPENS(4,C8,C3,0), 0 },
    { "alle1",        CPENS(4,C8,C7,4), 0 },
    { "alle1is",      CPENS(4,C8,C3,4), 0 },
    { "alle3",        CPENS(6,C8,C7,0), 0 },
    { "alle3is",      CPENS(6,C8,C3,0), 0 },
    { "vale1is",      CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",      CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",      CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",     CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",        CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",        CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",        CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",       CPENS (0, C8, C7, 7), F_HASXT },

    /* Outer-shareable ("os") forms.  */
    { "vmalle1os",    CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os",     CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os",      CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os",      CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os",      CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* Range ("r") forms.  */
    { "rvae1",        CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1",       CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1",       CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1",      CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is",      CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is",     CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is",     CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is",    CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os",      CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os",     CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os",     CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os",    CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is",   CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",  CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1",     CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1",    CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os",   CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",  CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2",        CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2",       CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is",      CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is",     CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os",      CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os",     CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3",        CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3",       CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is",      CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is",     CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os",      CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os",     CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { 0, CPENS(0,0,0,0), 0 }
};
4430 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg
*sys_ins_reg
)
4432 return (sys_ins_reg
->flags
& F_HASXT
) != 0;
4436 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features
,
4437 const aarch64_sys_ins_reg
*reg
)
4439 if (!(reg
->flags
& F_ARCHEXT
))
4442 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4443 if (reg
->value
== CPENS (3, C7
, C12
, 1)
4444 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4447 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4448 if ((reg
->value
== CPENS (0, C7
, C9
, 0)
4449 || reg
->value
== CPENS (0, C7
, C9
, 1))
4450 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
/* Extract bit BT from instruction word INSN.  */
#define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] from INSN.
   NOTE(review): the mask uses a plain int shift, so a field of 32 or
   more bits would be undefined behaviour; the callers here only extract
   5-bit register fields -- confirm before reusing for wider fields.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4477 verify_ldpsw (const struct aarch64_opcode
* opcode ATTRIBUTE_UNUSED
,
4478 const aarch64_insn insn
)
4480 int t
= BITS (insn
, 4, 0);
4481 int n
= BITS (insn
, 9, 5);
4482 int t2
= BITS (insn
, 14, 10);
4486 /* Write back enabled. */
4487 if ((t
== n
|| t2
== n
) && n
!= 31)
4501 /* Return true if VALUE cannot be moved into an SVE register using DUP
4502 (with any element size, not just ESIZE) and if using DUPM would
4503 therefore be OK. ESIZE is the number of bytes in the immediate. */
4506 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue
, int esize
)
4508 int64_t svalue
= uvalue
;
4509 uint64_t upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
4511 if ((uvalue
& ~upper
) != uvalue
&& (uvalue
| upper
) != uvalue
)
4513 if (esize
<= 4 || (uint32_t) uvalue
== (uint32_t) (uvalue
>> 32))
4515 svalue
= (int32_t) uvalue
;
4516 if (esize
<= 2 || (uint16_t) uvalue
== (uint16_t) (uvalue
>> 16))
4518 svalue
= (int16_t) uvalue
;
4519 if (esize
== 1 || (uint8_t) uvalue
== (uint8_t) (uvalue
>> 8))
4523 if ((svalue
& 0xff) == 0)
4525 return svalue
< -128 || svalue
>= 128;
/* Include the opcode description table as well as the operand description
   table.  VERIFIER maps the table's verifier names onto the verify_*
   functions defined above.  */
#define VERIFIER(x) verify_##x
#include "aarch64-tbl.h"