1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
35 int debug_dump
= FALSE
;
36 #endif /* DEBUG_AARCH64 */
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array
[32] = {
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array
[16] = {
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
108 return ((qualifier
>= AARCH64_OPND_QLF_V_8B
109 && qualifier
<= AARCH64_OPND_QLF_V_1Q
) ? TRUE
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
116 return ((qualifier
>= AARCH64_OPND_QLF_S_B
117 && qualifier
<= AARCH64_OPND_QLF_S_Q
) ? TRUE
127 DP_VECTOR_ACROSS_LANES
,
130 static const char significant_operand_index
[] =
132 0, /* DP_UNKNOWN, by default using operand 0. */
133 0, /* DP_VECTOR_3SAME */
134 1, /* DP_VECTOR_LONG */
135 2, /* DP_VECTOR_WIDE */
136 1, /* DP_VECTOR_ACROSS_LANES */
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
147 if (vector_qualifier_p (qualifiers
[0]) == TRUE
)
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers
[0] == qualifiers
[1]
152 && vector_qualifier_p (qualifiers
[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers
[0])
154 == aarch64_get_qualifier_esize (qualifiers
[1]))
155 && (aarch64_get_qualifier_esize (qualifiers
[0])
156 == aarch64_get_qualifier_esize (qualifiers
[2])))
157 return DP_VECTOR_3SAME
;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
161 if (vector_qualifier_p (qualifiers
[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers
[0])
164 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
165 return DP_VECTOR_LONG
;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers
[0] == qualifiers
[1]
168 && vector_qualifier_p (qualifiers
[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers
[0])
171 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers
[0])
173 == aarch64_get_qualifier_esize (qualifiers
[1])))
174 return DP_VECTOR_WIDE
;
176 else if (fp_qualifier_p (qualifiers
[0]) == TRUE
)
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers
[1]) == TRUE
180 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
181 return DP_VECTOR_ACROSS_LANES
;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
199 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
202 const aarch64_field fields
[] =
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_2: in rmif instructions. */
244 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_2: in rmif instructions. */
246 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
247 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
248 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
249 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
250 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
251 { 5, 14 }, /* imm14: in test bit and branch instructions. */
252 { 5, 16 }, /* imm16: in exception instructions. */
253 { 0, 26 }, /* imm26: in unconditional branch instructions. */
254 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
255 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
256 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
257 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
258 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
259 { 22, 1 }, /* N: in logical (immediate) instructions. */
260 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
261 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
262 { 31, 1 }, /* sf: in integer data processing instructions. */
263 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
264 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
265 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
266 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
267 { 31, 1 }, /* b5: in the test bit and branch instructions. */
268 { 19, 5 }, /* b40: in the test bit and branch instructions. */
269 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
270 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
271 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
272 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
273 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
274 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
275 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
276 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
277 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
278 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
279 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
280 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
281 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
282 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
283 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
284 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
285 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
286 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
287 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
288 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
289 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
290 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
291 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
292 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
293 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
294 { 5, 1 }, /* SVE_i1: single-bit immediate. */
295 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
296 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
297 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
298 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
299 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
300 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
301 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
302 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
303 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
304 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
305 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
306 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
307 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
308 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
309 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
310 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
311 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
312 { 16, 4 }, /* SVE_tsz: triangular size select. */
313 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
314 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
315 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
316 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
317 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
318 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
319 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
320 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
321 { 12, 2 }, /* SM3: Indexed element SM3 2 bits index immediate. */
/* Return the operand class recorded for operand code TYPE by indexing
   the global aarch64_operands description table.  No bounds check is
   performed; TYPE must be a valid enum aarch64_opnd value.  */
324 enum aarch64_operand_class
325 aarch64_get_operand_class (enum aarch64_opnd type
)
327 return aarch64_operands
[type
].op_class
;
/* Return the name string recorded for operand code TYPE in the global
   aarch64_operands table.  */
331 aarch64_get_operand_name (enum aarch64_opnd type
)
333 return aarch64_operands
[type
].name
;
336 /* Get the operand description string for operand code TYPE.
337 This is usually used for diagnostic purposes. */
339 aarch64_get_operand_desc (enum aarch64_opnd type
)
341 return aarch64_operands
[type
].desc
;
344 /* Table of all conditional affixes. */
345 const aarch64_cond aarch64_conds
[16] =
347 {{"eq", "none"}, 0x0},
348 {{"ne", "any"}, 0x1},
349 {{"cs", "hs", "nlast"}, 0x2},
350 {{"cc", "lo", "ul", "last"}, 0x3},
351 {{"mi", "first"}, 0x4},
352 {{"pl", "nfrst"}, 0x5},
355 {{"hi", "pmore"}, 0x8},
356 {{"ls", "plast"}, 0x9},
357 {{"ge", "tcont"}, 0xa},
358 {{"lt", "tstop"}, 0xb},
/* Return the condition descriptor for the 4-bit condition code VALUE.
   NOTE(review): VALUE is used directly as an index into the 16-entry
   aarch64_conds table, so callers must guarantee VALUE < 16 — confirm.  */
366 get_cond_from_value (aarch64_insn value
)
369 return &aarch64_conds
[(unsigned int) value
];
/* Return the descriptor of the condition opposite to *COND.  In the
   AArch64 condition encoding, conditions come in inverse pairs that
   differ only in bit 0 (e.g. "eq" 0x0 / "ne" 0x1 in aarch64_conds),
   hence the XOR with 0x1.  */
373 get_inverted_cond (const aarch64_cond
*cond
)
375 return &aarch64_conds
[cond
->value
^ 0x1];
378 /* Table describing the operand extension/shifting operators; indexed by
379 enum aarch64_modifier_kind.
381 The value column provides the most common values for encoding modifiers,
382 which enables table-driven encoding/decoding for the modifiers. */
383 const struct aarch64_name_value_pair aarch64_operand_modifiers
[] =
404 enum aarch64_modifier_kind
405 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
407 return desc
- aarch64_operand_modifiers
;
411 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
413 return aarch64_operand_modifiers
[kind
].value
;
416 enum aarch64_modifier_kind
417 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
418 bfd_boolean extend_p
)
420 if (extend_p
== TRUE
)
421 return AARCH64_MOD_UXTB
+ value
;
423 return AARCH64_MOD_LSL
- value
;
427 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
429 return (kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
)
433 static inline bfd_boolean
434 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
436 return (kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
)
440 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
460 /* Table describing the operands supported by the aliases of the HINT
463 The name column is the operand that is accepted for the alias. The value
464 column is the hint number of the alias. The list of operands is terminated
465 by NULL in the name column. */
467 const struct aarch64_name_value_pair aarch64_hint_options
[] =
469 { "csync", 0x11 }, /* PSB CSYNC. */
473 /* op -> op: load = 0 instruction = 1 store = 2
   l -> cache level: 1, 2 or 3, encoded as (l - 1)
475 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
476 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
477 const struct aarch64_name_value_pair aarch64_prfops
[32] =
479 { "pldl1keep", B(0, 1, 0) },
480 { "pldl1strm", B(0, 1, 1) },
481 { "pldl2keep", B(0, 2, 0) },
482 { "pldl2strm", B(0, 2, 1) },
483 { "pldl3keep", B(0, 3, 0) },
484 { "pldl3strm", B(0, 3, 1) },
487 { "plil1keep", B(1, 1, 0) },
488 { "plil1strm", B(1, 1, 1) },
489 { "plil2keep", B(1, 2, 0) },
490 { "plil2strm", B(1, 2, 1) },
491 { "plil3keep", B(1, 3, 0) },
492 { "plil3strm", B(1, 3, 1) },
495 { "pstl1keep", B(2, 1, 0) },
496 { "pstl1strm", B(2, 1, 1) },
497 { "pstl2keep", B(2, 2, 0) },
498 { "pstl2strm", B(2, 2, 1) },
499 { "pstl3keep", B(2, 3, 0) },
500 { "pstl3strm", B(2, 3, 1) },
514 /* Utilities on value constraint. */
/* Return 1 if VALUE lies in the inclusive range [LOW, HIGH], 0 otherwise.  */

static int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  if (value > high)
    return 0;
  return 1;
}
/* Return 1 if VALUE is a multiple of ALIGN, 0 otherwise.
   NOTE(review): assumes ALIGN is non-zero — confirm all callers.  */
static int
value_aligned_p (int64_t value, int align)
{
  return (value % align != 0) ? 0 : 1;
}
/* Return 1 if the signed VALUE is representable in a two's-complement
   field of WIDTH bits (i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1)),
   0 otherwise.  WIDTH must be smaller than 32.  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    /* Half-open bound: values in [-lim, lim) fit.  */
    int64_t lim = (int64_t) 1 << (width - 1);
    return (value >= -lim && value < lim) ? 1 : 0;
  }
}
/* Return 1 if the non-negative VALUE is representable in an unsigned
   field of WIDTH bits (i.e. 0 <= VALUE < 2^WIDTH), 0 otherwise.
   WIDTH must be smaller than 32.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t lim = (int64_t) 1 << width;
    return (value >= 0 && value < lim) ? 1 : 0;
  }
}
557 /* Return 1 if OPERAND is SP or WSP. */
/* An operand is the stack pointer when it is an integer register, the
   operand kind is one that may encode the stack pointer, and the
   register number is 31 (which SP/WSP share with XZR/WZR).  */
559 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
561 return ((aarch64_get_operand_class (operand
->type
)
562 == AARCH64_OPND_CLASS_INT_REG
)
563 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
564 && operand
->reg
.regno
== 31);
567 /* Return 1 if OPERAND is XZR or WZR. */
/* Register number 31 is the zero register exactly when the operand kind
   is NOT one that may encode the stack pointer — hence the negated
   operand_maybe_stack_pointer test, the mirror of aarch64_stack_pointer_p.  */
569 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
571 return ((aarch64_get_operand_class (operand
->type
)
572 == AARCH64_OPND_CLASS_INT_REG
)
573 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
574 && operand
->reg
.regno
== 31);
577 /* Return true if the operand *OPERAND that has the operand code
578 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
579 qualified by the qualifier TARGET. */
582 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
583 aarch64_opnd_qualifier_t target
)
585 switch (operand
->qualifier
)
587 case AARCH64_OPND_QLF_W
:
588 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
591 case AARCH64_OPND_QLF_X
:
592 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
595 case AARCH64_OPND_QLF_WSP
:
596 if (target
== AARCH64_OPND_QLF_W
597 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
600 case AARCH64_OPND_QLF_SP
:
601 if (target
== AARCH64_OPND_QLF_X
602 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
612 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
613 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
615 Return NIL if more than one expected qualifiers are found. */
617 aarch64_opnd_qualifier_t
618 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
620 const aarch64_opnd_qualifier_t known_qlf
,
627 When the known qualifier is NIL, we have to assume that there is only
628 one qualifier sequence in the *QSEQ_LIST and return the corresponding
629 qualifier directly. One scenario is that for instruction
630 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
631 which has only one possible valid qualifier sequence
633 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
634 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
636 Because the qualifier NIL has dual roles in the qualifier sequence:
637 it can mean no qualifier for the operand, or the qualifer sequence is
638 not in use (when all qualifiers in the sequence are NILs), we have to
639 handle this special case here. */
640 if (known_qlf
== AARCH64_OPND_NIL
)
642 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
643 return qseq_list
[0][idx
];
646 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
648 if (qseq_list
[i
][known_idx
] == known_qlf
)
651 /* More than one sequences are found to have KNOWN_QLF at
653 return AARCH64_OPND_NIL
;
658 return qseq_list
[saved_i
][idx
];
661 enum operand_qualifier_kind
669 /* Operand qualifier description. */
670 struct operand_qualifier_data
672 /* The usage of the three data fields depends on the qualifier kind. */
679 enum operand_qualifier_kind kind
;
682 /* Indexed by the operand qualifier enumerators. */
683 struct operand_qualifier_data aarch64_opnd_qualifiers
[] =
685 {0, 0, 0, "NIL", OQK_NIL
},
687 /* Operand variant qualifiers.
689 element size, number of elements and common value for encoding. */
691 {4, 1, 0x0, "w", OQK_OPD_VARIANT
},
692 {8, 1, 0x1, "x", OQK_OPD_VARIANT
},
693 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT
},
694 {8, 1, 0x1, "sp", OQK_OPD_VARIANT
},
696 {1, 1, 0x0, "b", OQK_OPD_VARIANT
},
697 {2, 1, 0x1, "h", OQK_OPD_VARIANT
},
698 {4, 1, 0x2, "s", OQK_OPD_VARIANT
},
699 {8, 1, 0x3, "d", OQK_OPD_VARIANT
},
700 {16, 1, 0x4, "q", OQK_OPD_VARIANT
},
702 {1, 8, 0x0, "8b", OQK_OPD_VARIANT
},
703 {1, 16, 0x1, "16b", OQK_OPD_VARIANT
},
704 {2, 2, 0x0, "2h", OQK_OPD_VARIANT
},
705 {2, 4, 0x2, "4h", OQK_OPD_VARIANT
},
706 {2, 8, 0x3, "8h", OQK_OPD_VARIANT
},
707 {4, 2, 0x4, "2s", OQK_OPD_VARIANT
},
708 {4, 4, 0x5, "4s", OQK_OPD_VARIANT
},
709 {8, 1, 0x6, "1d", OQK_OPD_VARIANT
},
710 {8, 2, 0x7, "2d", OQK_OPD_VARIANT
},
711 {16, 1, 0x8, "1q", OQK_OPD_VARIANT
},
713 {0, 0, 0, "z", OQK_OPD_VARIANT
},
714 {0, 0, 0, "m", OQK_OPD_VARIANT
},
716 /* Qualifiers constraining the value range.
718 Lower bound, higher bound, unused. */
720 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE
},
721 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE
},
722 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE
},
723 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE
},
724 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE
},
725 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE
},
726 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE
},
728 /* Qualifiers for miscellaneous purpose.
730 unused, unused and unused. */
735 {0, 0, 0, "retrieving", 0},
738 static inline bfd_boolean
739 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
741 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
)
745 static inline bfd_boolean
746 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
748 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
)
753 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
755 return aarch64_opnd_qualifiers
[qualifier
].desc
;
758 /* Given an operand qualifier, return the expected data element size
759 of a qualified operand. */
761 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
763 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
764 return aarch64_opnd_qualifiers
[qualifier
].data0
;
768 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
770 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
771 return aarch64_opnd_qualifiers
[qualifier
].data1
;
775 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
777 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
778 return aarch64_opnd_qualifiers
[qualifier
].data2
;
782 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
784 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
785 return aarch64_opnd_qualifiers
[qualifier
].data0
;
789 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
791 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
792 return aarch64_opnd_qualifiers
[qualifier
].data1
;
797 aarch64_verbose (const char *str
, ...)
808 dump_qualifier_sequence (const aarch64_opnd_qualifier_t
*qualifier
)
812 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++qualifier
)
813 printf ("%s,", aarch64_get_qualifier_name (*qualifier
));
818 dump_match_qualifiers (const struct aarch64_opnd_info
*opnd
,
819 const aarch64_opnd_qualifier_t
*qualifier
)
822 aarch64_opnd_qualifier_t curr
[AARCH64_MAX_OPND_NUM
];
824 aarch64_verbose ("dump_match_qualifiers:");
825 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
826 curr
[i
] = opnd
[i
].qualifier
;
827 dump_qualifier_sequence (curr
);
828 aarch64_verbose ("against");
829 dump_qualifier_sequence (qualifier
);
831 #endif /* DEBUG_AARCH64 */
833 /* TODO improve this, we can have an extra field at the runtime to
834 store the number of operands rather than calculating it every time. */
837 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
840 const enum aarch64_opnd
*opnds
= opcode
->operands
;
841 while (opnds
[i
++] != AARCH64_OPND_NIL
)
844 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
848 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
849 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
851 N.B. on the entry, it is very likely that only some operands in *INST
852 have had their qualifiers been established.
854 If STOP_AT is not -1, the function will only try to match
855 the qualifier sequence for operands before and including the operand
856 of index STOP_AT; and on success *RET will only be filled with the first
857 (STOP_AT+1) qualifiers.
859 A couple examples of the matching algorithm:
867 Apart from serving the main encoding routine, this can also be called
868 during or after the operand decoding. */
871 aarch64_find_best_match (const aarch64_inst
*inst
,
872 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
873 int stop_at
, aarch64_opnd_qualifier_t
*ret
)
877 const aarch64_opnd_qualifier_t
*qualifiers
;
879 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
882 DEBUG_TRACE ("SUCCEED: no operand");
886 if (stop_at
< 0 || stop_at
>= num_opnds
)
887 stop_at
= num_opnds
- 1;
889 /* For each pattern. */
890 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
893 qualifiers
= *qualifiers_list
;
895 /* Start as positive. */
898 DEBUG_TRACE ("%d", i
);
901 dump_match_qualifiers (inst
->operands
, qualifiers
);
904 /* Most opcodes has much fewer patterns in the list.
905 First NIL qualifier indicates the end in the list. */
906 if (empty_qualifier_sequence_p (qualifiers
) == TRUE
)
908 DEBUG_TRACE_IF (i
== 0, "SUCCEED: empty qualifier list");
914 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
916 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
)
918 /* Either the operand does not have qualifier, or the qualifier
919 for the operand needs to be deduced from the qualifier
921 In the latter case, any constraint checking related with
922 the obtained qualifier should be done later in
923 operand_general_constraint_met_p. */
926 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
928 /* Unless the target qualifier can also qualify the operand
929 (which has already had a non-nil qualifier), non-equal
930 qualifiers are generally un-matched. */
931 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
940 continue; /* Equal qualifiers are certainly matched. */
943 /* Qualifiers established. */
950 /* Fill the result in *RET. */
952 qualifiers
= *qualifiers_list
;
954 DEBUG_TRACE ("complete qualifiers using list %d", i
);
957 dump_qualifier_sequence (qualifiers
);
960 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
961 ret
[j
] = *qualifiers
;
962 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
963 ret
[j
] = AARCH64_OPND_QLF_NIL
;
965 DEBUG_TRACE ("SUCCESS");
969 DEBUG_TRACE ("FAIL");
973 /* Operand qualifier matching and resolving.
975 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
976 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
978 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
982 match_operands_qualifier (aarch64_inst
*inst
, bfd_boolean update_p
)
985 aarch64_opnd_qualifier_seq_t qualifiers
;
987 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
990 DEBUG_TRACE ("matching FAIL");
994 if (inst
->opcode
->flags
& F_STRICT
)
996 /* Require an exact qualifier match, even for NIL qualifiers. */
997 nops
= aarch64_num_of_operands (inst
->opcode
);
998 for (i
= 0; i
< nops
; ++i
)
999 if (inst
->operands
[i
].qualifier
!= qualifiers
[i
])
1003 /* Update the qualifiers. */
1004 if (update_p
== TRUE
)
1005 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1007 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
1009 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
1010 "update %s with %s for operand %d",
1011 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
1012 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
1013 inst
->operands
[i
].qualifier
= qualifiers
[i
];
1016 DEBUG_TRACE ("matching SUCCESS");
1020 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1023 IS32 indicates whether value is a 32-bit immediate or not.
1024 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1025 amount will be returned in *SHIFT_AMOUNT. */
1028 aarch64_wide_constant_p (int64_t value
, int is32
, unsigned int *shift_amount
)
1032 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1036 /* Allow all zeros or all ones in top 32-bits, so that
1037 32-bit constant expressions like ~0x80000000 are
1039 uint64_t ext
= value
;
1040 if (ext
>> 32 != 0 && ext
>> 32 != (uint64_t) 0xffffffff)
1041 /* Immediate out of range. */
1043 value
&= (int64_t) 0xffffffff;
1046 /* first, try movz then movn */
1048 if ((value
& ((int64_t) 0xffff << 0)) == value
)
1050 else if ((value
& ((int64_t) 0xffff << 16)) == value
)
1052 else if (!is32
&& (value
& ((int64_t) 0xffff << 32)) == value
)
1054 else if (!is32
&& (value
& ((int64_t) 0xffff << 48)) == value
)
1059 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1063 if (shift_amount
!= NULL
)
1064 *shift_amount
= amount
;
1066 DEBUG_TRACE ("exit TRUE with amount %d", amount
);
1071 /* Build the accepted values for immediate logical SIMD instructions.
1073 The standard encodings of the immediate value are:
1074 N imms immr SIMD size R S
1075 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1076 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1077 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1078 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1079 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1080 0 11110s 00000r 2 UInt(r) UInt(s)
1081 where all-ones value of S is reserved.
1083 Let's call E the SIMD size.
1085 The immediate value is: S+1 bits '1' rotated to the right by R.
1087 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1088 (remember S != E - 1). */
1090 #define TOTAL_IMM_NB 5334
1095 aarch64_insn encoding
;
1096 } simd_imm_encoding
;
1098 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
/* qsort/bsearch comparator for simd_imm_encoding entries: orders the
   simd_immediates table by the replicated immediate value (.imm key),
   so that aarch64_logical_immediate_p can bsearch it.  */
1101 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
1103 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
1104 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
1106 if (imm1
->imm
< imm2
->imm
)
1108 if (imm1
->imm
> imm2
->imm
)
/* Pack the standard 13-bit encoding of a bitmask immediate:
   imm13<12> imm13<5:0> imm13<11:6>   SIMD size   R      S
      1        ssssss     rrrrrr        64       rrrrrr ssssss
      0        0sssss     0rrrrr        32       rrrrr  sssss
      0        10ssss     00rrrr        16       rrrr   ssss
      0        110sss     000rrr         8       rrr    sss
      0        1110ss     0000rr         4       rr     ss
      0        11110s     00000r         2       r      s  */
static inline uint32_t
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  uint32_t enc = s;             /* S in the low six bits.  */
  enc |= r << 6;                /* R above it.  */
  enc |= (uint32_t) is64 << 12; /* N flag selects the 64-bit pattern.  */
  return enc;
}
1128 build_immediate_table (void)
1130 uint32_t log_e
, e
, s
, r
, s_mask
;
1136 for (log_e
= 1; log_e
<= 6; log_e
++)
1138 /* Get element size. */
1143 mask
= 0xffffffffffffffffull
;
1149 mask
= (1ull << e
) - 1;
1151 1 ((1 << 4) - 1) << 2 = 111100
1152 2 ((1 << 3) - 1) << 3 = 111000
1153 3 ((1 << 2) - 1) << 4 = 110000
1154 4 ((1 << 1) - 1) << 5 = 100000
1155 5 ((1 << 0) - 1) << 6 = 000000 */
1156 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1158 for (s
= 0; s
< e
- 1; s
++)
1159 for (r
= 0; r
< e
; r
++)
1161 /* s+1 consecutive bits to 1 (s < 63) */
1162 imm
= (1ull << (s
+ 1)) - 1;
1163 /* rotate right by r */
1165 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1166 /* replicate the constant depending on SIMD size */
1169 case 1: imm
= (imm
<< 2) | imm
;
1171 case 2: imm
= (imm
<< 4) | imm
;
1173 case 3: imm
= (imm
<< 8) | imm
;
1175 case 4: imm
= (imm
<< 16) | imm
;
1177 case 5: imm
= (imm
<< 32) | imm
;
1182 simd_immediates
[nb_imms
].imm
= imm
;
1183 simd_immediates
[nb_imms
].encoding
=
1184 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1188 assert (nb_imms
== TOTAL_IMM_NB
);
1189 qsort(simd_immediates
, nb_imms
,
1190 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1193 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1194 be accepted by logical (immediate) instructions
1195 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1197 ESIZE is the number of bytes in the decoded immediate value.
1198 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1199 VALUE will be returned in *ENCODING. */
1202 aarch64_logical_immediate_p (uint64_t value
, int esize
, aarch64_insn
*encoding
)
1204 simd_imm_encoding imm_enc
;
1205 const simd_imm_encoding
*imm_encoding
;
1206 static bfd_boolean initialized
= FALSE
;
1210 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), esize: %d", value
,
1215 build_immediate_table ();
1219 /* Allow all zeros or all ones in top bits, so that
1220 constant expressions like ~1 are permitted. */
1221 upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
1222 if ((value
& ~upper
) != value
&& (value
| upper
) != value
)
1225 /* Replicate to a full 64-bit value. */
1227 for (i
= esize
* 8; i
< 64; i
*= 2)
1228 value
|= (value
<< i
);
1230 imm_enc
.imm
= value
;
1231 imm_encoding
= (const simd_imm_encoding
*)
1232 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1233 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1234 if (imm_encoding
== NULL
)
1236 DEBUG_TRACE ("exit with FALSE");
1239 if (encoding
!= NULL
)
1240 *encoding
= imm_encoding
->encoding
;
1241 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	/* Byte i is all-ones: set the corresponding result bit, with the
	   least significant byte ("hhhhhhhh") mapping to result bit 0.  */
	ret |= 1 << i;
      else if (byte != 0x00)
	/* A byte that is neither all-ones nor all-zeros cannot have come
	   from expanding an 8-bit immediate.  */
	return -1;
    }
  return ret;
}
1267 /* Utility inline functions for operand_general_constraint_met_p. */
1270 set_error (aarch64_operand_error
*mismatch_detail
,
1271 enum aarch64_operand_error_kind kind
, int idx
,
1274 if (mismatch_detail
== NULL
)
1276 mismatch_detail
->kind
= kind
;
1277 mismatch_detail
->index
= idx
;
1278 mismatch_detail
->error
= error
;
1282 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1285 if (mismatch_detail
== NULL
)
1287 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1291 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1292 int idx
, int lower_bound
, int upper_bound
,
1295 if (mismatch_detail
== NULL
)
1297 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1298 mismatch_detail
->data
[0] = lower_bound
;
1299 mismatch_detail
->data
[1] = upper_bound
;
1303 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1304 int idx
, int lower_bound
, int upper_bound
)
1306 if (mismatch_detail
== NULL
)
1308 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1309 _("immediate value"));
1313 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1314 int idx
, int lower_bound
, int upper_bound
)
1316 if (mismatch_detail
== NULL
)
1318 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1319 _("immediate offset"));
1323 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1324 int idx
, int lower_bound
, int upper_bound
)
1326 if (mismatch_detail
== NULL
)
1328 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1329 _("register number"));
1333 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1334 int idx
, int lower_bound
, int upper_bound
)
1336 if (mismatch_detail
== NULL
)
1338 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1339 _("register element index"));
1343 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1344 int idx
, int lower_bound
, int upper_bound
)
1346 if (mismatch_detail
== NULL
)
1348 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1352 /* Report that the MUL modifier in operand IDX should be in the range
1353 [LOWER_BOUND, UPPER_BOUND]. */
1355 set_multiplier_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1356 int idx
, int lower_bound
, int upper_bound
)
1358 if (mismatch_detail
== NULL
)
1360 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1365 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1368 if (mismatch_detail
== NULL
)
1370 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1371 mismatch_detail
->data
[0] = alignment
;
1375 set_reg_list_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1378 if (mismatch_detail
== NULL
)
1380 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST
, idx
, NULL
);
1381 mismatch_detail
->data
[0] = expected_num
;
1385 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1388 if (mismatch_detail
== NULL
)
1390 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
1393 /* General constraint checking based on operand code.
1395 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1396 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1398 This function has to be called after the qualifiers for all operands
1401 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1402 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1403 of error message during the disassembling where error message is not
1404 wanted. We avoid the dynamic construction of strings of error messages
1405 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1406 use a combination of error code, static string and some integer data to
1407 represent an error. */
1410 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1411 enum aarch64_opnd type
,
1412 const aarch64_opcode
*opcode
,
1413 aarch64_operand_error
*mismatch_detail
)
1415 unsigned num
, modifiers
, shift
;
1417 int64_t imm
, min_value
, max_value
;
1418 uint64_t uvalue
, mask
;
1419 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1420 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1422 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1424 switch (aarch64_operands
[type
].op_class
)
1426 case AARCH64_OPND_CLASS_INT_REG
:
1427 /* Check pair reg constraints for cas* instructions. */
1428 if (type
== AARCH64_OPND_PAIRREG
)
1430 assert (idx
== 1 || idx
== 3);
1431 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1433 set_syntax_error (mismatch_detail
, idx
- 1,
1434 _("reg pair must start from even reg"));
1437 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1439 set_syntax_error (mismatch_detail
, idx
,
1440 _("reg pair must be contiguous"));
1446 /* <Xt> may be optional in some IC and TLBI instructions. */
1447 if (type
== AARCH64_OPND_Rt_SYS
)
1449 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1450 == AARCH64_OPND_CLASS_SYSTEM
));
1451 if (opnds
[1].present
1452 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1454 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1457 if (!opnds
[1].present
1458 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1460 set_other_error (mismatch_detail
, idx
, _("missing register"));
1466 case AARCH64_OPND_QLF_WSP
:
1467 case AARCH64_OPND_QLF_SP
:
1468 if (!aarch64_stack_pointer_p (opnd
))
1470 set_other_error (mismatch_detail
, idx
,
1471 _("stack pointer register expected"));
1480 case AARCH64_OPND_CLASS_SVE_REG
:
1483 case AARCH64_OPND_SVE_Zm3_INDEX
:
1484 case AARCH64_OPND_SVE_Zm3_22_INDEX
:
1485 case AARCH64_OPND_SVE_Zm4_INDEX
:
1486 size
= get_operand_fields_width (get_operand_from_code (type
));
1487 shift
= get_operand_specific_data (&aarch64_operands
[type
]);
1488 mask
= (1 << shift
) - 1;
1489 if (opnd
->reg
.regno
> mask
)
1491 assert (mask
== 7 || mask
== 15);
1492 set_other_error (mismatch_detail
, idx
,
1494 ? _("z0-z15 expected")
1495 : _("z0-z7 expected"));
1498 mask
= (1 << (size
- shift
)) - 1;
1499 if (!value_in_range_p (opnd
->reglane
.index
, 0, mask
))
1501 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, mask
);
1506 case AARCH64_OPND_SVE_Zn_INDEX
:
1507 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1508 if (!value_in_range_p (opnd
->reglane
.index
, 0, 64 / size
- 1))
1510 set_elem_idx_out_of_range_error (mismatch_detail
, idx
,
1516 case AARCH64_OPND_SVE_ZnxN
:
1517 case AARCH64_OPND_SVE_ZtxN
:
1518 if (opnd
->reglist
.num_regs
!= get_opcode_dependent_value (opcode
))
1520 set_other_error (mismatch_detail
, idx
,
1521 _("invalid register list"));
1531 case AARCH64_OPND_CLASS_PRED_REG
:
1532 if (opnd
->reg
.regno
>= 8
1533 && get_operand_fields_width (get_operand_from_code (type
)) == 3)
1535 set_other_error (mismatch_detail
, idx
, _("p0-p7 expected"));
1540 case AARCH64_OPND_CLASS_COND
:
1541 if (type
== AARCH64_OPND_COND1
1542 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
1544 /* Not allow AL or NV. */
1545 set_syntax_error (mismatch_detail
, idx
, NULL
);
1549 case AARCH64_OPND_CLASS_ADDRESS
:
1550 /* Check writeback. */
1551 switch (opcode
->iclass
)
1555 case ldstnapair_offs
:
1558 if (opnd
->addr
.writeback
== 1)
1560 set_syntax_error (mismatch_detail
, idx
,
1561 _("unexpected address writeback"));
1566 if (opnd
->addr
.writeback
== 1 && opnd
->addr
.preind
!= 1)
1568 set_syntax_error (mismatch_detail
, idx
,
1569 _("unexpected address writeback"));
1574 case ldstpair_indexed
:
1577 if (opnd
->addr
.writeback
== 0)
1579 set_syntax_error (mismatch_detail
, idx
,
1580 _("address writeback expected"));
1585 assert (opnd
->addr
.writeback
== 0);
1590 case AARCH64_OPND_ADDR_SIMM7
:
1591 /* Scaled signed 7 bits immediate offset. */
1592 /* Get the size of the data element that is accessed, which may be
1593 different from that of the source register size,
1594 e.g. in strb/ldrb. */
1595 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1596 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
1598 set_offset_out_of_range_error (mismatch_detail
, idx
,
1599 -64 * size
, 63 * size
);
1602 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1604 set_unaligned_error (mismatch_detail
, idx
, size
);
1608 case AARCH64_OPND_ADDR_OFFSET
:
1609 case AARCH64_OPND_ADDR_SIMM9
:
1610 /* Unscaled signed 9 bits immediate offset. */
1611 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
1613 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
1618 case AARCH64_OPND_ADDR_SIMM9_2
:
1619 /* Unscaled signed 9 bits immediate offset, which has to be negative
1621 size
= aarch64_get_qualifier_esize (qualifier
);
1622 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
1623 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1624 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
1626 set_other_error (mismatch_detail
, idx
,
1627 _("negative or unaligned offset expected"));
1630 case AARCH64_OPND_ADDR_SIMM10
:
1631 /* Scaled signed 10 bits immediate offset. */
1632 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -4096, 4088))
1634 set_offset_out_of_range_error (mismatch_detail
, idx
, -4096, 4088);
1637 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 8))
1639 set_unaligned_error (mismatch_detail
, idx
, 8);
1644 case AARCH64_OPND_SIMD_ADDR_POST
:
1645 /* AdvSIMD load/store multiple structures, post-index. */
1647 if (opnd
->addr
.offset
.is_reg
)
1649 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
1653 set_other_error (mismatch_detail
, idx
,
1654 _("invalid register offset"));
1660 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
1661 unsigned num_bytes
; /* total number of bytes transferred. */
1662 /* The opcode dependent area stores the number of elements in
1663 each structure to be loaded/stored. */
1664 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
1665 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
1666 /* Special handling of loading single structure to all lane. */
1667 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
1668 * aarch64_get_qualifier_esize (prev
->qualifier
);
1670 num_bytes
= prev
->reglist
.num_regs
1671 * aarch64_get_qualifier_esize (prev
->qualifier
)
1672 * aarch64_get_qualifier_nelem (prev
->qualifier
);
1673 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
1675 set_other_error (mismatch_detail
, idx
,
1676 _("invalid post-increment amount"));
1682 case AARCH64_OPND_ADDR_REGOFF
:
1683 /* Get the size of the data element that is accessed, which may be
1684 different from that of the source register size,
1685 e.g. in strb/ldrb. */
1686 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1687 /* It is either no shift or shift by the binary logarithm of SIZE. */
1688 if (opnd
->shifter
.amount
!= 0
1689 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
1691 set_other_error (mismatch_detail
, idx
,
1692 _("invalid shift amount"));
1695 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1697 switch (opnd
->shifter
.kind
)
1699 case AARCH64_MOD_UXTW
:
1700 case AARCH64_MOD_LSL
:
1701 case AARCH64_MOD_SXTW
:
1702 case AARCH64_MOD_SXTX
: break;
1704 set_other_error (mismatch_detail
, idx
,
1705 _("invalid extend/shift operator"));
1710 case AARCH64_OPND_ADDR_UIMM12
:
1711 imm
= opnd
->addr
.offset
.imm
;
1712 /* Get the size of the data element that is accessed, which may be
1713 different from that of the source register size,
1714 e.g. in strb/ldrb. */
1715 size
= aarch64_get_qualifier_esize (qualifier
);
1716 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
1718 set_offset_out_of_range_error (mismatch_detail
, idx
,
1722 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1724 set_unaligned_error (mismatch_detail
, idx
, size
);
1729 case AARCH64_OPND_ADDR_PCREL14
:
1730 case AARCH64_OPND_ADDR_PCREL19
:
1731 case AARCH64_OPND_ADDR_PCREL21
:
1732 case AARCH64_OPND_ADDR_PCREL26
:
1733 imm
= opnd
->imm
.value
;
1734 if (operand_need_shift_by_two (get_operand_from_code (type
)))
1736 /* The offset value in a PC-relative branch instruction is alway
1737 4-byte aligned and is encoded without the lowest 2 bits. */
1738 if (!value_aligned_p (imm
, 4))
1740 set_unaligned_error (mismatch_detail
, idx
, 4);
1743 /* Right shift by 2 so that we can carry out the following check
1747 size
= get_operand_fields_width (get_operand_from_code (type
));
1748 if (!value_fit_signed_field_p (imm
, size
))
1750 set_other_error (mismatch_detail
, idx
,
1751 _("immediate out of range"));
1756 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
1757 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
1758 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
1759 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
1763 assert (!opnd
->addr
.offset
.is_reg
);
1764 assert (opnd
->addr
.preind
);
1765 num
= 1 + get_operand_specific_data (&aarch64_operands
[type
]);
1768 if ((opnd
->addr
.offset
.imm
!= 0 && !opnd
->shifter
.operator_present
)
1769 || (opnd
->shifter
.operator_present
1770 && opnd
->shifter
.kind
!= AARCH64_MOD_MUL_VL
))
1772 set_other_error (mismatch_detail
, idx
,
1773 _("invalid addressing mode"));
1776 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1778 set_offset_out_of_range_error (mismatch_detail
, idx
,
1779 min_value
, max_value
);
1782 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1784 set_unaligned_error (mismatch_detail
, idx
, num
);
1789 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
1792 goto sve_imm_offset_vl
;
1794 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
1797 goto sve_imm_offset_vl
;
1799 case AARCH64_OPND_SVE_ADDR_RI_U6
:
1800 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
1801 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
1802 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
1806 assert (!opnd
->addr
.offset
.is_reg
);
1807 assert (opnd
->addr
.preind
);
1808 num
= 1 << get_operand_specific_data (&aarch64_operands
[type
]);
1811 if (opnd
->shifter
.operator_present
1812 || opnd
->shifter
.amount_present
)
1814 set_other_error (mismatch_detail
, idx
,
1815 _("invalid addressing mode"));
1818 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1820 set_offset_out_of_range_error (mismatch_detail
, idx
,
1821 min_value
, max_value
);
1824 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1826 set_unaligned_error (mismatch_detail
, idx
, num
);
1831 case AARCH64_OPND_SVE_ADDR_RI_S4x16
:
1834 goto sve_imm_offset
;
1836 case AARCH64_OPND_SVE_ADDR_RR
:
1837 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
1838 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
1839 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
1840 case AARCH64_OPND_SVE_ADDR_RX
:
1841 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
1842 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
1843 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
1844 case AARCH64_OPND_SVE_ADDR_RZ
:
1845 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
1846 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
1847 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
1848 modifiers
= 1 << AARCH64_MOD_LSL
;
1850 assert (opnd
->addr
.offset
.is_reg
);
1851 assert (opnd
->addr
.preind
);
1852 if ((aarch64_operands
[type
].flags
& OPD_F_NO_ZR
) != 0
1853 && opnd
->addr
.offset
.regno
== 31)
1855 set_other_error (mismatch_detail
, idx
,
1856 _("index register xzr is not allowed"));
1859 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1860 || (opnd
->shifter
.amount
1861 != get_operand_specific_data (&aarch64_operands
[type
])))
1863 set_other_error (mismatch_detail
, idx
,
1864 _("invalid addressing mode"));
1869 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
1870 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
1871 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
1872 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
1873 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
1874 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
1875 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
1876 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
1877 modifiers
= (1 << AARCH64_MOD_SXTW
) | (1 << AARCH64_MOD_UXTW
);
1878 goto sve_rr_operand
;
1880 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
1881 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
1882 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
1883 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
1886 goto sve_imm_offset
;
1888 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
1889 modifiers
= 1 << AARCH64_MOD_LSL
;
1891 assert (opnd
->addr
.offset
.is_reg
);
1892 assert (opnd
->addr
.preind
);
1893 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1894 || opnd
->shifter
.amount
< 0
1895 || opnd
->shifter
.amount
> 3)
1897 set_other_error (mismatch_detail
, idx
,
1898 _("invalid addressing mode"));
1903 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
1904 modifiers
= (1 << AARCH64_MOD_SXTW
);
1905 goto sve_zz_operand
;
1907 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
1908 modifiers
= 1 << AARCH64_MOD_UXTW
;
1909 goto sve_zz_operand
;
1916 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
1917 if (type
== AARCH64_OPND_LEt
)
1919 /* Get the upper bound for the element index. */
1920 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1921 if (!value_in_range_p (opnd
->reglist
.index
, 0, num
))
1923 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1927 /* The opcode dependent area stores the number of elements in
1928 each structure to be loaded/stored. */
1929 num
= get_opcode_dependent_value (opcode
);
1932 case AARCH64_OPND_LVt
:
1933 assert (num
>= 1 && num
<= 4);
1934 /* Unless LD1/ST1, the number of registers should be equal to that
1935 of the structure elements. */
1936 if (num
!= 1 && opnd
->reglist
.num_regs
!= num
)
1938 set_reg_list_error (mismatch_detail
, idx
, num
);
1942 case AARCH64_OPND_LVt_AL
:
1943 case AARCH64_OPND_LEt
:
1944 assert (num
>= 1 && num
<= 4);
1945 /* The number of registers should be equal to that of the structure
1947 if (opnd
->reglist
.num_regs
!= num
)
1949 set_reg_list_error (mismatch_detail
, idx
, num
);
1958 case AARCH64_OPND_CLASS_IMMEDIATE
:
1959 /* Constraint check on immediate operand. */
1960 imm
= opnd
->imm
.value
;
1961 /* E.g. imm_0_31 constrains value to be 0..31. */
1962 if (qualifier_value_in_range_constraint_p (qualifier
)
1963 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
1964 get_upper_bound (qualifier
)))
1966 set_imm_out_of_range_error (mismatch_detail
, idx
,
1967 get_lower_bound (qualifier
),
1968 get_upper_bound (qualifier
));
1974 case AARCH64_OPND_AIMM
:
1975 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1977 set_other_error (mismatch_detail
, idx
,
1978 _("invalid shift operator"));
1981 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
1983 set_other_error (mismatch_detail
, idx
,
1984 _("shift amount must be 0 or 12"));
1987 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
1989 set_other_error (mismatch_detail
, idx
,
1990 _("immediate out of range"));
1995 case AARCH64_OPND_HALF
:
1996 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
1997 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1999 set_other_error (mismatch_detail
, idx
,
2000 _("invalid shift operator"));
2003 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2004 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
2006 set_other_error (mismatch_detail
, idx
,
2007 _("shift amount must be a multiple of 16"));
2010 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
2012 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
2016 if (opnd
->imm
.value
< 0)
2018 set_other_error (mismatch_detail
, idx
,
2019 _("negative immediate value not allowed"));
2022 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
2024 set_other_error (mismatch_detail
, idx
,
2025 _("immediate out of range"));
2030 case AARCH64_OPND_IMM_MOV
:
2032 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2033 imm
= opnd
->imm
.value
;
2037 case OP_MOV_IMM_WIDEN
:
2040 case OP_MOV_IMM_WIDE
:
2041 if (!aarch64_wide_constant_p (imm
, esize
== 4, NULL
))
2043 set_other_error (mismatch_detail
, idx
,
2044 _("immediate out of range"));
2048 case OP_MOV_IMM_LOG
:
2049 if (!aarch64_logical_immediate_p (imm
, esize
, NULL
))
2051 set_other_error (mismatch_detail
, idx
,
2052 _("immediate out of range"));
2063 case AARCH64_OPND_NZCV
:
2064 case AARCH64_OPND_CCMP_IMM
:
2065 case AARCH64_OPND_EXCEPTION
:
2066 case AARCH64_OPND_UIMM4
:
2067 case AARCH64_OPND_UIMM7
:
2068 case AARCH64_OPND_UIMM3_OP1
:
2069 case AARCH64_OPND_UIMM3_OP2
:
2070 case AARCH64_OPND_SVE_UIMM3
:
2071 case AARCH64_OPND_SVE_UIMM7
:
2072 case AARCH64_OPND_SVE_UIMM8
:
2073 case AARCH64_OPND_SVE_UIMM8_53
:
2074 size
= get_operand_fields_width (get_operand_from_code (type
));
2076 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
2078 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2084 case AARCH64_OPND_SIMM5
:
2085 case AARCH64_OPND_SVE_SIMM5
:
2086 case AARCH64_OPND_SVE_SIMM5B
:
2087 case AARCH64_OPND_SVE_SIMM6
:
2088 case AARCH64_OPND_SVE_SIMM8
:
2089 size
= get_operand_fields_width (get_operand_from_code (type
));
2091 if (!value_fit_signed_field_p (opnd
->imm
.value
, size
))
2093 set_imm_out_of_range_error (mismatch_detail
, idx
,
2095 (1 << (size
- 1)) - 1);
2100 case AARCH64_OPND_WIDTH
:
2101 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
2102 && opnds
[0].type
== AARCH64_OPND_Rd
);
2103 size
= get_upper_bound (qualifier
);
2104 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
2105 /* lsb+width <= reg.size */
2107 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
2108 size
- opnds
[idx
-1].imm
.value
);
2113 case AARCH64_OPND_LIMM
:
2114 case AARCH64_OPND_SVE_LIMM
:
2116 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2117 uint64_t uimm
= opnd
->imm
.value
;
2118 if (opcode
->op
== OP_BIC
)
2120 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2122 set_other_error (mismatch_detail
, idx
,
2123 _("immediate out of range"));
2129 case AARCH64_OPND_IMM0
:
2130 case AARCH64_OPND_FPIMM0
:
2131 if (opnd
->imm
.value
!= 0)
2133 set_other_error (mismatch_detail
, idx
,
2134 _("immediate zero expected"));
2139 case AARCH64_OPND_IMM_ROT1
:
2140 case AARCH64_OPND_IMM_ROT2
:
2141 case AARCH64_OPND_SVE_IMM_ROT2
:
2142 if (opnd
->imm
.value
!= 0
2143 && opnd
->imm
.value
!= 90
2144 && opnd
->imm
.value
!= 180
2145 && opnd
->imm
.value
!= 270)
2147 set_other_error (mismatch_detail
, idx
,
2148 _("rotate expected to be 0, 90, 180 or 270"));
2153 case AARCH64_OPND_IMM_ROT3
:
2154 case AARCH64_OPND_SVE_IMM_ROT1
:
2155 if (opnd
->imm
.value
!= 90 && opnd
->imm
.value
!= 270)
2157 set_other_error (mismatch_detail
, idx
,
2158 _("rotate expected to be 90 or 270"));
2163 case AARCH64_OPND_SHLL_IMM
:
2165 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2166 if (opnd
->imm
.value
!= size
)
2168 set_other_error (mismatch_detail
, idx
,
2169 _("invalid shift amount"));
2174 case AARCH64_OPND_IMM_VLSL
:
2175 size
= aarch64_get_qualifier_esize (qualifier
);
2176 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
2178 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2184 case AARCH64_OPND_IMM_VLSR
:
2185 size
= aarch64_get_qualifier_esize (qualifier
);
2186 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
2188 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
2193 case AARCH64_OPND_SIMD_IMM
:
2194 case AARCH64_OPND_SIMD_IMM_SFT
:
2195 /* Qualifier check. */
2198 case AARCH64_OPND_QLF_LSL
:
2199 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2201 set_other_error (mismatch_detail
, idx
,
2202 _("invalid shift operator"));
2206 case AARCH64_OPND_QLF_MSL
:
2207 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
2209 set_other_error (mismatch_detail
, idx
,
2210 _("invalid shift operator"));
2214 case AARCH64_OPND_QLF_NIL
:
2215 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2217 set_other_error (mismatch_detail
, idx
,
2218 _("shift is not permitted"));
2226 /* Is the immediate valid? */
2228 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
2230 /* uimm8 or simm8 */
2231 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
2233 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
2237 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
2240 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2241 ffffffffgggggggghhhhhhhh'. */
2242 set_other_error (mismatch_detail
, idx
,
2243 _("invalid value for immediate"));
2246 /* Is the shift amount valid? */
2247 switch (opnd
->shifter
.kind
)
2249 case AARCH64_MOD_LSL
:
2250 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2251 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
2253 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
2257 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
2259 set_unaligned_error (mismatch_detail
, idx
, 8);
2263 case AARCH64_MOD_MSL
:
2264 /* Only 8 and 16 are valid shift amount. */
2265 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
2267 set_other_error (mismatch_detail
, idx
,
2268 _("shift amount must be 0 or 16"));
2273 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2275 set_other_error (mismatch_detail
, idx
,
2276 _("invalid shift operator"));
2283 case AARCH64_OPND_FPIMM
:
2284 case AARCH64_OPND_SIMD_FPIMM
:
2285 case AARCH64_OPND_SVE_FPIMM8
:
2286 if (opnd
->imm
.is_fp
== 0)
2288 set_other_error (mismatch_detail
, idx
,
2289 _("floating-point immediate expected"));
2292 /* The value is expected to be an 8-bit floating-point constant with
2293 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2294 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2296 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
2298 set_other_error (mismatch_detail
, idx
,
2299 _("immediate out of range"));
2302 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2304 set_other_error (mismatch_detail
, idx
,
2305 _("invalid shift operator"));
2310 case AARCH64_OPND_SVE_AIMM
:
2313 assert (opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2314 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2315 mask
= ~((uint64_t) -1 << (size
* 4) << (size
* 4));
2316 uvalue
= opnd
->imm
.value
;
2317 shift
= opnd
->shifter
.amount
;
2322 set_other_error (mismatch_detail
, idx
,
2323 _("no shift amount allowed for"
2324 " 8-bit constants"));
2330 if (shift
!= 0 && shift
!= 8)
2332 set_other_error (mismatch_detail
, idx
,
2333 _("shift amount must be 0 or 8"));
2336 if (shift
== 0 && (uvalue
& 0xff) == 0)
2339 uvalue
= (int64_t) uvalue
/ 256;
2343 if ((uvalue
& mask
) != uvalue
&& (uvalue
| ~mask
) != uvalue
)
2345 set_other_error (mismatch_detail
, idx
,
2346 _("immediate too big for element size"));
2349 uvalue
= (uvalue
- min_value
) & mask
;
2352 set_other_error (mismatch_detail
, idx
,
2353 _("invalid arithmetic immediate"));
2358 case AARCH64_OPND_SVE_ASIMM
:
2362 case AARCH64_OPND_SVE_I1_HALF_ONE
:
2363 assert (opnd
->imm
.is_fp
);
2364 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x3f800000)
2366 set_other_error (mismatch_detail
, idx
,
2367 _("floating-point value must be 0.5 or 1.0"));
2372 case AARCH64_OPND_SVE_I1_HALF_TWO
:
2373 assert (opnd
->imm
.is_fp
);
2374 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x40000000)
2376 set_other_error (mismatch_detail
, idx
,
2377 _("floating-point value must be 0.5 or 2.0"));
2382 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
2383 assert (opnd
->imm
.is_fp
);
2384 if (opnd
->imm
.value
!= 0 && opnd
->imm
.value
!= 0x3f800000)
2386 set_other_error (mismatch_detail
, idx
,
2387 _("floating-point value must be 0.0 or 1.0"));
2392 case AARCH64_OPND_SVE_INV_LIMM
:
2394 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2395 uint64_t uimm
= ~opnd
->imm
.value
;
2396 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2398 set_other_error (mismatch_detail
, idx
,
2399 _("immediate out of range"));
2405 case AARCH64_OPND_SVE_LIMM_MOV
:
2407 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2408 uint64_t uimm
= opnd
->imm
.value
;
2409 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2411 set_other_error (mismatch_detail
, idx
,
2412 _("immediate out of range"));
2415 if (!aarch64_sve_dupm_mov_immediate_p (uimm
, esize
))
2417 set_other_error (mismatch_detail
, idx
,
2418 _("invalid replicated MOV immediate"));
2424 case AARCH64_OPND_SVE_PATTERN_SCALED
:
2425 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL
);
2426 if (!value_in_range_p (opnd
->shifter
.amount
, 1, 16))
2428 set_multiplier_out_of_range_error (mismatch_detail
, idx
, 1, 16);
2433 case AARCH64_OPND_SVE_SHLIMM_PRED
:
2434 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
2435 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2436 if (!value_in_range_p (opnd
->imm
.value
, 0, 8 * size
- 1))
2438 set_imm_out_of_range_error (mismatch_detail
, idx
,
2444 case AARCH64_OPND_SVE_SHRIMM_PRED
:
2445 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
2446 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2447 if (!value_in_range_p (opnd
->imm
.value
, 1, 8 * size
))
2449 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, 8 * size
);
2459 case AARCH64_OPND_CLASS_SYSTEM
:
2462 case AARCH64_OPND_PSTATEFIELD
:
2463 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
2466 The immediate must be #0 or #1. */
2467 if ((opnd
->pstatefield
== 0x03 /* UAO. */
2468 || opnd
->pstatefield
== 0x04) /* PAN. */
2469 && opnds
[1].imm
.value
> 1)
2471 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2474 /* MSR SPSel, #uimm4
2475 Uses uimm4 as a control value to select the stack pointer: if
2476 bit 0 is set it selects the current exception level's stack
2477 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2478 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2479 if (opnd
->pstatefield
== 0x05 /* spsel */ && opnds
[1].imm
.value
> 1)
2481 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2490 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
2491 /* Get the upper bound for the element index. */
2492 if (opcode
->op
== OP_FCMLA_ELEM
)
2493 /* FCMLA index range depends on the vector size of other operands
2494 and is halfed because complex numbers take two elements. */
2495 num
= aarch64_get_qualifier_nelem (opnds
[0].qualifier
)
2496 * aarch64_get_qualifier_esize (opnds
[0].qualifier
) / 2;
2499 num
= num
/ aarch64_get_qualifier_esize (qualifier
) - 1;
2501 /* Index out-of-range. */
2502 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
2504 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2507 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2508 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2509 number is encoded in "size:M:Rm":
2515 if (type
== AARCH64_OPND_Em
&& qualifier
== AARCH64_OPND_QLF_S_H
2516 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
2518 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2523 case AARCH64_OPND_CLASS_MODIFIED_REG
:
2524 assert (idx
== 1 || idx
== 2);
2527 case AARCH64_OPND_Rm_EXT
:
2528 if (!aarch64_extend_operator_p (opnd
->shifter
.kind
)
2529 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2531 set_other_error (mismatch_detail
, idx
,
2532 _("extend operator expected"));
2535 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2536 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2537 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2539 if (!aarch64_stack_pointer_p (opnds
+ 0)
2540 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
2542 if (!opnd
->shifter
.operator_present
)
2544 set_other_error (mismatch_detail
, idx
,
2545 _("missing extend operator"));
2548 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2550 set_other_error (mismatch_detail
, idx
,
2551 _("'LSL' operator not allowed"));
2555 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
2556 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2557 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
2559 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
2562 /* In the 64-bit form, the final register operand is written as Wm
2563 for all but the (possibly omitted) UXTX/LSL and SXTX
2565 N.B. GAS allows X register to be used with any operator as a
2566 programming convenience. */
2567 if (qualifier
== AARCH64_OPND_QLF_X
2568 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
2569 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
2570 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
2572 set_other_error (mismatch_detail
, idx
, _("W register expected"));
2577 case AARCH64_OPND_Rm_SFT
:
2578 /* ROR is not available to the shifted register operand in
2579 arithmetic instructions. */
2580 if (!aarch64_shift_operator_p (opnd
->shifter
.kind
))
2582 set_other_error (mismatch_detail
, idx
,
2583 _("shift operator expected"));
2586 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
2587 && opcode
->iclass
!= log_shift
)
2589 set_other_error (mismatch_detail
, idx
,
2590 _("'ROR' operator not allowed"));
2593 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
2594 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
2596 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2613 /* Main entrypoint for the operand constraint checking.
2615 Return 1 if operands of *INST meet the constraint applied by the operand
2616 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2617 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2618 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2619 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2620 error kind when it is notified that an instruction does not pass the check).
2622 Un-determined operand qualifiers may get established during the process. */
2625 aarch64_match_operands_constraint (aarch64_inst
*inst
,
2626 aarch64_operand_error
*mismatch_detail
)
2630 DEBUG_TRACE ("enter");
2632 /* Check for cases where a source register needs to be the same as the
2633 destination register. Do this before matching qualifiers since if
2634 an instruction has both invalid tying and invalid qualifiers,
2635 the error about qualifiers would suggest several alternative
2636 instructions that also have invalid tying. */
2637 i
= inst
->opcode
->tied_operand
;
2638 if (i
> 0 && (inst
->operands
[0].reg
.regno
!= inst
->operands
[i
].reg
.regno
))
2640 if (mismatch_detail
)
2642 mismatch_detail
->kind
= AARCH64_OPDE_UNTIED_OPERAND
;
2643 mismatch_detail
->index
= i
;
2644 mismatch_detail
->error
= NULL
;
2649 /* Match operands' qualifier.
2650 *INST has already had qualifier establish for some, if not all, of
2651 its operands; we need to find out whether these established
2652 qualifiers match one of the qualifier sequence in
2653 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2654 with the corresponding qualifier in such a sequence.
2655 Only basic operand constraint checking is done here; the more thorough
2656 constraint checking will carried out by operand_general_constraint_met_p,
2657 which has be to called after this in order to get all of the operands'
2658 qualifiers established. */
2659 if (match_operands_qualifier (inst
, TRUE
/* update_p */) == 0)
2661 DEBUG_TRACE ("FAIL on operand qualifier matching");
2662 if (mismatch_detail
)
2664 /* Return an error type to indicate that it is the qualifier
2665 matching failure; we don't care about which operand as there
2666 are enough information in the opcode table to reproduce it. */
2667 mismatch_detail
->kind
= AARCH64_OPDE_INVALID_VARIANT
;
2668 mismatch_detail
->index
= -1;
2669 mismatch_detail
->error
= NULL
;
2674 /* Match operands' constraint. */
2675 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2677 enum aarch64_opnd type
= inst
->opcode
->operands
[i
];
2678 if (type
== AARCH64_OPND_NIL
)
2680 if (inst
->operands
[i
].skip
)
2682 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2685 if (operand_general_constraint_met_p (inst
->operands
, i
, type
,
2686 inst
->opcode
, mismatch_detail
) == 0)
2688 DEBUG_TRACE ("FAIL on operand %d", i
);
2693 DEBUG_TRACE ("PASS");
2698 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2699 Also updates the TYPE of each INST->OPERANDS with the corresponding
2700 value of OPCODE->OPERANDS.
2702 Note that some operand qualifiers may need to be manually cleared by
2703 the caller before it further calls the aarch64_opcode_encode; by
2704 doing this, it helps the qualifier matching facilities work
2707 const aarch64_opcode
*
2708 aarch64_replace_opcode (aarch64_inst
*inst
, const aarch64_opcode
*opcode
)
2711 const aarch64_opcode
*old
= inst
->opcode
;
2713 inst
->opcode
= opcode
;
2715 /* Update the operand types. */
2716 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2718 inst
->operands
[i
].type
= opcode
->operands
[i
];
2719 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
2723 DEBUG_TRACE ("replace %s with %s", old
->name
, opcode
->name
);
2729 aarch64_operand_index (const enum aarch64_opnd
*operands
, enum aarch64_opnd operand
)
2732 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2733 if (operands
[i
] == operand
)
2735 else if (operands
[i
] == AARCH64_OPND_NIL
)
/* Expand to the names of registers 0..30 via the helper macro R,
   with FOR31 supplying the (irregular) name of register 31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* Integer register names, indexed [has_zr][is_64][regno]:
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define W_REG(X) "w" #X
#define X_REG(X) "x" #X
  { BANK (W_REG, "wsp"), BANK (X_REG, "sp") },
  { BANK (W_REG, "wzr"), BANK (X_REG, "xzr") }
#undef W_REG
#undef X_REG
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define Z_S(X) "z" #X ".s"
#define Z_D(X) "z" #X ".d"
  BANK (Z_S, Z_S (31)), BANK (Z_D, Z_D (31))
#undef Z_S
#undef Z_D
};
#undef BANK
2771 /* Return the integer register name.
2772 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2774 static inline const char *
2775 get_int_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
, int sp_reg_p
)
2777 const int has_zr
= sp_reg_p
? 0 : 1;
2778 const int is_64
= aarch64_get_qualifier_esize (qualifier
) == 4 ? 0 : 1;
2779 return int_reg
[has_zr
][is_64
][regno
];
2782 /* Like get_int_reg_name, but IS_64 is always 1. */
2784 static inline const char *
2785 get_64bit_int_reg_name (int regno
, int sp_reg_p
)
2787 const int has_zr
= sp_reg_p
? 0 : 1;
2788 return int_reg
[has_zr
][1][regno
];
2791 /* Get the name of the integer offset register in OPND, using the shift type
2792 to decide whether it's a word or doubleword. */
2794 static inline const char *
2795 get_offset_int_reg_name (const aarch64_opnd_info
*opnd
)
2797 switch (opnd
->shifter
.kind
)
2799 case AARCH64_MOD_UXTW
:
2800 case AARCH64_MOD_SXTW
:
2801 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_W
, 0);
2803 case AARCH64_MOD_LSL
:
2804 case AARCH64_MOD_SXTX
:
2805 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_X
, 0);
2812 /* Get the name of the SVE vector offset register in OPND, using the operand
2813 qualifier to decide whether the suffix should be .S or .D. */
2815 static inline const char *
2816 get_addr_sve_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
)
2818 assert (qualifier
== AARCH64_OPND_QLF_S_S
2819 || qualifier
== AARCH64_OPND_QLF_S_D
);
2820 return sve_reg
[qualifier
== AARCH64_OPND_QLF_S_D
][regno
];
2823 /* Types for expanding an encoded 8-bit value to a floating-point value. */
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t expanded = 0;
  /* Decompose the encoding.  */
  uint32_t sign = (imm8 >> 7) & 0x01;		/* imm8<7>   */
  uint32_t low7 = imm8 & 0x7f;			/* imm8<6:0> */
  uint32_t bit6 = low7 >> 6;			/* imm8<6>   */
  uint32_t bit6_repl4 = bit6 ? 0xf : 0;		/* Replicate(imm8<6>,4) */

  if (size == 8)
    {
      /* Assemble the high 32 bits of the double, then shift into place.
	 The exponent field replicates imm8<6> seven times (bits 29..23),
	 with bit 30 holding NOT(imm8<6>).  */
      expanded = ((uint64_t) sign << 31)		/* imm8<7> */
	| ((uint64_t) (bit6 ^ 1) << 30)			/* NOT(imm8<6>) */
	| ((uint64_t) bit6_repl4 << 26)
	| ((uint64_t) bit6 << 25)
	| ((uint64_t) bit6 << 24)
	| ((uint64_t) bit6 << 23)			/* Replicate(imm8<6>,7) */
	| ((uint64_t) low7 << 16);			/* imm8<6>:imm8<5:0> */
      expanded <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      /* Single precision (halves are expanded to singles too).  */
      expanded = ((uint64_t) sign << 31)		/* imm8<7> */
	| ((uint64_t) (bit6 ^ 1) << 30)			/* NOT(imm8<6>) */
	| ((uint64_t) bit6_repl4 << 26)			/* Replicate(imm8<6>,4) */
	| ((uint64_t) low7 << 19);			/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return expanded;
}
2887 /* Produce the string representation of the register list operand *OPND
2888 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2889 the register name that comes before the register number, such as "v". */
2891 print_register_list (char *buf
, size_t size
, const aarch64_opnd_info
*opnd
,
2894 const int num_regs
= opnd
->reglist
.num_regs
;
2895 const int first_reg
= opnd
->reglist
.first_regno
;
2896 const int last_reg
= (first_reg
+ num_regs
- 1) & 0x1f;
2897 const char *qlf_name
= aarch64_get_qualifier_name (opnd
->qualifier
);
2898 char tb
[8]; /* Temporary buffer. */
2900 assert (opnd
->type
!= AARCH64_OPND_LEt
|| opnd
->reglist
.has_index
);
2901 assert (num_regs
>= 1 && num_regs
<= 4);
2903 /* Prepare the index if any. */
2904 if (opnd
->reglist
.has_index
)
2905 /* PR 21096: The %100 is to silence a warning about possible truncation. */
2906 snprintf (tb
, 8, "[%" PRIi64
"]", (opnd
->reglist
.index
% 100));
2910 /* The hyphenated form is preferred for disassembly if there are
2911 more than two registers in the list, and the register numbers
2912 are monotonically increasing in increments of one. */
2913 if (num_regs
> 2 && last_reg
> first_reg
)
2914 snprintf (buf
, size
, "{%s%d.%s-%s%d.%s}%s", prefix
, first_reg
, qlf_name
,
2915 prefix
, last_reg
, qlf_name
, tb
);
2918 const int reg0
= first_reg
;
2919 const int reg1
= (first_reg
+ 1) & 0x1f;
2920 const int reg2
= (first_reg
+ 2) & 0x1f;
2921 const int reg3
= (first_reg
+ 3) & 0x1f;
2926 snprintf (buf
, size
, "{%s%d.%s}%s", prefix
, reg0
, qlf_name
, tb
);
2929 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s}%s", prefix
, reg0
, qlf_name
,
2930 prefix
, reg1
, qlf_name
, tb
);
2933 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2934 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
2935 prefix
, reg2
, qlf_name
, tb
);
2938 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2939 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
2940 prefix
, reg2
, qlf_name
, prefix
, reg3
, qlf_name
, tb
);
2946 /* Print the register+immediate address in OPND to BUF, which has SIZE
2947 characters. BASE is the name of the base register. */
2950 print_immediate_offset_address (char *buf
, size_t size
,
2951 const aarch64_opnd_info
*opnd
,
2954 if (opnd
->addr
.writeback
)
2956 if (opnd
->addr
.preind
)
2957 snprintf (buf
, size
, "[%s, #%d]!", base
, opnd
->addr
.offset
.imm
);
2959 snprintf (buf
, size
, "[%s], #%d", base
, opnd
->addr
.offset
.imm
);
2963 if (opnd
->shifter
.operator_present
)
2965 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL_VL
);
2966 snprintf (buf
, size
, "[%s, #%d, mul vl]",
2967 base
, opnd
->addr
.offset
.imm
);
2969 else if (opnd
->addr
.offset
.imm
)
2970 snprintf (buf
, size
, "[%s, #%d]", base
, opnd
->addr
.offset
.imm
);
2972 snprintf (buf
, size
, "[%s]", base
);
2976 /* Produce the string representation of the register offset address operand
2977 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2978 the names of the base and offset registers. */
2980 print_register_offset_address (char *buf
, size_t size
,
2981 const aarch64_opnd_info
*opnd
,
2982 const char *base
, const char *offset
)
2984 char tb
[16]; /* Temporary buffer. */
2985 bfd_boolean print_extend_p
= TRUE
;
2986 bfd_boolean print_amount_p
= TRUE
;
2987 const char *shift_name
= aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
2989 if (!opnd
->shifter
.amount
&& (opnd
->qualifier
!= AARCH64_OPND_QLF_S_B
2990 || !opnd
->shifter
.amount_present
))
2992 /* Not print the shift/extend amount when the amount is zero and
2993 when it is not the special case of 8-bit load/store instruction. */
2994 print_amount_p
= FALSE
;
2995 /* Likewise, no need to print the shift operator LSL in such a
2997 if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2998 print_extend_p
= FALSE
;
3001 /* Prepare for the extend/shift. */
3005 snprintf (tb
, sizeof (tb
), ", %s #%" PRIi64
, shift_name
,
3006 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3007 (opnd
->shifter
.amount
% 100));
3009 snprintf (tb
, sizeof (tb
), ", %s", shift_name
);
3014 snprintf (buf
, size
, "[%s, %s%s]", base
, offset
, tb
);
3017 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3018 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3019 PC, PCREL_P and ADDRESS are used to pass in and return information about
3020 the PC-relative address calculation, where the PC value is passed in
3021 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3022 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3023 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3025 The function serves both the disassembler and the assembler diagnostics
3026 issuer, which is the reason why it lives in this file. */
3029 aarch64_print_operand (char *buf
, size_t size
, bfd_vma pc
,
3030 const aarch64_opcode
*opcode
,
3031 const aarch64_opnd_info
*opnds
, int idx
, int *pcrel_p
,
3034 unsigned int i
, num_conds
;
3035 const char *name
= NULL
;
3036 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
3037 enum aarch64_modifier_kind kind
;
3038 uint64_t addr
, enum_value
;
3046 case AARCH64_OPND_Rd
:
3047 case AARCH64_OPND_Rn
:
3048 case AARCH64_OPND_Rm
:
3049 case AARCH64_OPND_Rt
:
3050 case AARCH64_OPND_Rt2
:
3051 case AARCH64_OPND_Rs
:
3052 case AARCH64_OPND_Ra
:
3053 case AARCH64_OPND_Rt_SYS
:
3054 case AARCH64_OPND_PAIRREG
:
3055 case AARCH64_OPND_SVE_Rm
:
3056 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3057 the <ic_op>, therefore we use opnd->present to override the
3058 generic optional-ness information. */
3059 if (opnd
->type
== AARCH64_OPND_Rt_SYS
)
3064 /* Omit the operand, e.g. RET. */
3065 else if (optional_operand_p (opcode
, idx
)
3067 == get_optional_operand_default_value (opcode
)))
3069 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3070 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3071 snprintf (buf
, size
, "%s",
3072 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3075 case AARCH64_OPND_Rd_SP
:
3076 case AARCH64_OPND_Rn_SP
:
3077 case AARCH64_OPND_SVE_Rn_SP
:
3078 case AARCH64_OPND_Rm_SP
:
3079 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3080 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
3081 || opnd
->qualifier
== AARCH64_OPND_QLF_X
3082 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
3083 snprintf (buf
, size
, "%s",
3084 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 1));
3087 case AARCH64_OPND_Rm_EXT
:
3088 kind
= opnd
->shifter
.kind
;
3089 assert (idx
== 1 || idx
== 2);
3090 if ((aarch64_stack_pointer_p (opnds
)
3091 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
3092 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
3093 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
3094 && kind
== AARCH64_MOD_UXTW
)
3095 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
3096 && kind
== AARCH64_MOD_UXTX
)))
3098 /* 'LSL' is the preferred form in this case. */
3099 kind
= AARCH64_MOD_LSL
;
3100 if (opnd
->shifter
.amount
== 0)
3102 /* Shifter omitted. */
3103 snprintf (buf
, size
, "%s",
3104 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3108 if (opnd
->shifter
.amount
)
3109 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3110 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3111 aarch64_operand_modifiers
[kind
].name
,
3112 opnd
->shifter
.amount
);
3114 snprintf (buf
, size
, "%s, %s",
3115 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3116 aarch64_operand_modifiers
[kind
].name
);
3119 case AARCH64_OPND_Rm_SFT
:
3120 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3121 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3122 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3123 snprintf (buf
, size
, "%s",
3124 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3126 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3127 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3128 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3129 opnd
->shifter
.amount
);
3132 case AARCH64_OPND_Fd
:
3133 case AARCH64_OPND_Fn
:
3134 case AARCH64_OPND_Fm
:
3135 case AARCH64_OPND_Fa
:
3136 case AARCH64_OPND_Ft
:
3137 case AARCH64_OPND_Ft2
:
3138 case AARCH64_OPND_Sd
:
3139 case AARCH64_OPND_Sn
:
3140 case AARCH64_OPND_Sm
:
3141 case AARCH64_OPND_SVE_VZn
:
3142 case AARCH64_OPND_SVE_Vd
:
3143 case AARCH64_OPND_SVE_Vm
:
3144 case AARCH64_OPND_SVE_Vn
:
3145 snprintf (buf
, size
, "%s%d", aarch64_get_qualifier_name (opnd
->qualifier
),
3149 case AARCH64_OPND_Va
:
3150 case AARCH64_OPND_Vd
:
3151 case AARCH64_OPND_Vn
:
3152 case AARCH64_OPND_Vm
:
3153 snprintf (buf
, size
, "v%d.%s", opnd
->reg
.regno
,
3154 aarch64_get_qualifier_name (opnd
->qualifier
));
3157 case AARCH64_OPND_Ed
:
3158 case AARCH64_OPND_En
:
3159 case AARCH64_OPND_Em
:
3160 case AARCH64_OPND_SM3_IMM2
:
3161 snprintf (buf
, size
, "v%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3162 aarch64_get_qualifier_name (opnd
->qualifier
),
3163 opnd
->reglane
.index
);
3166 case AARCH64_OPND_VdD1
:
3167 case AARCH64_OPND_VnD1
:
3168 snprintf (buf
, size
, "v%d.d[1]", opnd
->reg
.regno
);
3171 case AARCH64_OPND_LVn
:
3172 case AARCH64_OPND_LVt
:
3173 case AARCH64_OPND_LVt_AL
:
3174 case AARCH64_OPND_LEt
:
3175 print_register_list (buf
, size
, opnd
, "v");
3178 case AARCH64_OPND_SVE_Pd
:
3179 case AARCH64_OPND_SVE_Pg3
:
3180 case AARCH64_OPND_SVE_Pg4_5
:
3181 case AARCH64_OPND_SVE_Pg4_10
:
3182 case AARCH64_OPND_SVE_Pg4_16
:
3183 case AARCH64_OPND_SVE_Pm
:
3184 case AARCH64_OPND_SVE_Pn
:
3185 case AARCH64_OPND_SVE_Pt
:
3186 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3187 snprintf (buf
, size
, "p%d", opnd
->reg
.regno
);
3188 else if (opnd
->qualifier
== AARCH64_OPND_QLF_P_Z
3189 || opnd
->qualifier
== AARCH64_OPND_QLF_P_M
)
3190 snprintf (buf
, size
, "p%d/%s", opnd
->reg
.regno
,
3191 aarch64_get_qualifier_name (opnd
->qualifier
));
3193 snprintf (buf
, size
, "p%d.%s", opnd
->reg
.regno
,
3194 aarch64_get_qualifier_name (opnd
->qualifier
));
3197 case AARCH64_OPND_SVE_Za_5
:
3198 case AARCH64_OPND_SVE_Za_16
:
3199 case AARCH64_OPND_SVE_Zd
:
3200 case AARCH64_OPND_SVE_Zm_5
:
3201 case AARCH64_OPND_SVE_Zm_16
:
3202 case AARCH64_OPND_SVE_Zn
:
3203 case AARCH64_OPND_SVE_Zt
:
3204 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3205 snprintf (buf
, size
, "z%d", opnd
->reg
.regno
);
3207 snprintf (buf
, size
, "z%d.%s", opnd
->reg
.regno
,
3208 aarch64_get_qualifier_name (opnd
->qualifier
));
3211 case AARCH64_OPND_SVE_ZnxN
:
3212 case AARCH64_OPND_SVE_ZtxN
:
3213 print_register_list (buf
, size
, opnd
, "z");
3216 case AARCH64_OPND_SVE_Zm3_INDEX
:
3217 case AARCH64_OPND_SVE_Zm3_22_INDEX
:
3218 case AARCH64_OPND_SVE_Zm4_INDEX
:
3219 case AARCH64_OPND_SVE_Zn_INDEX
:
3220 snprintf (buf
, size
, "z%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3221 aarch64_get_qualifier_name (opnd
->qualifier
),
3222 opnd
->reglane
.index
);
3225 case AARCH64_OPND_CRn
:
3226 case AARCH64_OPND_CRm
:
3227 snprintf (buf
, size
, "C%" PRIi64
, opnd
->imm
.value
);
3230 case AARCH64_OPND_IDX
:
3231 case AARCH64_OPND_MASK
:
3232 case AARCH64_OPND_IMM
:
3233 case AARCH64_OPND_IMM_2
:
3234 case AARCH64_OPND_WIDTH
:
3235 case AARCH64_OPND_UIMM3_OP1
:
3236 case AARCH64_OPND_UIMM3_OP2
:
3237 case AARCH64_OPND_BIT_NUM
:
3238 case AARCH64_OPND_IMM_VLSL
:
3239 case AARCH64_OPND_IMM_VLSR
:
3240 case AARCH64_OPND_SHLL_IMM
:
3241 case AARCH64_OPND_IMM0
:
3242 case AARCH64_OPND_IMMR
:
3243 case AARCH64_OPND_IMMS
:
3244 case AARCH64_OPND_FBITS
:
3245 case AARCH64_OPND_SIMM5
:
3246 case AARCH64_OPND_SVE_SHLIMM_PRED
:
3247 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
3248 case AARCH64_OPND_SVE_SHRIMM_PRED
:
3249 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
3250 case AARCH64_OPND_SVE_SIMM5
:
3251 case AARCH64_OPND_SVE_SIMM5B
:
3252 case AARCH64_OPND_SVE_SIMM6
:
3253 case AARCH64_OPND_SVE_SIMM8
:
3254 case AARCH64_OPND_SVE_UIMM3
:
3255 case AARCH64_OPND_SVE_UIMM7
:
3256 case AARCH64_OPND_SVE_UIMM8
:
3257 case AARCH64_OPND_SVE_UIMM8_53
:
3258 case AARCH64_OPND_IMM_ROT1
:
3259 case AARCH64_OPND_IMM_ROT2
:
3260 case AARCH64_OPND_IMM_ROT3
:
3261 case AARCH64_OPND_SVE_IMM_ROT1
:
3262 case AARCH64_OPND_SVE_IMM_ROT2
:
3263 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3266 case AARCH64_OPND_SVE_I1_HALF_ONE
:
3267 case AARCH64_OPND_SVE_I1_HALF_TWO
:
3268 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
3271 c
.i
= opnd
->imm
.value
;
3272 snprintf (buf
, size
, "#%.1f", c
.f
);
3276 case AARCH64_OPND_SVE_PATTERN
:
3277 if (optional_operand_p (opcode
, idx
)
3278 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3280 enum_value
= opnd
->imm
.value
;
3281 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3282 if (aarch64_sve_pattern_array
[enum_value
])
3283 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[enum_value
]);
3285 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3288 case AARCH64_OPND_SVE_PATTERN_SCALED
:
3289 if (optional_operand_p (opcode
, idx
)
3290 && !opnd
->shifter
.operator_present
3291 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3293 enum_value
= opnd
->imm
.value
;
3294 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3295 if (aarch64_sve_pattern_array
[opnd
->imm
.value
])
3296 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[opnd
->imm
.value
]);
3298 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3299 if (opnd
->shifter
.operator_present
)
3301 size_t len
= strlen (buf
);
3302 snprintf (buf
+ len
, size
- len
, ", %s #%" PRIi64
,
3303 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3304 opnd
->shifter
.amount
);
3308 case AARCH64_OPND_SVE_PRFOP
:
3309 enum_value
= opnd
->imm
.value
;
3310 assert (enum_value
< ARRAY_SIZE (aarch64_sve_prfop_array
));
3311 if (aarch64_sve_prfop_array
[enum_value
])
3312 snprintf (buf
, size
, "%s", aarch64_sve_prfop_array
[enum_value
]);
3314 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3317 case AARCH64_OPND_IMM_MOV
:
3318 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3320 case 4: /* e.g. MOV Wd, #<imm32>. */
3322 int imm32
= opnd
->imm
.value
;
3323 snprintf (buf
, size
, "#0x%-20x\t// #%d", imm32
, imm32
);
3326 case 8: /* e.g. MOV Xd, #<imm64>. */
3327 snprintf (buf
, size
, "#0x%-20" PRIx64
"\t// #%" PRIi64
,
3328 opnd
->imm
.value
, opnd
->imm
.value
);
3330 default: assert (0);
3334 case AARCH64_OPND_FPIMM0
:
3335 snprintf (buf
, size
, "#0.0");
3338 case AARCH64_OPND_LIMM
:
3339 case AARCH64_OPND_AIMM
:
3340 case AARCH64_OPND_HALF
:
3341 case AARCH64_OPND_SVE_INV_LIMM
:
3342 case AARCH64_OPND_SVE_LIMM
:
3343 case AARCH64_OPND_SVE_LIMM_MOV
:
3344 if (opnd
->shifter
.amount
)
3345 snprintf (buf
, size
, "#0x%" PRIx64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3346 opnd
->shifter
.amount
);
3348 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3351 case AARCH64_OPND_SIMD_IMM
:
3352 case AARCH64_OPND_SIMD_IMM_SFT
:
3353 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3354 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
3355 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3357 snprintf (buf
, size
, "#0x%" PRIx64
", %s #%" PRIi64
, opnd
->imm
.value
,
3358 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3359 opnd
->shifter
.amount
);
3362 case AARCH64_OPND_SVE_AIMM
:
3363 case AARCH64_OPND_SVE_ASIMM
:
3364 if (opnd
->shifter
.amount
)
3365 snprintf (buf
, size
, "#%" PRIi64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3366 opnd
->shifter
.amount
);
3368 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3371 case AARCH64_OPND_FPIMM
:
3372 case AARCH64_OPND_SIMD_FPIMM
:
3373 case AARCH64_OPND_SVE_FPIMM8
:
3374 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3376 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3379 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
3380 snprintf (buf
, size
, "#%.18e", c
.f
);
3383 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3386 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
3387 snprintf (buf
, size
, "#%.18e", c
.f
);
3390 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3393 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
3394 snprintf (buf
, size
, "#%.18e", c
.d
);
3397 default: assert (0);
3401 case AARCH64_OPND_CCMP_IMM
:
3402 case AARCH64_OPND_NZCV
:
3403 case AARCH64_OPND_EXCEPTION
:
3404 case AARCH64_OPND_UIMM4
:
3405 case AARCH64_OPND_UIMM7
:
3406 if (optional_operand_p (opcode
, idx
) == TRUE
3407 && (opnd
->imm
.value
==
3408 (int64_t) get_optional_operand_default_value (opcode
)))
3409 /* Omit the operand, e.g. DCPS1. */
3411 snprintf (buf
, size
, "#0x%x", (unsigned int)opnd
->imm
.value
);
3414 case AARCH64_OPND_COND
:
3415 case AARCH64_OPND_COND1
:
3416 snprintf (buf
, size
, "%s", opnd
->cond
->names
[0]);
3417 num_conds
= ARRAY_SIZE (opnd
->cond
->names
);
3418 for (i
= 1; i
< num_conds
&& opnd
->cond
->names
[i
]; ++i
)
3420 size_t len
= strlen (buf
);
3422 snprintf (buf
+ len
, size
- len
, " // %s = %s",
3423 opnd
->cond
->names
[0], opnd
->cond
->names
[i
]);
3425 snprintf (buf
+ len
, size
- len
, ", %s",
3426 opnd
->cond
->names
[i
]);
3430 case AARCH64_OPND_ADDR_ADRP
:
3431 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
3437 /* This is not necessary during the disassembling, as print_address_func
3438 in the disassemble_info will take care of the printing. But some
3439 other callers may be still interested in getting the string in *STR,
3440 so here we do snprintf regardless. */
3441 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3444 case AARCH64_OPND_ADDR_PCREL14
:
3445 case AARCH64_OPND_ADDR_PCREL19
:
3446 case AARCH64_OPND_ADDR_PCREL21
:
3447 case AARCH64_OPND_ADDR_PCREL26
:
3448 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
3453 /* This is not necessary during the disassembling, as print_address_func
3454 in the disassemble_info will take care of the printing. But some
3455 other callers may be still interested in getting the string in *STR,
3456 so here we do snprintf regardless. */
3457 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3460 case AARCH64_OPND_ADDR_SIMPLE
:
3461 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
3462 case AARCH64_OPND_SIMD_ADDR_POST
:
3463 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3464 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
3466 if (opnd
->addr
.offset
.is_reg
)
3467 snprintf (buf
, size
, "[%s], x%d", name
, opnd
->addr
.offset
.regno
);
3469 snprintf (buf
, size
, "[%s], #%d", name
, opnd
->addr
.offset
.imm
);
3472 snprintf (buf
, size
, "[%s]", name
);
3475 case AARCH64_OPND_ADDR_REGOFF
:
3476 case AARCH64_OPND_SVE_ADDR_RR
:
3477 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
3478 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
3479 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
3480 case AARCH64_OPND_SVE_ADDR_RX
:
3481 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
3482 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
3483 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
3484 print_register_offset_address
3485 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3486 get_offset_int_reg_name (opnd
));
3489 case AARCH64_OPND_SVE_ADDR_RZ
:
3490 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
3491 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
3492 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
3493 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
3494 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
3495 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
3496 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
3497 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
3498 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
3499 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
3500 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
3501 print_register_offset_address
3502 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3503 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3506 case AARCH64_OPND_ADDR_SIMM7
:
3507 case AARCH64_OPND_ADDR_SIMM9
:
3508 case AARCH64_OPND_ADDR_SIMM9_2
:
3509 case AARCH64_OPND_ADDR_SIMM10
:
3510 case AARCH64_OPND_ADDR_OFFSET
:
3511 case AARCH64_OPND_SVE_ADDR_RI_S4x16
:
3512 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
3513 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
3514 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
3515 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
3516 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
3517 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
3518 case AARCH64_OPND_SVE_ADDR_RI_U6
:
3519 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
3520 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
3521 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
3522 print_immediate_offset_address
3523 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1));
3526 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
3527 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
3528 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
3529 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
3530 print_immediate_offset_address
3532 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
));
3535 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
3536 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
3537 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
3538 print_register_offset_address
3540 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
3541 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3544 case AARCH64_OPND_ADDR_UIMM12
:
3545 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3546 if (opnd
->addr
.offset
.imm
)
3547 snprintf (buf
, size
, "[%s, #%d]", name
, opnd
->addr
.offset
.imm
);
3549 snprintf (buf
, size
, "[%s]", name
);
3552 case AARCH64_OPND_SYSREG
:
3553 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
3554 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
3555 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs
[i
]))
3557 if (aarch64_sys_regs
[i
].name
)
3558 snprintf (buf
, size
, "%s", aarch64_sys_regs
[i
].name
);
3561 /* Implementation defined system register. */
3562 unsigned int value
= opnd
->sysreg
;
3563 snprintf (buf
, size
, "s%u_%u_c%u_c%u_%u", (value
>> 14) & 0x3,
3564 (value
>> 11) & 0x7, (value
>> 7) & 0xf, (value
>> 3) & 0xf,
3569 case AARCH64_OPND_PSTATEFIELD
:
3570 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
3571 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
3573 assert (aarch64_pstatefields
[i
].name
);
3574 snprintf (buf
, size
, "%s", aarch64_pstatefields
[i
].name
);
3577 case AARCH64_OPND_SYSREG_AT
:
3578 case AARCH64_OPND_SYSREG_DC
:
3579 case AARCH64_OPND_SYSREG_IC
:
3580 case AARCH64_OPND_SYSREG_TLBI
:
3581 snprintf (buf
, size
, "%s", opnd
->sysins_op
->name
);
3584 case AARCH64_OPND_BARRIER
:
3585 snprintf (buf
, size
, "%s", opnd
->barrier
->name
);
3588 case AARCH64_OPND_BARRIER_ISB
:
3589 /* Operand can be omitted, e.g. in DCPS1. */
3590 if (! optional_operand_p (opcode
, idx
)
3591 || (opnd
->barrier
->value
3592 != get_optional_operand_default_value (opcode
)))
3593 snprintf (buf
, size
, "#0x%x", opnd
->barrier
->value
);
3596 case AARCH64_OPND_PRFOP
:
3597 if (opnd
->prfop
->name
!= NULL
)
3598 snprintf (buf
, size
, "%s", opnd
->prfop
->name
);
3600 snprintf (buf
, size
, "#0x%02x", opnd
->prfop
->value
);
3603 case AARCH64_OPND_BARRIER_PSB
:
3604 snprintf (buf
, size
, "%s", opnd
->hint_option
->name
);
/* Build the numeric identity of a system register from its
   op0:op1:CRn:CRm:op2 encoding fields.  The fields are packed starting at
   bit 5 and the whole value shifted back down, so the result occupies
   bits 13..0 and matches the encoding placed in the instruction word.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Flags used in the third field of aarch64_sys_reg / aarch64_sys_ins_reg
   entries below.  */
#define F_DEPRECATED 0x1 /* Deprecated system register.  */
#define F_ARCHEXT 0x2    /* Architecture dependent system register.  */
#define F_HASXT 0x4      /* System instruction register <Xt>
			    operand.  */

/* TODO there are two more issues need to be resolved
   1. handle read-only and write-only system registers
   2. handle cpu-implementation-defined system registers.  */
/* Table of the system registers known to the assembler/disassembler.
   Each entry holds the lower-case register name, its op0:op1:CRn:CRm:op2
   encoding built with CPENC/CPEN_, and F_* flags (F_DEPRECATED,
   F_ARCHEXT).  The /* RO * / , /* r * / and /* w * / notes record access
   restrictions that are not yet enforced (see the TODO above).  The table
   is terminated by a null-name sentinel entry.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
  { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
  { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
  { "elr_el1", CPEN_(0,C0,1), 0 },
  { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
  { "sp_el0", CPEN_(0,C1,0), 0 },
  { "spsel", CPEN_(0,C2,0), 0 },
  { "daif", CPEN_(3,C2,1), 0 },
  { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
  { "pan", CPEN_(0,C2,3), F_ARCHEXT },
  { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
  { "nzcv", CPEN_(3,C2,0), 0 },
  { "fpcr", CPEN_(3,C4,0), 0 },
  { "fpsr", CPEN_(3,C4,1), 0 },
  { "dspsr_el0", CPEN_(3,C5,0), 0 },
  { "dlr_el0", CPEN_(3,C5,1), 0 },
  { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
  { "elr_el2", CPEN_(4,C0,1), 0 },
  { "sp_el1", CPEN_(4,C1,0), 0 },
  { "spsr_irq", CPEN_(4,C3,0), 0 },
  { "spsr_abt", CPEN_(4,C3,1), 0 },
  { "spsr_und", CPEN_(4,C3,2), 0 },
  { "spsr_fiq", CPEN_(4,C3,3), 0 },
  { "spsr_el3", CPEN_(6,C0,0), 0 },
  { "elr_el3", CPEN_(6,C0,1), 0 },
  { "sp_el2", CPEN_(6,C1,0), 0 },
  { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
  { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
  { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
  { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
  { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
  { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
  { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
  { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
  { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
  { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
  { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
  { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
  { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
  { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
  { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
  { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
  { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
  { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
  { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
  { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
  { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
  { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
  { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
  { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
  { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
  { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
  { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
  { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
  { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
  { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
  { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
  { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
  { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
  { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
  { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
  { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
  { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
  { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
  { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT }, /* RO */
  { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
  { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
  { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
  { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
  { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
  { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
  { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
  { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
  { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
  { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
  { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
  { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
  { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
  { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
  { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
  { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
  { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
  { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
  { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
  { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
  { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
  { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
  { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
  { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
  { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
  { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
  { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
  { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
  { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
  { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
  { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
  { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
  { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
  { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
  { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
  { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
  { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
  { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
  { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
  { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
  { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
  { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
  { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
  { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
  { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
  { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
  { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
  { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
  { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
  { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
  { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
  { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
  { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
  { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
  { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
  { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
  { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
  { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
  { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
  { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
  { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
  { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
  { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
  { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
  { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
  { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
  { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
  { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
  { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
  { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
  { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
  { "far_el1", CPENC(3,0,C6,C0,0), 0 },
  { "far_el2", CPENC(3,4,C6,C0,0), 0 },
  { "far_el3", CPENC(3,6,C6,C0,0), 0 },
  { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
  { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
  { "par_el1", CPENC(3,0,C7,C4,0), 0 },
  { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
  { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
  { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
  { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
  { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
  { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
  { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
  { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
  { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
  { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
  { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
  { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
  { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
  { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
  { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
  { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
  { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
  { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
  { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
  { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
  { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
  { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
  { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
  { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
  { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
  { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
  { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
  { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
  { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
  { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
  { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
  { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
  { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
  { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
  { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
  { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
  { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
  { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
  { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
  { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
  { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
  { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
  { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
  { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
  { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
  { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
  { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
  { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
  { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
  { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
  { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
  { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
  { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
  { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
  { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
  { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
  { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
  { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
  { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
  { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
  { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
  { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
  { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
  { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
  { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
  { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
  { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
  { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
  { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
  { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
  { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
  { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
  { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
  { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
  { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
  { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
  { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
  { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
  { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
  { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
  { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
  { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
  { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
  { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
  { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
  { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
  { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
  { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
  { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
  { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
  { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
  { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
  { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
  { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
  { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
  { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
  { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
  { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
  { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
  { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
  { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
  { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
  { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
  { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
  { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
  { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
  { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
  { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
  { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
  { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
  { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
  { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
  { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
  { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
  { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
  { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
  { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
  { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
  { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
  { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
  { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
  { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
  { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
  { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
  { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
  { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
  { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
  { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
  { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
  { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
  { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
  { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
  { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
  { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
  { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
  { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
  { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
  { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
  { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
  { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
  { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
  { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
  { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
  { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
  { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
  { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
  { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
  { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
  { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
  { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
  { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
  { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
  { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
  { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
  { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
  { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
  { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
  { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
  { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
  { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
  { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
  { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
  { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
  { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
  { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
  { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
  { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
  { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
  { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
  { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
  { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
  { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
  { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
  { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
  { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
  { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
  { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
  { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
  { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
  { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
  { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
  { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
  { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
  { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
  { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
  { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
  { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
  { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
  { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
  { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
  { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
  { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
  { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
  { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
  { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
  { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
  { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
  { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
  { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
  { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
  { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
  { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
  { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
  { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
  { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
  { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
  { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
  { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
  { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
  { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
  { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
  { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
  { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
  { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
  { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
  { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
  { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
  { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
  { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
  { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
  { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
  { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
  { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
  { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
  { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
  { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
  { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
  { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
  { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
  { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
  { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
  { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
  { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
  { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
  { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
  { 0, CPENC(0,0,0,0,0), 0 },
};
4038 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg
*reg
)
4040 return (reg
->flags
& F_DEPRECATED
) != 0;
/* Return TRUE if the system register REG is accessible on a CPU with the
   feature set FEATURES.  Registers without F_ARCHEXT are part of the base
   architecture and always supported; the remaining checks match REG's
   encoded value against the architecture extension that introduced it.
   NOTE(review): the mangled source dropped the individual return
   statements; they are restored here following the standard
   "match value && feature missing -> return FALSE" pattern visible in the
   surviving condition lines.  */
bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  /* Registers without an architecture-extension flag are always valid.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
     ERXMISC0_EL1 AND ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  /* ARMv8.3 Pointer authentication keys.  */
  if ((reg->value == CPENC (3, 0, C2, C1, 0)
       || reg->value == CPENC (3, 0, C2, C1, 1)
       || reg->value == CPENC (3, 0, C2, C1, 2)
       || reg->value == CPENC (3, 0, C2, C1, 3)
       || reg->value == CPENC (3, 0, C2, C2, 0)
       || reg->value == CPENC (3, 0, C2, C2, 1)
       || reg->value == CPENC (3, 0, C2, C2, 2)
       || reg->value == CPENC (3, 0, C2, C2, 3)
       || reg->value == CPENC (3, 0, C2, C3, 0)
       || reg->value == CPENC (3, 0, C2, C3, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
    return FALSE;

  /* SVE: ID_AA64ZFR0_EL1, ZCR_EL1/EL2/EL3/EL12 and ZIDR_EL1.  */
  if ((reg->value == CPENC (3, 0, C0, C4, 4)
       || reg->value == CPENC (3, 0, C1, C2, 0)
       || reg->value == CPENC (3, 4, C1, C2, 0)
       || reg->value == CPENC (3, 6, C1, C2, 0)
       || reg->value == CPENC (3, 5, C1, C2, 0)
       || reg->value == CPENC (3, 0, C0, C0, 7))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
    return FALSE;

  return TRUE;
}
/* PSTATE fields accessible via MSR (immediate): name, the pstatefield
   encoding, and F_* flags.  Terminated by a null-name sentinel.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel", 0x05, 0 },
  { "daifset", 0x1e, 0 },
  { "daifclr", 0x1f, 0 },
  { "pan", 0x04, F_ARCHEXT },
  { "uao", 0x03, F_ARCHEXT },
  { 0, CPENC(0,0,0,0,0), 0 },
};
/* Return TRUE if the PSTATE field REG is accessible on a CPU with the
   feature set FEATURES.  Fields without F_ARCHEXT belong to the base
   architecture.  NOTE(review): the dropped return statements are restored
   following the same pattern as aarch64_sys_reg_supported_p.  */
bfd_boolean
aarch64_pstatefield_supported_p (const aarch64_feature_set features,
				 const aarch64_sys_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_pstatefields.  */
  if (reg->value == 0x04
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* UAO.  Values are from aarch64_pstatefields.  */
  if (reg->value == 0x03
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  return TRUE;
}
/* Operands of the IC (instruction-cache maintenance) system instruction:
   name, CPENS encoding, and F_HASXT when an <Xt> operand follows.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic [] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
/* Operands of the DC (data-cache maintenance) system instruction.
   "cvap" additionally carries F_ARCHEXT (ARMv8.2 DC CVAP); see
   aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc [] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
/* Operands of the AT (address translation) system instruction.  All take
   an <Xt> operand; "s1e1rp"/"s1e1wp" are ARMv8.2 additions (F_ARCHEXT).  */
const aarch64_sys_ins_reg aarch64_sys_regs_at [] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
/* Operands of the TLBI (TLB invalidate) system instruction.  Entries with
   F_HASXT take an <Xt> operand giving the address/ASID; the rest operate
   on the whole TLB.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi [] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
4283 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg
*sys_ins_reg
)
4285 return (sys_ins_reg
->flags
& F_HASXT
) != 0;
/* Return TRUE if the system instruction operand REG (an IC/DC/AT/TLBI
   operand) is accessible on a CPU with the feature set FEATURES.
   Operands without F_ARCHEXT are always valid.  NOTE(review): dropped
   return statements restored per the pattern used by
   aarch64_sys_reg_supported_p.  */
bfd_boolean
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const aarch64_sys_ins_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg->value == CPENS (3, C7, C12, 1)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg->value == CPENS (0, C7, C9, 0)
       || reg->value == CPENS (0, C7, C9, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  return TRUE;
}
/* Extract bit number BT of instruction word INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit field [HI:LO] of instruction word INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
/* Instruction verifier for LDPSW: reject the UNPREDICTABLE forms.
   T and T2 are the destination registers (bits 4..0 and 14..10), N the
   base register (bits 9..5).  NOTE(review): the two enclosing BIT()
   guards (bit 23 = writeback, bit 22 = load) and the return statements
   were dropped by the source extraction and are reconstructed here —
   confirm against the canonical verifier.  */
static bfd_boolean
verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
	      const aarch64_insn insn)
{
  int t  = BITS (insn, 4, 0);
  int n  = BITS (insn, 9, 5);
  int t2 = BITS (insn, 14, 10);

  if (BIT (insn, 23))
    {
      /* Write back enabled.  The base must not overlap a destination
	 (unless it is SP, register 31).  */
      if ((t == n || t2 == n) && n != 31)
	return FALSE;
    }

  if (BIT (insn, 22))
    {
      /* Load: the two destinations must differ.  */
      if (t == t2)
	return FALSE;
    }

  return TRUE;
}
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */
bfd_boolean
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  /* Mask of the bits above the ESIZE-byte element (two shifts to avoid
     undefined behaviour when esize * 8 == 64).  */
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* The upper bits must be all-zero or all-one for the value to fit
     in an ESIZE-byte element at all.  */
  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return FALSE;
  /* Narrow SVALUE to the smallest element size that can replicate the
     value; if even a single byte replicates it, DUP always wins.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return FALSE;
	}
    }
  /* NOTE(review): the division restoring DUP's 8-bit-shifted-immediate
     handling was dropped by the extraction and is reconstructed here —
     confirm against the canonical source.  */
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  /* DUP can handle signed 8-bit immediates; anything outside that range
     needs DUPM.  */
  return svalue < -128 || svalue >= 128;
}
/* Include the opcode description table as well as the operand description
   table.  VERIFIER maps a verifier name used in the table to the
   verify_* function defined above.  */
#define VERIFIER(x) verify_##x
#include "aarch64-tbl.h"