1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
31 #include "aarch64-opc.h"
#ifdef DEBUG_AARCH64
/* Non-zero to enable verbose qualifier-matching dumps in this file
   (only referenced from DEBUG_AARCH64 builds).  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37 /* Helper functions to determine which operand to be used to encode/decode
38 the size:Q fields for AdvSIMD instructions. */
40 static inline bfd_boolean
41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
43 return ((qualifier
>= AARCH64_OPND_QLF_V_8B
44 && qualifier
<= AARCH64_OPND_QLF_V_1Q
) ? TRUE
48 static inline bfd_boolean
49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
51 return ((qualifier
>= AARCH64_OPND_QLF_S_B
52 && qualifier
<= AARCH64_OPND_QLF_S_Q
) ? TRUE
62 DP_VECTOR_ACROSS_LANES
,
/* For each data pattern, the index of the operand that carries the
   significant size:Q information; indexed by enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
74 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
76 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
77 corresponds to one of a sequence of operands. */
79 static enum data_pattern
80 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
82 if (vector_qualifier_p (qualifiers
[0]) == TRUE
)
84 /* e.g. v.4s, v.4s, v.4s
85 or v.4h, v.4h, v.h[3]. */
86 if (qualifiers
[0] == qualifiers
[1]
87 && vector_qualifier_p (qualifiers
[2]) == TRUE
88 && (aarch64_get_qualifier_esize (qualifiers
[0])
89 == aarch64_get_qualifier_esize (qualifiers
[1]))
90 && (aarch64_get_qualifier_esize (qualifiers
[0])
91 == aarch64_get_qualifier_esize (qualifiers
[2])))
92 return DP_VECTOR_3SAME
;
93 /* e.g. v.8h, v.8b, v.8b.
94 or v.4s, v.4h, v.h[2].
96 if (vector_qualifier_p (qualifiers
[1]) == TRUE
97 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
98 && (aarch64_get_qualifier_esize (qualifiers
[0])
99 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
100 return DP_VECTOR_LONG
;
101 /* e.g. v.8h, v.8h, v.8b. */
102 if (qualifiers
[0] == qualifiers
[1]
103 && vector_qualifier_p (qualifiers
[2]) == TRUE
104 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
105 && (aarch64_get_qualifier_esize (qualifiers
[0])
106 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
107 && (aarch64_get_qualifier_esize (qualifiers
[0])
108 == aarch64_get_qualifier_esize (qualifiers
[1])))
109 return DP_VECTOR_WIDE
;
111 else if (fp_qualifier_p (qualifiers
[0]) == TRUE
)
113 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
114 if (vector_qualifier_p (qualifiers
[1]) == TRUE
115 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
116 return DP_VECTOR_ACROSS_LANES
;
122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123 the AdvSIMD instructions. */
124 /* N.B. it is possible to do some optimization that doesn't call
125 get_data_pattern each time when we need to select an operand. We can
126 either buffer the caculated the result or statically generate the data,
127 however, it is not obvious that the optimization will bring significant
131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
134 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
137 const aarch64_field fields
[] =
140 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
141 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
142 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
143 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
144 { 5, 19 }, /* imm19: e.g. in CBZ. */
145 { 5, 19 }, /* immhi: e.g. in ADRP. */
146 { 29, 2 }, /* immlo: e.g. in ADRP. */
147 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
148 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
149 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
150 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
151 { 0, 5 }, /* Rt: in load/store instructions. */
152 { 0, 5 }, /* Rd: in many integer instructions. */
153 { 5, 5 }, /* Rn: in many integer instructions. */
154 { 10, 5 }, /* Rt2: in load/store pair instructions. */
155 { 10, 5 }, /* Ra: in fp instructions. */
156 { 5, 3 }, /* op2: in the system instructions. */
157 { 8, 4 }, /* CRm: in the system instructions. */
158 { 12, 4 }, /* CRn: in the system instructions. */
159 { 16, 3 }, /* op1: in the system instructions. */
160 { 19, 2 }, /* op0: in the system instructions. */
161 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
162 { 12, 4 }, /* cond: condition flags as a source operand. */
163 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
164 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
165 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
166 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
167 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
168 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
169 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
170 { 12, 1 }, /* S: in load/store reg offset instructions. */
171 { 21, 2 }, /* hw: in move wide constant instructions. */
172 { 22, 2 }, /* opc: in load/store reg offset instructions. */
173 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
174 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
175 { 22, 2 }, /* type: floating point type field in fp data inst. */
176 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
177 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
178 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
179 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
180 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
181 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
182 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
183 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
184 { 5, 14 }, /* imm14: in test bit and branch instructions. */
185 { 5, 16 }, /* imm16: in exception instructions. */
186 { 0, 26 }, /* imm26: in unconditional branch instructions. */
187 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
188 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
189 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
190 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
191 { 22, 1 }, /* N: in logical (immediate) instructions. */
192 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
193 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
194 { 31, 1 }, /* sf: in integer data processing instructions. */
195 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
196 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
197 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
198 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
199 { 31, 1 }, /* b5: in the test bit and branch instructions. */
200 { 19, 5 }, /* b40: in the test bit and branch instructions. */
201 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
204 enum aarch64_operand_class
205 aarch64_get_operand_class (enum aarch64_opnd type
)
207 return aarch64_operands
[type
].op_class
;
211 aarch64_get_operand_name (enum aarch64_opnd type
)
213 return aarch64_operands
[type
].name
;
216 /* Get operand description string.
217 This is usually for the diagnosis purpose. */
219 aarch64_get_operand_desc (enum aarch64_opnd type
)
221 return aarch64_operands
[type
].desc
;
224 /* Table of all conditional affixes. */
225 const aarch64_cond aarch64_conds
[16] =
230 {{"cc", "lo", "ul"}, 0x3},
246 get_cond_from_value (aarch64_insn value
)
249 return &aarch64_conds
[(unsigned int) value
];
253 get_inverted_cond (const aarch64_cond
*cond
)
255 return &aarch64_conds
[cond
->value
^ 0x1];
258 /* Table describing the operand extension/shifting operators; indexed by
259 enum aarch64_modifier_kind.
261 The value column provides the most common values for encoding modifiers,
262 which enables table-driven encoding/decoding for the modifiers. */
263 const struct aarch64_name_value_pair aarch64_operand_modifiers
[] =
282 enum aarch64_modifier_kind
283 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
285 return desc
- aarch64_operand_modifiers
;
289 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
291 return aarch64_operand_modifiers
[kind
].value
;
294 enum aarch64_modifier_kind
295 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
296 bfd_boolean extend_p
)
298 if (extend_p
== TRUE
)
299 return AARCH64_MOD_UXTB
+ value
;
301 return AARCH64_MOD_LSL
- value
;
305 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
307 return (kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
)
311 static inline bfd_boolean
312 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
314 return (kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
)
318 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
338 /* op -> op: load = 0 instruction = 1 store = 2
340 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
341 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
342 const struct aarch64_name_value_pair aarch64_prfops
[32] =
344 { "pldl1keep", B(0, 1, 0) },
345 { "pldl1strm", B(0, 1, 1) },
346 { "pldl2keep", B(0, 2, 0) },
347 { "pldl2strm", B(0, 2, 1) },
348 { "pldl3keep", B(0, 3, 0) },
349 { "pldl3strm", B(0, 3, 1) },
352 { "plil1keep", B(1, 1, 0) },
353 { "plil1strm", B(1, 1, 1) },
354 { "plil2keep", B(1, 2, 0) },
355 { "plil2strm", B(1, 2, 1) },
356 { "plil3keep", B(1, 3, 0) },
357 { "plil3strm", B(1, 3, 1) },
360 { "pstl1keep", B(2, 1, 0) },
361 { "pstl1strm", B(2, 1, 1) },
362 { "pstl2keep", B(2, 2, 0) },
363 { "pstl2strm", B(2, 2, 1) },
364 { "pstl3keep", B(2, 3, 0) },
365 { "pstl3strm", B(2, 3, 1) },
379 /* Utilities on value constraint. */
/* Return 1 iff VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}
/* Return 1 iff VALUE is a multiple of ALIGN; ALIGN must be a power of
   two for the mask trick below to be valid.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return ((value & (align - 1)) == 0) ? 1 : 0;
}
/* A signed value fits in a field.
   Return 1 iff VALUE is representable as a WIDTH-bit two's-complement
   signed field, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}
/* An unsigned value fits in a field.
   Return 1 iff 0 <= VALUE < 2^WIDTH.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
421 /* Return 1 if OPERAND is SP or WSP. */
423 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
425 return ((aarch64_get_operand_class (operand
->type
)
426 == AARCH64_OPND_CLASS_INT_REG
)
427 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
428 && operand
->reg
.regno
== 31);
431 /* Return 1 if OPERAND is XZR or WZP. */
433 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
435 return ((aarch64_get_operand_class (operand
->type
)
436 == AARCH64_OPND_CLASS_INT_REG
)
437 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
438 && operand
->reg
.regno
== 31);
441 /* Return true if the operand *OPERAND that has the operand code
442 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
443 qualified by the qualifier TARGET. */
446 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
447 aarch64_opnd_qualifier_t target
)
449 switch (operand
->qualifier
)
451 case AARCH64_OPND_QLF_W
:
452 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
455 case AARCH64_OPND_QLF_X
:
456 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
459 case AARCH64_OPND_QLF_WSP
:
460 if (target
== AARCH64_OPND_QLF_W
461 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
464 case AARCH64_OPND_QLF_SP
:
465 if (target
== AARCH64_OPND_QLF_X
466 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
476 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
477 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
479 Return NIL if more than one expected qualifiers are found. */
481 aarch64_opnd_qualifier_t
482 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
484 const aarch64_opnd_qualifier_t known_qlf
,
491 When the known qualifier is NIL, we have to assume that there is only
492 one qualifier sequence in the *QSEQ_LIST and return the corresponding
493 qualifier directly. One scenario is that for instruction
494 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
495 which has only one possible valid qualifier sequence
497 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
498 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
500 Because the qualifier NIL has dual roles in the qualifier sequence:
501 it can mean no qualifier for the operand, or the qualifer sequence is
502 not in use (when all qualifiers in the sequence are NILs), we have to
503 handle this special case here. */
504 if (known_qlf
== AARCH64_OPND_NIL
)
506 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
507 return qseq_list
[0][idx
];
510 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
512 if (qseq_list
[i
][known_idx
] == known_qlf
)
515 /* More than one sequences are found to have KNOWN_QLF at
517 return AARCH64_OPND_NIL
;
522 return qseq_list
[saved_i
][idx
];
525 enum operand_qualifier_kind
533 /* Operand qualifier description. */
534 struct operand_qualifier_data
536 /* The usage of the three data fields depends on the qualifier kind. */
543 enum operand_qualifier_kind kind
;
546 /* Indexed by the operand qualifier enumerators. */
547 struct operand_qualifier_data aarch64_opnd_qualifiers
[] =
549 {0, 0, 0, "NIL", OQK_NIL
},
551 /* Operand variant qualifiers.
553 element size, number of elements and common value for encoding. */
555 {4, 1, 0x0, "w", OQK_OPD_VARIANT
},
556 {8, 1, 0x1, "x", OQK_OPD_VARIANT
},
557 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT
},
558 {8, 1, 0x1, "sp", OQK_OPD_VARIANT
},
560 {1, 1, 0x0, "b", OQK_OPD_VARIANT
},
561 {2, 1, 0x1, "h", OQK_OPD_VARIANT
},
562 {4, 1, 0x2, "s", OQK_OPD_VARIANT
},
563 {8, 1, 0x3, "d", OQK_OPD_VARIANT
},
564 {16, 1, 0x4, "q", OQK_OPD_VARIANT
},
566 {1, 8, 0x0, "8b", OQK_OPD_VARIANT
},
567 {1, 16, 0x1, "16b", OQK_OPD_VARIANT
},
568 {2, 4, 0x2, "4h", OQK_OPD_VARIANT
},
569 {2, 8, 0x3, "8h", OQK_OPD_VARIANT
},
570 {4, 2, 0x4, "2s", OQK_OPD_VARIANT
},
571 {4, 4, 0x5, "4s", OQK_OPD_VARIANT
},
572 {8, 1, 0x6, "1d", OQK_OPD_VARIANT
},
573 {8, 2, 0x7, "2d", OQK_OPD_VARIANT
},
574 {16, 1, 0x8, "1q", OQK_OPD_VARIANT
},
576 /* Qualifiers constraining the value range.
578 Lower bound, higher bound, unused. */
580 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE
},
581 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE
},
582 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE
},
583 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE
},
584 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE
},
585 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE
},
587 /* Qualifiers for miscellaneous purpose.
589 unused, unused and unused. */
594 {0, 0, 0, "retrieving", 0},
597 static inline bfd_boolean
598 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
600 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
)
604 static inline bfd_boolean
605 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
607 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
)
612 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
614 return aarch64_opnd_qualifiers
[qualifier
].desc
;
617 /* Given an operand qualifier, return the expected data element size
618 of a qualified operand. */
620 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
622 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
623 return aarch64_opnd_qualifiers
[qualifier
].data0
;
627 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
629 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
630 return aarch64_opnd_qualifiers
[qualifier
].data1
;
634 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
636 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
637 return aarch64_opnd_qualifiers
[qualifier
].data2
;
641 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
643 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
644 return aarch64_opnd_qualifiers
[qualifier
].data0
;
648 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
650 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
651 return aarch64_opnd_qualifiers
[qualifier
].data1
;
656 aarch64_verbose (const char *str
, ...)
667 dump_qualifier_sequence (const aarch64_opnd_qualifier_t
*qualifier
)
671 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++qualifier
)
672 printf ("%s,", aarch64_get_qualifier_name (*qualifier
));
677 dump_match_qualifiers (const struct aarch64_opnd_info
*opnd
,
678 const aarch64_opnd_qualifier_t
*qualifier
)
681 aarch64_opnd_qualifier_t curr
[AARCH64_MAX_OPND_NUM
];
683 aarch64_verbose ("dump_match_qualifiers:");
684 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
685 curr
[i
] = opnd
[i
].qualifier
;
686 dump_qualifier_sequence (curr
);
687 aarch64_verbose ("against");
688 dump_qualifier_sequence (qualifier
);
690 #endif /* DEBUG_AARCH64 */
692 /* TODO improve this, we can have an extra field at the runtime to
693 store the number of operands rather than calculating it every time. */
696 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
699 const enum aarch64_opnd
*opnds
= opcode
->operands
;
700 while (opnds
[i
++] != AARCH64_OPND_NIL
)
703 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
707 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
708 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
710 N.B. on the entry, it is very likely that only some operands in *INST
711 have had their qualifiers been established.
713 If STOP_AT is not -1, the function will only try to match
714 the qualifier sequence for operands before and including the operand
715 of index STOP_AT; and on success *RET will only be filled with the first
716 (STOP_AT+1) qualifiers.
718 A couple examples of the matching algorithm:
726 Apart from serving the main encoding routine, this can also be called
727 during or after the operand decoding. */
730 aarch64_find_best_match (const aarch64_inst
*inst
,
731 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
732 int stop_at
, aarch64_opnd_qualifier_t
*ret
)
736 const aarch64_opnd_qualifier_t
*qualifiers
;
738 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
741 DEBUG_TRACE ("SUCCEED: no operand");
745 if (stop_at
< 0 || stop_at
>= num_opnds
)
746 stop_at
= num_opnds
- 1;
748 /* For each pattern. */
749 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
752 qualifiers
= *qualifiers_list
;
754 /* Start as positive. */
757 DEBUG_TRACE ("%d", i
);
760 dump_match_qualifiers (inst
->operands
, qualifiers
);
763 /* Most opcodes has much fewer patterns in the list.
764 First NIL qualifier indicates the end in the list. */
765 if (empty_qualifier_sequence_p (qualifiers
) == TRUE
)
767 DEBUG_TRACE_IF (i
== 0, "SUCCEED: empty qualifier list");
773 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
775 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
)
777 /* Either the operand does not have qualifier, or the qualifier
778 for the operand needs to be deduced from the qualifier
780 In the latter case, any constraint checking related with
781 the obtained qualifier should be done later in
782 operand_general_constraint_met_p. */
785 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
787 /* Unless the target qualifier can also qualify the operand
788 (which has already had a non-nil qualifier), non-equal
789 qualifiers are generally un-matched. */
790 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
799 continue; /* Equal qualifiers are certainly matched. */
802 /* Qualifiers established. */
809 /* Fill the result in *RET. */
811 qualifiers
= *qualifiers_list
;
813 DEBUG_TRACE ("complete qualifiers using list %d", i
);
816 dump_qualifier_sequence (qualifiers
);
819 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
820 ret
[j
] = *qualifiers
;
821 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
822 ret
[j
] = AARCH64_OPND_QLF_NIL
;
824 DEBUG_TRACE ("SUCCESS");
828 DEBUG_TRACE ("FAIL");
832 /* Operand qualifier matching and resolving.
834 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
835 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
837 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
841 match_operands_qualifier (aarch64_inst
*inst
, bfd_boolean update_p
)
844 aarch64_opnd_qualifier_seq_t qualifiers
;
846 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
849 DEBUG_TRACE ("matching FAIL");
853 /* Update the qualifiers. */
854 if (update_p
== TRUE
)
855 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
857 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
859 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
860 "update %s with %s for operand %d",
861 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
862 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
863 inst
->operands
[i
].qualifier
= qualifiers
[i
];
866 DEBUG_TRACE ("matching SUCCESS");
870 /* Return TRUE if VALUE is a wide constant that can be moved into a general
873 IS32 indicates whether value is a 32-bit immediate or not.
874 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
875 amount will be returned in *SHIFT_AMOUNT. */
878 aarch64_wide_constant_p (int64_t value
, int is32
, unsigned int *shift_amount
)
882 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
886 /* Allow all zeros or all ones in top 32-bits, so that
887 32-bit constant expressions like ~0x80000000 are
889 uint64_t ext
= value
;
890 if (ext
>> 32 != 0 && ext
>> 32 != (uint64_t) 0xffffffff)
891 /* Immediate out of range. */
893 value
&= (int64_t) 0xffffffff;
896 /* first, try movz then movn */
898 if ((value
& ((int64_t) 0xffff << 0)) == value
)
900 else if ((value
& ((int64_t) 0xffff << 16)) == value
)
902 else if (!is32
&& (value
& ((int64_t) 0xffff << 32)) == value
)
904 else if (!is32
&& (value
& ((int64_t) 0xffff << 48)) == value
)
909 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
913 if (shift_amount
!= NULL
)
914 *shift_amount
= amount
;
916 DEBUG_TRACE ("exit TRUE with amount %d", amount
);
921 /* Build the accepted values for immediate logical SIMD instructions.
923 The standard encodings of the immediate value are:
924 N imms immr SIMD size R S
925 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
926 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
927 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
928 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
929 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
930 0 11110s 00000r 2 UInt(r) UInt(s)
931 where all-ones value of S is reserved.
933 Let's call E the SIMD size.
935 The immediate value is: S+1 bits '1' rotated to the right by R.
937 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
938 (remember S != E - 1). */
940 #define TOTAL_IMM_NB 5334
945 aarch64_insn encoding
;
948 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
951 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
953 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
954 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
956 if (imm1
->imm
< imm2
->imm
)
958 if (imm1
->imm
> imm2
->imm
)
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R             S
   1         ssssss     rrrrrr      64        rrrrrr        ssssss
   0         0sssss     0rrrrr      32        rrrrr         sssss
   0         10ssss     00rrrr      16        rrrr          ssss
   0         110sss     000rrr      8         rrr           sss
   0         1110ss     0000rr      4         rr            ss
   0         11110s     00000r      2         r             s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
978 build_immediate_table (void)
980 uint32_t log_e
, e
, s
, r
, s_mask
;
986 for (log_e
= 1; log_e
<= 6; log_e
++)
988 /* Get element size. */
993 mask
= 0xffffffffffffffffull
;
999 mask
= (1ull << e
) - 1;
1001 1 ((1 << 4) - 1) << 2 = 111100
1002 2 ((1 << 3) - 1) << 3 = 111000
1003 3 ((1 << 2) - 1) << 4 = 110000
1004 4 ((1 << 1) - 1) << 5 = 100000
1005 5 ((1 << 0) - 1) << 6 = 000000 */
1006 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1008 for (s
= 0; s
< e
- 1; s
++)
1009 for (r
= 0; r
< e
; r
++)
1011 /* s+1 consecutive bits to 1 (s < 63) */
1012 imm
= (1ull << (s
+ 1)) - 1;
1013 /* rotate right by r */
1015 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1016 /* replicate the constant depending on SIMD size */
1019 case 1: imm
= (imm
<< 2) | imm
;
1020 case 2: imm
= (imm
<< 4) | imm
;
1021 case 3: imm
= (imm
<< 8) | imm
;
1022 case 4: imm
= (imm
<< 16) | imm
;
1023 case 5: imm
= (imm
<< 32) | imm
;
1027 simd_immediates
[nb_imms
].imm
= imm
;
1028 simd_immediates
[nb_imms
].encoding
=
1029 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1033 assert (nb_imms
== TOTAL_IMM_NB
);
1034 qsort(simd_immediates
, nb_imms
,
1035 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1038 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1039 be accepted by logical (immediate) instructions
1040 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1042 IS32 indicates whether or not VALUE is a 32-bit immediate.
1043 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1044 VALUE will be returned in *ENCODING. */
1047 aarch64_logical_immediate_p (uint64_t value
, int is32
, aarch64_insn
*encoding
)
1049 simd_imm_encoding imm_enc
;
1050 const simd_imm_encoding
*imm_encoding
;
1051 static bfd_boolean initialized
= FALSE
;
1053 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), is32: %d", value
,
1056 if (initialized
== FALSE
)
1058 build_immediate_table ();
1064 /* Allow all zeros or all ones in top 32-bits, so that
1065 constant expressions like ~1 are permitted. */
1066 if (value
>> 32 != 0 && value
>> 32 != 0xffffffff)
1069 /* Replicate the 32 lower bits to the 32 upper bits. */
1070 value
&= 0xffffffff;
1071 value
|= value
<< 32;
1074 imm_enc
.imm
= value
;
1075 imm_encoding
= (const simd_imm_encoding
*)
1076 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1077 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1078 if (imm_encoding
== NULL
)
1080 DEBUG_TRACE ("exit with FALSE");
1083 if (encoding
!= NULL
)
1084 *encoding
= imm_encoding
->encoding
;
1085 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	/* A byte that is neither all-ones nor all-zeros cannot come from
	   an expanded imm8.  */
	return -1;
    }

  return ret;
}
1111 /* Utility inline functions for operand_general_constraint_met_p. */
1114 set_error (aarch64_operand_error
*mismatch_detail
,
1115 enum aarch64_operand_error_kind kind
, int idx
,
1118 if (mismatch_detail
== NULL
)
1120 mismatch_detail
->kind
= kind
;
1121 mismatch_detail
->index
= idx
;
1122 mismatch_detail
->error
= error
;
1126 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1129 if (mismatch_detail
== NULL
)
1131 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1135 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1136 int idx
, int lower_bound
, int upper_bound
,
1139 if (mismatch_detail
== NULL
)
1141 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1142 mismatch_detail
->data
[0] = lower_bound
;
1143 mismatch_detail
->data
[1] = upper_bound
;
1147 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1148 int idx
, int lower_bound
, int upper_bound
)
1150 if (mismatch_detail
== NULL
)
1152 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1153 _("immediate value"));
1157 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1158 int idx
, int lower_bound
, int upper_bound
)
1160 if (mismatch_detail
== NULL
)
1162 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1163 _("immediate offset"));
1167 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1168 int idx
, int lower_bound
, int upper_bound
)
1170 if (mismatch_detail
== NULL
)
1172 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1173 _("register number"));
1177 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1178 int idx
, int lower_bound
, int upper_bound
)
1180 if (mismatch_detail
== NULL
)
1182 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1183 _("register element index"));
1187 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1188 int idx
, int lower_bound
, int upper_bound
)
1190 if (mismatch_detail
== NULL
)
1192 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1197 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1200 if (mismatch_detail
== NULL
)
1202 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1203 mismatch_detail
->data
[0] = alignment
;
1207 set_reg_list_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1210 if (mismatch_detail
== NULL
)
1212 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST
, idx
, NULL
);
1213 mismatch_detail
->data
[0] = expected_num
;
1217 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1220 if (mismatch_detail
== NULL
)
1222 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
1225 /* General constraint checking based on operand code.
1227 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1228 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1230 This function has to be called after the qualifiers for all operands
1233 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1234 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1235 of error message during the disassembling where error message is not
1236 wanted. We avoid the dynamic construction of strings of error messages
1237 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1238 use a combination of error code, static string and some integer data to
1239 represent an error. */
1242 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1243 enum aarch64_opnd type
,
1244 const aarch64_opcode
*opcode
,
1245 aarch64_operand_error
*mismatch_detail
)
1250 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1251 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1253 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1255 switch (aarch64_operands
[type
].op_class
)
1257 case AARCH64_OPND_CLASS_INT_REG
:
1258 /* Check pair reg constraints for cas* instructions. */
1259 if (type
== AARCH64_OPND_PAIRREG
)
1261 assert (idx
== 1 || idx
== 3);
1262 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1264 set_syntax_error (mismatch_detail
, idx
- 1,
1265 _("reg pair must start from even reg"));
1268 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1270 set_syntax_error (mismatch_detail
, idx
,
1271 _("reg pair must be contiguous"));
1277 /* <Xt> may be optional in some IC and TLBI instructions. */
1278 if (type
== AARCH64_OPND_Rt_SYS
)
1280 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1281 == AARCH64_OPND_CLASS_SYSTEM
));
1282 if (opnds
[1].present
&& !opnds
[0].sysins_op
->has_xt
)
1284 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1287 if (!opnds
[1].present
&& opnds
[0].sysins_op
->has_xt
)
1289 set_other_error (mismatch_detail
, idx
, _("missing register"));
1295 case AARCH64_OPND_QLF_WSP
:
1296 case AARCH64_OPND_QLF_SP
:
1297 if (!aarch64_stack_pointer_p (opnd
))
1299 set_other_error (mismatch_detail
, idx
,
1300 _("stack pointer register expected"));
1309 case AARCH64_OPND_CLASS_COND
:
1310 if (type
== AARCH64_OPND_COND1
1311 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
1313 /* Not allow AL or NV. */
1314 set_syntax_error (mismatch_detail
, idx
, NULL
);
1318 case AARCH64_OPND_CLASS_ADDRESS
:
1319 /* Check writeback. */
1320 switch (opcode
->iclass
)
1324 case ldstnapair_offs
:
1327 if (opnd
->addr
.writeback
== 1)
1329 set_syntax_error (mismatch_detail
, idx
,
1330 _("unexpected address writeback"));
1335 case ldstpair_indexed
:
1338 if (opnd
->addr
.writeback
== 0)
1340 set_syntax_error (mismatch_detail
, idx
,
1341 _("address writeback expected"));
1346 assert (opnd
->addr
.writeback
== 0);
1351 case AARCH64_OPND_ADDR_SIMM7
:
1352 /* Scaled signed 7 bits immediate offset. */
1353 /* Get the size of the data element that is accessed, which may be
1354 different from that of the source register size,
1355 e.g. in strb/ldrb. */
1356 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1357 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
1359 set_offset_out_of_range_error (mismatch_detail
, idx
,
1360 -64 * size
, 63 * size
);
1363 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1365 set_unaligned_error (mismatch_detail
, idx
, size
);
1369 case AARCH64_OPND_ADDR_SIMM9
:
1370 /* Unscaled signed 9 bits immediate offset. */
1371 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
1373 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
1378 case AARCH64_OPND_ADDR_SIMM9_2
:
1379 /* Unscaled signed 9 bits immediate offset, which has to be negative
1381 size
= aarch64_get_qualifier_esize (qualifier
);
1382 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
1383 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1384 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
1386 set_other_error (mismatch_detail
, idx
,
1387 _("negative or unaligned offset expected"));
1390 case AARCH64_OPND_SIMD_ADDR_POST
:
1391 /* AdvSIMD load/store multiple structures, post-index. */
1393 if (opnd
->addr
.offset
.is_reg
)
1395 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
1399 set_other_error (mismatch_detail
, idx
,
1400 _("invalid register offset"));
1406 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
1407 unsigned num_bytes
; /* total number of bytes transferred. */
1408 /* The opcode dependent area stores the number of elements in
1409 each structure to be loaded/stored. */
1410 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
1411 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
1412 /* Special handling of loading single structure to all lane. */
1413 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
1414 * aarch64_get_qualifier_esize (prev
->qualifier
);
1416 num_bytes
= prev
->reglist
.num_regs
1417 * aarch64_get_qualifier_esize (prev
->qualifier
)
1418 * aarch64_get_qualifier_nelem (prev
->qualifier
);
1419 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
1421 set_other_error (mismatch_detail
, idx
,
1422 _("invalid post-increment amount"));
1428 case AARCH64_OPND_ADDR_REGOFF
:
1429 /* Get the size of the data element that is accessed, which may be
1430 different from that of the source register size,
1431 e.g. in strb/ldrb. */
1432 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1433 /* It is either no shift or shift by the binary logarithm of SIZE. */
1434 if (opnd
->shifter
.amount
!= 0
1435 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
1437 set_other_error (mismatch_detail
, idx
,
1438 _("invalid shift amount"));
1441 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1443 switch (opnd
->shifter
.kind
)
1445 case AARCH64_MOD_UXTW
:
1446 case AARCH64_MOD_LSL
:
1447 case AARCH64_MOD_SXTW
:
1448 case AARCH64_MOD_SXTX
: break;
1450 set_other_error (mismatch_detail
, idx
,
1451 _("invalid extend/shift operator"));
1456 case AARCH64_OPND_ADDR_UIMM12
:
1457 imm
= opnd
->addr
.offset
.imm
;
1458 /* Get the size of the data element that is accessed, which may be
1459 different from that of the source register size,
1460 e.g. in strb/ldrb. */
1461 size
= aarch64_get_qualifier_esize (qualifier
);
1462 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
1464 set_offset_out_of_range_error (mismatch_detail
, idx
,
1468 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1470 set_unaligned_error (mismatch_detail
, idx
, size
);
1475 case AARCH64_OPND_ADDR_PCREL14
:
1476 case AARCH64_OPND_ADDR_PCREL19
:
1477 case AARCH64_OPND_ADDR_PCREL21
:
1478 case AARCH64_OPND_ADDR_PCREL26
:
1479 imm
= opnd
->imm
.value
;
1480 if (operand_need_shift_by_two (get_operand_from_code (type
)))
1482 /* The offset value in a PC-relative branch instruction is alway
1483 4-byte aligned and is encoded without the lowest 2 bits. */
1484 if (!value_aligned_p (imm
, 4))
1486 set_unaligned_error (mismatch_detail
, idx
, 4);
1489 /* Right shift by 2 so that we can carry out the following check
1493 size
= get_operand_fields_width (get_operand_from_code (type
));
1494 if (!value_fit_signed_field_p (imm
, size
))
1496 set_other_error (mismatch_detail
, idx
,
1497 _("immediate out of range"));
1507 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
1508 /* The opcode dependent area stores the number of elements in
1509 each structure to be loaded/stored. */
1510 num
= get_opcode_dependent_value (opcode
);
1513 case AARCH64_OPND_LVt
:
1514 assert (num
>= 1 && num
<= 4);
1515 /* Unless LD1/ST1, the number of registers should be equal to that
1516 of the structure elements. */
1517 if (num
!= 1 && opnd
->reglist
.num_regs
!= num
)
1519 set_reg_list_error (mismatch_detail
, idx
, num
);
1523 case AARCH64_OPND_LVt_AL
:
1524 case AARCH64_OPND_LEt
:
1525 assert (num
>= 1 && num
<= 4);
1526 /* The number of registers should be equal to that of the structure
1528 if (opnd
->reglist
.num_regs
!= num
)
1530 set_reg_list_error (mismatch_detail
, idx
, num
);
1539 case AARCH64_OPND_CLASS_IMMEDIATE
:
1540 /* Constraint check on immediate operand. */
1541 imm
= opnd
->imm
.value
;
1542 /* E.g. imm_0_31 constrains value to be 0..31. */
1543 if (qualifier_value_in_range_constraint_p (qualifier
)
1544 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
1545 get_upper_bound (qualifier
)))
1547 set_imm_out_of_range_error (mismatch_detail
, idx
,
1548 get_lower_bound (qualifier
),
1549 get_upper_bound (qualifier
));
1555 case AARCH64_OPND_AIMM
:
1556 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1558 set_other_error (mismatch_detail
, idx
,
1559 _("invalid shift operator"));
1562 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
1564 set_other_error (mismatch_detail
, idx
,
1565 _("shift amount expected to be 0 or 12"));
1568 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
1570 set_other_error (mismatch_detail
, idx
,
1571 _("immediate out of range"));
1576 case AARCH64_OPND_HALF
:
1577 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
1578 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1580 set_other_error (mismatch_detail
, idx
,
1581 _("invalid shift operator"));
1584 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1585 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
1587 set_other_error (mismatch_detail
, idx
,
1588 _("shift amount should be a multiple of 16"));
1591 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
1593 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
1597 if (opnd
->imm
.value
< 0)
1599 set_other_error (mismatch_detail
, idx
,
1600 _("negative immediate value not allowed"));
1603 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
1605 set_other_error (mismatch_detail
, idx
,
1606 _("immediate out of range"));
1611 case AARCH64_OPND_IMM_MOV
:
1613 int is32
= aarch64_get_qualifier_esize (opnds
[0].qualifier
) == 4;
1614 imm
= opnd
->imm
.value
;
1618 case OP_MOV_IMM_WIDEN
:
1620 /* Fall through... */
1621 case OP_MOV_IMM_WIDE
:
1622 if (!aarch64_wide_constant_p (imm
, is32
, NULL
))
1624 set_other_error (mismatch_detail
, idx
,
1625 _("immediate out of range"));
1629 case OP_MOV_IMM_LOG
:
1630 if (!aarch64_logical_immediate_p (imm
, is32
, NULL
))
1632 set_other_error (mismatch_detail
, idx
,
1633 _("immediate out of range"));
1644 case AARCH64_OPND_NZCV
:
1645 case AARCH64_OPND_CCMP_IMM
:
1646 case AARCH64_OPND_EXCEPTION
:
1647 case AARCH64_OPND_UIMM4
:
1648 case AARCH64_OPND_UIMM7
:
1649 case AARCH64_OPND_UIMM3_OP1
:
1650 case AARCH64_OPND_UIMM3_OP2
:
1651 size
= get_operand_fields_width (get_operand_from_code (type
));
1653 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
1655 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
1661 case AARCH64_OPND_WIDTH
:
1662 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
1663 && opnds
[0].type
== AARCH64_OPND_Rd
);
1664 size
= get_upper_bound (qualifier
);
1665 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
1666 /* lsb+width <= reg.size */
1668 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
1669 size
- opnds
[idx
-1].imm
.value
);
1674 case AARCH64_OPND_LIMM
:
1676 int is32
= opnds
[0].qualifier
== AARCH64_OPND_QLF_W
;
1677 uint64_t uimm
= opnd
->imm
.value
;
1678 if (opcode
->op
== OP_BIC
)
1680 if (aarch64_logical_immediate_p (uimm
, is32
, NULL
) == FALSE
)
1682 set_other_error (mismatch_detail
, idx
,
1683 _("immediate out of range"));
1689 case AARCH64_OPND_IMM0
:
1690 case AARCH64_OPND_FPIMM0
:
1691 if (opnd
->imm
.value
!= 0)
1693 set_other_error (mismatch_detail
, idx
,
1694 _("immediate zero expected"));
1699 case AARCH64_OPND_SHLL_IMM
:
1701 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
1702 if (opnd
->imm
.value
!= size
)
1704 set_other_error (mismatch_detail
, idx
,
1705 _("invalid shift amount"));
1710 case AARCH64_OPND_IMM_VLSL
:
1711 size
= aarch64_get_qualifier_esize (qualifier
);
1712 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
1714 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
1720 case AARCH64_OPND_IMM_VLSR
:
1721 size
= aarch64_get_qualifier_esize (qualifier
);
1722 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
1724 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
1729 case AARCH64_OPND_SIMD_IMM
:
1730 case AARCH64_OPND_SIMD_IMM_SFT
:
1731 /* Qualifier check. */
1734 case AARCH64_OPND_QLF_LSL
:
1735 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1737 set_other_error (mismatch_detail
, idx
,
1738 _("invalid shift operator"));
1742 case AARCH64_OPND_QLF_MSL
:
1743 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
1745 set_other_error (mismatch_detail
, idx
,
1746 _("invalid shift operator"));
1750 case AARCH64_OPND_QLF_NIL
:
1751 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1753 set_other_error (mismatch_detail
, idx
,
1754 _("shift is not permitted"));
1762 /* Is the immediate valid? */
1764 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
1766 /* uimm8 or simm8 */
1767 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
1769 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
1773 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
1776 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1777 ffffffffgggggggghhhhhhhh'. */
1778 set_other_error (mismatch_detail
, idx
,
1779 _("invalid value for immediate"));
1782 /* Is the shift amount valid? */
1783 switch (opnd
->shifter
.kind
)
1785 case AARCH64_MOD_LSL
:
1786 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1787 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
1789 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
1793 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
1795 set_unaligned_error (mismatch_detail
, idx
, 8);
1799 case AARCH64_MOD_MSL
:
1800 /* Only 8 and 16 are valid shift amount. */
1801 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
1803 set_other_error (mismatch_detail
, idx
,
1804 _("shift amount expected to be 0 or 16"));
1809 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1811 set_other_error (mismatch_detail
, idx
,
1812 _("invalid shift operator"));
1819 case AARCH64_OPND_FPIMM
:
1820 case AARCH64_OPND_SIMD_FPIMM
:
1821 if (opnd
->imm
.is_fp
== 0)
1823 set_other_error (mismatch_detail
, idx
,
1824 _("floating-point immediate expected"));
1827 /* The value is expected to be an 8-bit floating-point constant with
1828 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1829 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1831 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
1833 set_other_error (mismatch_detail
, idx
,
1834 _("immediate out of range"));
1837 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1839 set_other_error (mismatch_detail
, idx
,
1840 _("invalid shift operator"));
1850 case AARCH64_OPND_CLASS_CP_REG
:
1851 /* Cn or Cm: 4-bit opcode field named for historical reasons.
1852 valid range: C0 - C15. */
1853 if (opnd
->reg
.regno
> 15)
1855 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
1860 case AARCH64_OPND_CLASS_SYSTEM
:
1863 case AARCH64_OPND_PSTATEFIELD
:
1864 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
1866 The immediate must be #0 or #1. */
1867 if (opnd
->pstatefield
== 0x04 /* PAN. */
1868 && opnds
[1].imm
.value
> 1)
1870 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
1873 /* MSR SPSel, #uimm4
1874 Uses uimm4 as a control value to select the stack pointer: if
1875 bit 0 is set it selects the current exception level's stack
1876 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
1877 Bits 1 to 3 of uimm4 are reserved and should be zero. */
1878 if (opnd
->pstatefield
== 0x05 /* spsel */ && opnds
[1].imm
.value
> 1)
1880 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
1889 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
1890 /* Get the upper bound for the element index. */
1891 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1892 /* Index out-of-range. */
1893 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
1895 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1898 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
1899 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
1900 number is encoded in "size:M:Rm":
1906 if (type
== AARCH64_OPND_Em
&& qualifier
== AARCH64_OPND_QLF_S_H
1907 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
1909 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
1914 case AARCH64_OPND_CLASS_MODIFIED_REG
:
1915 assert (idx
== 1 || idx
== 2);
1918 case AARCH64_OPND_Rm_EXT
:
1919 if (aarch64_extend_operator_p (opnd
->shifter
.kind
) == FALSE
1920 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1922 set_other_error (mismatch_detail
, idx
,
1923 _("extend operator expected"));
1926 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
1927 (i.e. SP), in which case it defaults to LSL. The LSL alias is
1928 only valid when "Rd" or "Rn" is '11111', and is preferred in that
1930 if (!aarch64_stack_pointer_p (opnds
+ 0)
1931 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
1933 if (!opnd
->shifter
.operator_present
)
1935 set_other_error (mismatch_detail
, idx
,
1936 _("missing extend operator"));
1939 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
1941 set_other_error (mismatch_detail
, idx
,
1942 _("'LSL' operator not allowed"));
1946 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
1947 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
1948 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
1950 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
1953 /* In the 64-bit form, the final register operand is written as Wm
1954 for all but the (possibly omitted) UXTX/LSL and SXTX
1956 N.B. GAS allows X register to be used with any operator as a
1957 programming convenience. */
1958 if (qualifier
== AARCH64_OPND_QLF_X
1959 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
1960 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
1961 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
1963 set_other_error (mismatch_detail
, idx
, _("W register expected"));
1968 case AARCH64_OPND_Rm_SFT
:
1969 /* ROR is not available to the shifted register operand in
1970 arithmetic instructions. */
1971 if (aarch64_shift_operator_p (opnd
->shifter
.kind
) == FALSE
)
1973 set_other_error (mismatch_detail
, idx
,
1974 _("shift operator expected"));
1977 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
1978 && opcode
->iclass
!= log_shift
)
1980 set_other_error (mismatch_detail
, idx
,
1981 _("'ROR' operator not allowed"));
1984 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
1985 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
1987 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2004 /* Main entrypoint for the operand constraint checking.
2006 Return 1 if operands of *INST meet the constraint applied by the operand
2007 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2008 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2009 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2010 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2011 error kind when it is notified that an instruction does not pass the check).
2013 Un-determined operand qualifiers may get established during the process. */
2016 aarch64_match_operands_constraint (aarch64_inst
*inst
,
2017 aarch64_operand_error
*mismatch_detail
)
2021 DEBUG_TRACE ("enter");
2023 /* Match operands' qualifier.
2024 *INST has already had qualifier establish for some, if not all, of
2025 its operands; we need to find out whether these established
2026 qualifiers match one of the qualifier sequence in
2027 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2028 with the corresponding qualifier in such a sequence.
2029 Only basic operand constraint checking is done here; the more thorough
2030 constraint checking will carried out by operand_general_constraint_met_p,
2031 which has be to called after this in order to get all of the operands'
2032 qualifiers established. */
2033 if (match_operands_qualifier (inst
, TRUE
/* update_p */) == 0)
2035 DEBUG_TRACE ("FAIL on operand qualifier matching");
2036 if (mismatch_detail
)
2038 /* Return an error type to indicate that it is the qualifier
2039 matching failure; we don't care about which operand as there
2040 are enough information in the opcode table to reproduce it. */
2041 mismatch_detail
->kind
= AARCH64_OPDE_INVALID_VARIANT
;
2042 mismatch_detail
->index
= -1;
2043 mismatch_detail
->error
= NULL
;
2048 /* Match operands' constraint. */
2049 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2051 enum aarch64_opnd type
= inst
->opcode
->operands
[i
];
2052 if (type
== AARCH64_OPND_NIL
)
2054 if (inst
->operands
[i
].skip
)
2056 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2059 if (operand_general_constraint_met_p (inst
->operands
, i
, type
,
2060 inst
->opcode
, mismatch_detail
) == 0)
2062 DEBUG_TRACE ("FAIL on operand %d", i
);
2067 DEBUG_TRACE ("PASS");
2072 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2073 Also updates the TYPE of each INST->OPERANDS with the corresponding
2074 value of OPCODE->OPERANDS.
2076 Note that some operand qualifiers may need to be manually cleared by
2077 the caller before it further calls the aarch64_opcode_encode; by
2078 doing this, it helps the qualifier matching facilities work
2081 const aarch64_opcode
*
2082 aarch64_replace_opcode (aarch64_inst
*inst
, const aarch64_opcode
*opcode
)
2085 const aarch64_opcode
*old
= inst
->opcode
;
2087 inst
->opcode
= opcode
;
2089 /* Update the operand types. */
2090 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2092 inst
->operands
[i
].type
= opcode
->operands
[i
];
2093 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
2097 DEBUG_TRACE ("replace %s with %s", old
->name
, opcode
->name
);
2103 aarch64_operand_index (const enum aarch64_opnd
*operands
, enum aarch64_opnd operand
)
2106 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2107 if (operands
[i
] == operand
)
2109 else if (operands
[i
] == AARCH64_OPND_NIL
)
/* Integer register name table, indexed as:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1
   i.e. first index selects whether R31 reads as the zero register,
   second index selects the register width, third is the register number.  */
static const char *int_reg[2][2][32] = {
#define R32 "w"
#define R64 "x"
  { { R32  "0", R32  "1", R32  "2", R32  "3", R32  "4", R32  "5", R32  "6", R32  "7",
      R32  "8", R32  "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
      R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
      R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", "wsp" },
    { R64  "0", R64  "1", R64  "2", R64  "3", R64  "4", R64  "5", R64  "6", R64  "7",
      R64  "8", R64  "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
      R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
      R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", "sp" } },
  { { R32  "0", R32  "1", R32  "2", R32  "3", R32  "4", R32  "5", R32  "6", R32  "7",
      R32  "8", R32  "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
      R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
      R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
    { R64  "0", R64  "1", R64  "2", R64  "3", R64  "4", R64  "5", R64  "6", R64  "7",
      R64  "8", R64  "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
      R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
      R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
#undef R32
#undef R64
};
2141 /* Return the integer register name.
2142 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2144 static inline const char *
2145 get_int_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
, int sp_reg_p
)
2147 const int has_zr
= sp_reg_p
? 0 : 1;
2148 const int is_64
= aarch64_get_qualifier_esize (qualifier
) == 4 ? 0 : 1;
2149 return int_reg
[has_zr
][is_64
][regno
];
2152 /* Like get_int_reg_name, but IS_64 is always 1. */
2154 static inline const char *
2155 get_64bit_int_reg_name (int regno
, int sp_reg_p
)
2157 const int has_zr
= sp_reg_p
? 0 : 1;
2158 return int_reg
[has_zr
][1][regno
];
2161 /* Types for expanding an encoded 8-bit value to a floating-point value. */
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value (the IEEE bit pattern) is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */

  if (size == 8)
    {
      /* Build the high 32 bits of the double pattern, then shift into
	 place; the low 32 bits of a valid FP8 expansion are all zero.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6)	*/
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32))	/* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7>              */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)         */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2225 /* Produce the string representation of the register list operand *OPND
2226 in the buffer pointed by BUF of size SIZE. */
2228 print_register_list (char *buf
, size_t size
, const aarch64_opnd_info
*opnd
)
2230 const int num_regs
= opnd
->reglist
.num_regs
;
2231 const int first_reg
= opnd
->reglist
.first_regno
;
2232 const int last_reg
= (first_reg
+ num_regs
- 1) & 0x1f;
2233 const char *qlf_name
= aarch64_get_qualifier_name (opnd
->qualifier
);
2234 char tb
[8]; /* Temporary buffer. */
2236 assert (opnd
->type
!= AARCH64_OPND_LEt
|| opnd
->reglist
.has_index
);
2237 assert (num_regs
>= 1 && num_regs
<= 4);
2239 /* Prepare the index if any. */
2240 if (opnd
->reglist
.has_index
)
2241 snprintf (tb
, 8, "[%d]", opnd
->reglist
.index
);
2245 /* The hyphenated form is preferred for disassembly if there are
2246 more than two registers in the list, and the register numbers
2247 are monotonically increasing in increments of one. */
2248 if (num_regs
> 2 && last_reg
> first_reg
)
2249 snprintf (buf
, size
, "{v%d.%s-v%d.%s}%s", first_reg
, qlf_name
,
2250 last_reg
, qlf_name
, tb
);
2253 const int reg0
= first_reg
;
2254 const int reg1
= (first_reg
+ 1) & 0x1f;
2255 const int reg2
= (first_reg
+ 2) & 0x1f;
2256 const int reg3
= (first_reg
+ 3) & 0x1f;
2261 snprintf (buf
, size
, "{v%d.%s}%s", reg0
, qlf_name
, tb
);
2264 snprintf (buf
, size
, "{v%d.%s, v%d.%s}%s", reg0
, qlf_name
,
2265 reg1
, qlf_name
, tb
);
2268 snprintf (buf
, size
, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0
, qlf_name
,
2269 reg1
, qlf_name
, reg2
, qlf_name
, tb
);
2272 snprintf (buf
, size
, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
2273 reg0
, qlf_name
, reg1
, qlf_name
, reg2
, qlf_name
,
2274 reg3
, qlf_name
, tb
);
2280 /* Produce the string representation of the register offset address operand
2281 *OPND in the buffer pointed by BUF of size SIZE. */
2283 print_register_offset_address (char *buf
, size_t size
,
2284 const aarch64_opnd_info
*opnd
)
2286 const size_t tblen
= 16;
2287 char tb
[tblen
]; /* Temporary buffer. */
2288 bfd_boolean lsl_p
= FALSE
; /* Is LSL shift operator? */
2289 bfd_boolean wm_p
= FALSE
; /* Should Rm be Wm? */
2290 bfd_boolean print_extend_p
= TRUE
;
2291 bfd_boolean print_amount_p
= TRUE
;
2292 const char *shift_name
= aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
2294 switch (opnd
->shifter
.kind
)
2296 case AARCH64_MOD_UXTW
: wm_p
= TRUE
; break;
2297 case AARCH64_MOD_LSL
: lsl_p
= TRUE
; break;
2298 case AARCH64_MOD_SXTW
: wm_p
= TRUE
; break;
2299 case AARCH64_MOD_SXTX
: break;
2300 default: assert (0);
2303 if (!opnd
->shifter
.amount
&& (opnd
->qualifier
!= AARCH64_OPND_QLF_S_B
2304 || !opnd
->shifter
.amount_present
))
2306 /* Not print the shift/extend amount when the amount is zero and
2307 when it is not the special case of 8-bit load/store instruction. */
2308 print_amount_p
= FALSE
;
2309 /* Likewise, no need to print the shift operator LSL in such a
2312 print_extend_p
= FALSE
;
2315 /* Prepare for the extend/shift. */
2319 snprintf (tb
, tblen
, ",%s #%d", shift_name
, opnd
->shifter
.amount
);
2321 snprintf (tb
, tblen
, ",%s", shift_name
);
2326 snprintf (buf
, size
, "[%s,%s%s]",
2327 get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
2328 get_int_reg_name (opnd
->addr
.offset
.regno
,
2329 wm_p
? AARCH64_OPND_QLF_W
: AARCH64_OPND_QLF_X
,
2334 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2335 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2336 PC, PCREL_P and ADDRESS are used to pass in and return information about
2337 the PC-relative address calculation, where the PC value is passed in
2338 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2339 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2340 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2342 The function serves both the disassembler and the assembler diagnostics
2343 issuer, which is the reason why it lives in this file. */
2346 aarch64_print_operand (char *buf
, size_t size
, bfd_vma pc
,
2347 const aarch64_opcode
*opcode
,
2348 const aarch64_opnd_info
*opnds
, int idx
, int *pcrel_p
,
2352 const char *name
= NULL
;
2353 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
2354 enum aarch64_modifier_kind kind
;
2363 case AARCH64_OPND_Rd
:
2364 case AARCH64_OPND_Rn
:
2365 case AARCH64_OPND_Rm
:
2366 case AARCH64_OPND_Rt
:
2367 case AARCH64_OPND_Rt2
:
2368 case AARCH64_OPND_Rs
:
2369 case AARCH64_OPND_Ra
:
2370 case AARCH64_OPND_Rt_SYS
:
2371 case AARCH64_OPND_PAIRREG
:
2372 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2373 the <ic_op>, therefore we we use opnd->present to override the
2374 generic optional-ness information. */
2375 if (opnd
->type
== AARCH64_OPND_Rt_SYS
&& !opnd
->present
)
2377 /* Omit the operand, e.g. RET. */
2378 if (optional_operand_p (opcode
, idx
)
2379 && opnd
->reg
.regno
== get_optional_operand_default_value (opcode
))
2381 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2382 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
2383 snprintf (buf
, size
, "%s",
2384 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2387 case AARCH64_OPND_Rd_SP
:
2388 case AARCH64_OPND_Rn_SP
:
2389 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2390 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
2391 || opnd
->qualifier
== AARCH64_OPND_QLF_X
2392 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
2393 snprintf (buf
, size
, "%s",
2394 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 1));
2397 case AARCH64_OPND_Rm_EXT
:
2398 kind
= opnd
->shifter
.kind
;
2399 assert (idx
== 1 || idx
== 2);
2400 if ((aarch64_stack_pointer_p (opnds
)
2401 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
2402 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
2403 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
2404 && kind
== AARCH64_MOD_UXTW
)
2405 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
2406 && kind
== AARCH64_MOD_UXTX
)))
2408 /* 'LSL' is the preferred form in this case. */
2409 kind
= AARCH64_MOD_LSL
;
2410 if (opnd
->shifter
.amount
== 0)
2412 /* Shifter omitted. */
2413 snprintf (buf
, size
, "%s",
2414 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2418 if (opnd
->shifter
.amount
)
2419 snprintf (buf
, size
, "%s, %s #%d",
2420 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2421 aarch64_operand_modifiers
[kind
].name
,
2422 opnd
->shifter
.amount
);
2424 snprintf (buf
, size
, "%s, %s",
2425 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2426 aarch64_operand_modifiers
[kind
].name
);
2429 case AARCH64_OPND_Rm_SFT
:
2430 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2431 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
2432 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2433 snprintf (buf
, size
, "%s",
2434 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2436 snprintf (buf
, size
, "%s, %s #%d",
2437 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2438 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
2439 opnd
->shifter
.amount
);
2442 case AARCH64_OPND_Fd
:
2443 case AARCH64_OPND_Fn
:
2444 case AARCH64_OPND_Fm
:
2445 case AARCH64_OPND_Fa
:
2446 case AARCH64_OPND_Ft
:
2447 case AARCH64_OPND_Ft2
:
2448 case AARCH64_OPND_Sd
:
2449 case AARCH64_OPND_Sn
:
2450 case AARCH64_OPND_Sm
:
2451 snprintf (buf
, size
, "%s%d", aarch64_get_qualifier_name (opnd
->qualifier
),
2455 case AARCH64_OPND_Vd
:
2456 case AARCH64_OPND_Vn
:
2457 case AARCH64_OPND_Vm
:
2458 snprintf (buf
, size
, "v%d.%s", opnd
->reg
.regno
,
2459 aarch64_get_qualifier_name (opnd
->qualifier
));
2462 case AARCH64_OPND_Ed
:
2463 case AARCH64_OPND_En
:
2464 case AARCH64_OPND_Em
:
2465 snprintf (buf
, size
, "v%d.%s[%d]", opnd
->reglane
.regno
,
2466 aarch64_get_qualifier_name (opnd
->qualifier
),
2467 opnd
->reglane
.index
);
2470 case AARCH64_OPND_VdD1
:
2471 case AARCH64_OPND_VnD1
:
2472 snprintf (buf
, size
, "v%d.d[1]", opnd
->reg
.regno
);
2475 case AARCH64_OPND_LVn
:
2476 case AARCH64_OPND_LVt
:
2477 case AARCH64_OPND_LVt_AL
:
2478 case AARCH64_OPND_LEt
:
2479 print_register_list (buf
, size
, opnd
);
2482 case AARCH64_OPND_Cn
:
2483 case AARCH64_OPND_Cm
:
2484 snprintf (buf
, size
, "C%d", opnd
->reg
.regno
);
2487 case AARCH64_OPND_IDX
:
2488 case AARCH64_OPND_IMM
:
2489 case AARCH64_OPND_WIDTH
:
2490 case AARCH64_OPND_UIMM3_OP1
:
2491 case AARCH64_OPND_UIMM3_OP2
:
2492 case AARCH64_OPND_BIT_NUM
:
2493 case AARCH64_OPND_IMM_VLSL
:
2494 case AARCH64_OPND_IMM_VLSR
:
2495 case AARCH64_OPND_SHLL_IMM
:
2496 case AARCH64_OPND_IMM0
:
2497 case AARCH64_OPND_IMMR
:
2498 case AARCH64_OPND_IMMS
:
2499 case AARCH64_OPND_FBITS
:
2500 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
2503 case AARCH64_OPND_IMM_MOV
:
2504 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
2506 case 4: /* e.g. MOV Wd, #<imm32>. */
2508 int imm32
= opnd
->imm
.value
;
2509 snprintf (buf
, size
, "#0x%-20x\t// #%d", imm32
, imm32
);
2512 case 8: /* e.g. MOV Xd, #<imm64>. */
2513 snprintf (buf
, size
, "#0x%-20" PRIx64
"\t// #%" PRIi64
,
2514 opnd
->imm
.value
, opnd
->imm
.value
);
2516 default: assert (0);
2520 case AARCH64_OPND_FPIMM0
:
2521 snprintf (buf
, size
, "#0.0");
2524 case AARCH64_OPND_LIMM
:
2525 case AARCH64_OPND_AIMM
:
2526 case AARCH64_OPND_HALF
:
2527 if (opnd
->shifter
.amount
)
2528 snprintf (buf
, size
, "#0x%" PRIx64
", lsl #%d", opnd
->imm
.value
,
2529 opnd
->shifter
.amount
);
2531 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
2534 case AARCH64_OPND_SIMD_IMM
:
2535 case AARCH64_OPND_SIMD_IMM_SFT
:
2536 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2537 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
2538 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
2540 snprintf (buf
, size
, "#0x%" PRIx64
", %s #%d", opnd
->imm
.value
,
2541 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
2542 opnd
->shifter
.amount
);
2545 case AARCH64_OPND_FPIMM
:
2546 case AARCH64_OPND_SIMD_FPIMM
:
2547 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
2549 case 2: /* e.g. FMOV <Hd>, #<imm>. */
2552 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
2553 snprintf (buf
, size
, "#%.18e", c
.f
);
2556 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
2559 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
2560 snprintf (buf
, size
, "#%.18e", c
.f
);
2563 case 8: /* e.g. FMOV <Sd>, #<imm>. */
2566 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
2567 snprintf (buf
, size
, "#%.18e", c
.d
);
2570 default: assert (0);
2574 case AARCH64_OPND_CCMP_IMM
:
2575 case AARCH64_OPND_NZCV
:
2576 case AARCH64_OPND_EXCEPTION
:
2577 case AARCH64_OPND_UIMM4
:
2578 case AARCH64_OPND_UIMM7
:
2579 if (optional_operand_p (opcode
, idx
) == TRUE
2580 && (opnd
->imm
.value
==
2581 (int64_t) get_optional_operand_default_value (opcode
)))
2582 /* Omit the operand, e.g. DCPS1. */
2584 snprintf (buf
, size
, "#0x%x", (unsigned int)opnd
->imm
.value
);
2587 case AARCH64_OPND_COND
:
2588 case AARCH64_OPND_COND1
:
2589 snprintf (buf
, size
, "%s", opnd
->cond
->names
[0]);
2592 case AARCH64_OPND_ADDR_ADRP
:
2593 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
2599 /* This is not necessary during the disassembling, as print_address_func
2600 in the disassemble_info will take care of the printing. But some
2601 other callers may be still interested in getting the string in *STR,
2602 so here we do snprintf regardless. */
2603 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
2606 case AARCH64_OPND_ADDR_PCREL14
:
2607 case AARCH64_OPND_ADDR_PCREL19
:
2608 case AARCH64_OPND_ADDR_PCREL21
:
2609 case AARCH64_OPND_ADDR_PCREL26
:
2610 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
2615 /* This is not necessary during the disassembling, as print_address_func
2616 in the disassemble_info will take care of the printing. But some
2617 other callers may be still interested in getting the string in *STR,
2618 so here we do snprintf regardless. */
2619 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
2622 case AARCH64_OPND_ADDR_SIMPLE
:
2623 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
2624 case AARCH64_OPND_SIMD_ADDR_POST
:
2625 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
2626 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
2628 if (opnd
->addr
.offset
.is_reg
)
2629 snprintf (buf
, size
, "[%s], x%d", name
, opnd
->addr
.offset
.regno
);
2631 snprintf (buf
, size
, "[%s], #%d", name
, opnd
->addr
.offset
.imm
);
2634 snprintf (buf
, size
, "[%s]", name
);
2637 case AARCH64_OPND_ADDR_REGOFF
:
2638 print_register_offset_address (buf
, size
, opnd
);
2641 case AARCH64_OPND_ADDR_SIMM7
:
2642 case AARCH64_OPND_ADDR_SIMM9
:
2643 case AARCH64_OPND_ADDR_SIMM9_2
:
2644 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
2645 if (opnd
->addr
.writeback
)
2647 if (opnd
->addr
.preind
)
2648 snprintf (buf
, size
, "[%s,#%d]!", name
, opnd
->addr
.offset
.imm
);
2650 snprintf (buf
, size
, "[%s],#%d", name
, opnd
->addr
.offset
.imm
);
2654 if (opnd
->addr
.offset
.imm
)
2655 snprintf (buf
, size
, "[%s,#%d]", name
, opnd
->addr
.offset
.imm
);
2657 snprintf (buf
, size
, "[%s]", name
);
2661 case AARCH64_OPND_ADDR_UIMM12
:
2662 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
2663 if (opnd
->addr
.offset
.imm
)
2664 snprintf (buf
, size
, "[%s,#%d]", name
, opnd
->addr
.offset
.imm
);
2666 snprintf (buf
, size
, "[%s]", name
);
2669 case AARCH64_OPND_SYSREG
:
2670 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
2671 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
2672 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs
[i
]))
2674 if (aarch64_sys_regs
[i
].name
)
2675 snprintf (buf
, size
, "%s", aarch64_sys_regs
[i
].name
);
2678 /* Implementation defined system register. */
2679 unsigned int value
= opnd
->sysreg
;
2680 snprintf (buf
, size
, "s%u_%u_c%u_c%u_%u", (value
>> 14) & 0x3,
2681 (value
>> 11) & 0x7, (value
>> 7) & 0xf, (value
>> 3) & 0xf,
2686 case AARCH64_OPND_PSTATEFIELD
:
2687 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
2688 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
2690 assert (aarch64_pstatefields
[i
].name
);
2691 snprintf (buf
, size
, "%s", aarch64_pstatefields
[i
].name
);
2694 case AARCH64_OPND_SYSREG_AT
:
2695 case AARCH64_OPND_SYSREG_DC
:
2696 case AARCH64_OPND_SYSREG_IC
:
2697 case AARCH64_OPND_SYSREG_TLBI
:
2698 snprintf (buf
, size
, "%s", opnd
->sysins_op
->name
);
2701 case AARCH64_OPND_BARRIER
:
2702 snprintf (buf
, size
, "%s", opnd
->barrier
->name
);
2705 case AARCH64_OPND_BARRIER_ISB
:
2706 /* Operand can be omitted, e.g. in DCPS1. */
2707 if (! optional_operand_p (opcode
, idx
)
2708 || (opnd
->barrier
->value
2709 != get_optional_operand_default_value (opcode
)))
2710 snprintf (buf
, size
, "#0x%x", opnd
->barrier
->value
);
2713 case AARCH64_OPND_PRFOP
:
2714 if (opnd
->prfop
->name
!= NULL
)
2715 snprintf (buf
, size
, "%s", opnd
->prfop
->name
);
2717 snprintf (buf
, size
, "#0x%02x", opnd
->prfop
->value
);
/* Encode a system-register operand (op0:op1:CRn:CRm:op2) into the
   packed value used throughout the aarch64_sys_reg tables below.
   The fields are assembled from bit 19 downwards and the whole value
   is then shifted right by 5, leaving the encoding in bits [14:0].  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
/* Special-purpose registers all use op0 == 3, CRn == 4.  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
/* System instructions (IC/DC/AT/TLBI) all use op0 == 1.  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Flag bits for the 'flags' member of aarch64_sys_reg entries.  */
#define F_DEPRECATED 0x1 /* Deprecated system register.  */
#define F_ARCHEXT 0x2 /* Architecture dependent system register.  */

/* TODO there are two more issues need to be resolved
   1. handle read-only and write-only system registers
   2. handle cpu-implementation-defined system registers.  */
/* Table of the system registers accepted by the MRS/MSR instructions.
   Each entry is { name, CPENC-packed op0:op1:CRn:CRm:op2 encoding,
   flags }, where flags is 0, F_DEPRECATED or F_ARCHEXT.  The table is
   terminated by a null-name entry.  The "RO" / "r" / "w" annotations
   record read-only / read / write access; they are informational only
   (see the TODO above) and are not yet enforced.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
  { "spsr_el1",         CPEN_(0,C0,0),  0 }, /* = spsr_svc */
  { "spsr_el12",        CPEN_ (5, C0, 0), F_ARCHEXT },
  { "elr_el1",          CPEN_(0,C0,1),  0 },
  { "elr_el12",         CPEN_ (5, C0, 1), F_ARCHEXT },
  { "sp_el0",           CPEN_(0,C1,0),  0 },
  { "spsel",            CPEN_(0,C2,0),  0 },
  { "daif",             CPEN_(3,C2,1),  0 },
  { "currentel",        CPEN_(0,C2,2),  0 }, /* RO */
  { "pan",              CPEN_(0,C2,3),  F_ARCHEXT },
  { "nzcv",             CPEN_(3,C2,0),  0 },
  { "fpcr",             CPEN_(3,C4,0),  0 },
  { "fpsr",             CPEN_(3,C4,1),  0 },
  { "dspsr_el0",        CPEN_(3,C5,0),  0 },
  { "dlr_el0",          CPEN_(3,C5,1),  0 },
  { "spsr_el2",         CPEN_(4,C0,0),  0 }, /* = spsr_hyp */
  { "elr_el2",          CPEN_(4,C0,1),  0 },
  { "sp_el1",           CPEN_(4,C1,0),  0 },
  { "spsr_irq",         CPEN_(4,C3,0),  0 },
  { "spsr_abt",         CPEN_(4,C3,1),  0 },
  { "spsr_und",         CPEN_(4,C3,2),  0 },
  { "spsr_fiq",         CPEN_(4,C3,3),  0 },
  { "spsr_el3",         CPEN_(6,C0,0),  0 },
  { "elr_el3",          CPEN_(6,C0,1),  0 },
  { "sp_el2",           CPEN_(6,C1,0),  0 },
  { "spsr_svc",         CPEN_(0,C0,0),  F_DEPRECATED }, /* = spsr_el1 */
  { "spsr_hyp",         CPEN_(4,C0,0),  F_DEPRECATED }, /* = spsr_el2 */
  /* ID and cache-type registers.  */
  { "midr_el1",         CPENC(3,0,C0,C0,0),   0 }, /* RO */
  { "ctr_el0",          CPENC(3,3,C0,C0,1),   0 }, /* RO */
  { "mpidr_el1",        CPENC(3,0,C0,C0,5),   0 }, /* RO */
  { "revidr_el1",       CPENC(3,0,C0,C0,6),   0 }, /* RO */
  { "aidr_el1",         CPENC(3,1,C0,C0,7),   0 }, /* RO */
  { "dczid_el0",        CPENC(3,3,C0,C0,7),   0 }, /* RO */
  { "id_dfr0_el1",      CPENC(3,0,C0,C1,2),   0 }, /* RO */
  { "id_pfr0_el1",      CPENC(3,0,C0,C1,0),   0 }, /* RO */
  { "id_pfr1_el1",      CPENC(3,0,C0,C1,1),   0 }, /* RO */
  { "id_afr0_el1",      CPENC(3,0,C0,C1,3),   0 }, /* RO */
  { "id_mmfr0_el1",     CPENC(3,0,C0,C1,4),   0 }, /* RO */
  { "id_mmfr1_el1",     CPENC(3,0,C0,C1,5),   0 }, /* RO */
  { "id_mmfr2_el1",     CPENC(3,0,C0,C1,6),   0 }, /* RO */
  { "id_mmfr3_el1",     CPENC(3,0,C0,C1,7),   0 }, /* RO */
  { "id_mmfr4_el1",     CPENC(3,0,C0,C2,6),   0 }, /* RO */
  { "id_isar0_el1",     CPENC(3,0,C0,C2,0),   0 }, /* RO */
  { "id_isar1_el1",     CPENC(3,0,C0,C2,1),   0 }, /* RO */
  { "id_isar2_el1",     CPENC(3,0,C0,C2,2),   0 }, /* RO */
  { "id_isar3_el1",     CPENC(3,0,C0,C2,3),   0 }, /* RO */
  { "id_isar4_el1",     CPENC(3,0,C0,C2,4),   0 }, /* RO */
  { "id_isar5_el1",     CPENC(3,0,C0,C2,5),   0 }, /* RO */
  { "mvfr0_el1",        CPENC(3,0,C0,C3,0),   0 }, /* RO */
  { "mvfr1_el1",        CPENC(3,0,C0,C3,1),   0 }, /* RO */
  { "mvfr2_el1",        CPENC(3,0,C0,C3,2),   0 }, /* RO */
  { "ccsidr_el1",       CPENC(3,1,C0,C0,0),   0 }, /* RO */
  { "id_aa64pfr0_el1",  CPENC(3,0,C0,C4,0),   0 }, /* RO */
  { "id_aa64pfr1_el1",  CPENC(3,0,C0,C4,1),   0 }, /* RO */
  { "id_aa64dfr0_el1",  CPENC(3,0,C0,C5,0),   0 }, /* RO */
  { "id_aa64dfr1_el1",  CPENC(3,0,C0,C5,1),   0 }, /* RO */
  { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0),   0 }, /* RO */
  { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1),   0 }, /* RO */
  { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0),   0 }, /* RO */
  { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1),   0 }, /* RO */
  { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
  { "id_aa64afr0_el1",  CPENC(3,0,C0,C5,4),   0 }, /* RO */
  { "id_aa64afr1_el1",  CPENC(3,0,C0,C5,5),   0 }, /* RO */
  { "clidr_el1",        CPENC(3,1,C0,C0,1),   0 }, /* RO */
  { "csselr_el1",       CPENC(3,2,C0,C0,0),   0 }, /* RO */
  { "vpidr_el2",        CPENC(3,4,C0,C0,0),   0 },
  { "vmpidr_el2",       CPENC(3,4,C0,C0,5),   0 },
  /* Control, translation and fault registers.  */
  { "sctlr_el1",        CPENC(3,0,C1,C0,0),   0 },
  { "sctlr_el2",        CPENC(3,4,C1,C0,0),   0 },
  { "sctlr_el3",        CPENC(3,6,C1,C0,0),   0 },
  { "sctlr_el12",       CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
  { "actlr_el1",        CPENC(3,0,C1,C0,1),   0 },
  { "actlr_el2",        CPENC(3,4,C1,C0,1),   0 },
  { "actlr_el3",        CPENC(3,6,C1,C0,1),   0 },
  { "cpacr_el1",        CPENC(3,0,C1,C0,2),   0 },
  { "cpacr_el12",       CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
  { "cptr_el2",         CPENC(3,4,C1,C1,2),   0 },
  { "cptr_el3",         CPENC(3,6,C1,C1,2),   0 },
  { "scr_el3",          CPENC(3,6,C1,C1,0),   0 },
  { "hcr_el2",          CPENC(3,4,C1,C1,0),   0 },
  { "mdcr_el2",         CPENC(3,4,C1,C1,1),   0 },
  { "mdcr_el3",         CPENC(3,6,C1,C3,1),   0 },
  { "hstr_el2",         CPENC(3,4,C1,C1,3),   0 },
  { "hacr_el2",         CPENC(3,4,C1,C1,7),   0 },
  { "ttbr0_el1",        CPENC(3,0,C2,C0,0),   0 },
  { "ttbr1_el1",        CPENC(3,0,C2,C0,1),   0 },
  { "ttbr0_el2",        CPENC(3,4,C2,C0,0),   0 },
  { "ttbr1_el2",        CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
  { "ttbr0_el3",        CPENC(3,6,C2,C0,0),   0 },
  { "ttbr0_el12",       CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
  { "ttbr1_el12",       CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
  { "vttbr_el2",        CPENC(3,4,C2,C1,0),   0 },
  { "tcr_el1",          CPENC(3,0,C2,C0,2),   0 },
  { "tcr_el2",          CPENC(3,4,C2,C0,2),   0 },
  { "tcr_el3",          CPENC(3,6,C2,C0,2),   0 },
  { "tcr_el12",         CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
  { "vtcr_el2",         CPENC(3,4,C2,C1,2),   0 },
  { "afsr0_el1",        CPENC(3,0,C5,C1,0),   0 },
  { "afsr1_el1",        CPENC(3,0,C5,C1,1),   0 },
  { "afsr0_el2",        CPENC(3,4,C5,C1,0),   0 },
  { "afsr1_el2",        CPENC(3,4,C5,C1,1),   0 },
  { "afsr0_el3",        CPENC(3,6,C5,C1,0),   0 },
  { "afsr0_el12",       CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
  { "afsr1_el3",        CPENC(3,6,C5,C1,1),   0 },
  { "afsr1_el12",       CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
  { "esr_el1",          CPENC(3,0,C5,C2,0),   0 },
  { "esr_el2",          CPENC(3,4,C5,C2,0),   0 },
  { "esr_el3",          CPENC(3,6,C5,C2,0),   0 },
  { "esr_el12",         CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
  { "fpexc32_el2",      CPENC(3,4,C5,C3,0),   0 },
  { "far_el1",          CPENC(3,0,C6,C0,0),   0 },
  { "far_el2",          CPENC(3,4,C6,C0,0),   0 },
  { "far_el3",          CPENC(3,6,C6,C0,0),   0 },
  { "far_el12",         CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
  { "hpfar_el2",        CPENC(3,4,C6,C0,4),   0 },
  { "par_el1",          CPENC(3,0,C7,C4,0),   0 },
  { "mair_el1",         CPENC(3,0,C10,C2,0),  0 },
  { "mair_el2",         CPENC(3,4,C10,C2,0),  0 },
  { "mair_el3",         CPENC(3,6,C10,C2,0),  0 },
  { "mair_el12",        CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
  { "amair_el1",        CPENC(3,0,C10,C3,0),  0 },
  { "amair_el2",        CPENC(3,4,C10,C3,0),  0 },
  { "amair_el3",        CPENC(3,6,C10,C3,0),  0 },
  { "amair_el12",       CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
  { "vbar_el1",         CPENC(3,0,C12,C0,0),  0 },
  { "vbar_el2",         CPENC(3,4,C12,C0,0),  0 },
  { "vbar_el3",         CPENC(3,6,C12,C0,0),  0 },
  { "vbar_el12",        CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
  { "rvbar_el1",        CPENC(3,0,C12,C0,1),  0 }, /* RO */
  { "rvbar_el2",        CPENC(3,4,C12,C0,1),  0 }, /* RO */
  { "rvbar_el3",        CPENC(3,6,C12,C0,1),  0 }, /* RO */
  { "rmr_el1",          CPENC(3,0,C12,C0,2),  0 },
  { "rmr_el2",          CPENC(3,4,C12,C0,2),  0 },
  { "rmr_el3",          CPENC(3,6,C12,C0,2),  0 },
  { "isr_el1",          CPENC(3,0,C12,C1,0),  0 }, /* RO */
  { "contextidr_el1",   CPENC(3,0,C13,C0,1),  0 },
  { "contextidr_el2",   CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
  { "contextidr_el12",  CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
  { "tpidr_el0",        CPENC(3,3,C13,C0,2),  0 },
  { "tpidrro_el0",      CPENC(3,3,C13,C0,3),  0 }, /* RO */
  { "tpidr_el1",        CPENC(3,0,C13,C0,4),  0 },
  { "tpidr_el2",        CPENC(3,4,C13,C0,2),  0 },
  { "tpidr_el3",        CPENC(3,6,C13,C0,2),  0 },
  { "teecr32_el1",      CPENC(2,2,C0, C0,0),  0 }, /* See section 3.9.7.1 */
  /* Generic timer registers.  */
  { "cntfrq_el0",       CPENC(3,3,C14,C0,0),  0 }, /* RO */
  { "cntpct_el0",       CPENC(3,3,C14,C0,1),  0 }, /* RO */
  { "cntvct_el0",       CPENC(3,3,C14,C0,2),  0 }, /* RO */
  { "cntvoff_el2",      CPENC(3,4,C14,C0,3),  0 },
  { "cntkctl_el1",      CPENC(3,0,C14,C1,0),  0 },
  { "cntkctl_el12",     CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
  { "cnthctl_el2",      CPENC(3,4,C14,C1,0),  0 },
  { "cntp_tval_el0",    CPENC(3,3,C14,C2,0),  0 },
  { "cntp_tval_el02",   CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
  { "cntp_ctl_el0",     CPENC(3,3,C14,C2,1),  0 },
  { "cntp_ctl_el02",    CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
  { "cntp_cval_el0",    CPENC(3,3,C14,C2,2),  0 },
  { "cntp_cval_el02",   CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
  { "cntv_tval_el0",    CPENC(3,3,C14,C3,0),  0 },
  { "cntv_tval_el02",   CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
  { "cntv_ctl_el0",     CPENC(3,3,C14,C3,1),  0 },
  { "cntv_ctl_el02",    CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
  { "cntv_cval_el0",    CPENC(3,3,C14,C3,2),  0 },
  { "cntv_cval_el02",   CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
  { "cnthp_tval_el2",   CPENC(3,4,C14,C2,0),  0 },
  { "cnthp_ctl_el2",    CPENC(3,4,C14,C2,1),  0 },
  { "cnthp_cval_el2",   CPENC(3,4,C14,C2,2),  0 },
  { "cntps_tval_el1",   CPENC(3,7,C14,C2,0),  0 },
  { "cntps_ctl_el1",    CPENC(3,7,C14,C2,1),  0 },
  { "cntps_cval_el1",   CPENC(3,7,C14,C2,2),  0 },
  { "cnthv_tval_el2",   CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
  { "cnthv_ctl_el2",    CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
  { "cnthv_cval_el2",   CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
  { "dacr32_el2",       CPENC(3,4,C3,C0,0),   0 },
  { "ifsr32_el2",       CPENC(3,4,C5,C0,1),   0 },
  { "teehbr32_el1",     CPENC(2,2,C1,C0,0),   0 },
  { "sder32_el3",       CPENC(3,6,C1,C1,1),   0 },
  /* Debug registers (op0 == 2).  */
  { "mdscr_el1",        CPENC(2,0,C0, C2, 2), 0 },
  { "mdccsr_el0",       CPENC(2,3,C0, C1, 0), 0 }, /* r */
  { "mdccint_el1",      CPENC(2,0,C0, C2, 0), 0 },
  { "dbgdtr_el0",       CPENC(2,3,C0, C4, 0), 0 },
  { "dbgdtrrx_el0",     CPENC(2,3,C0, C5, 0), 0 }, /* r */
  { "dbgdtrtx_el0",     CPENC(2,3,C0, C5, 0), 0 }, /* w */
  { "osdtrrx_el1",      CPENC(2,0,C0, C0, 2), 0 }, /* r */
  { "osdtrtx_el1",      CPENC(2,0,C0, C3, 2), 0 }, /* w */
  { "oseccr_el1",       CPENC(2,0,C0, C6, 2), 0 },
  { "dbgvcr32_el2",     CPENC(2,4,C0, C7, 0), 0 },
  /* Breakpoint value/control registers: CRm selects the breakpoint.  */
  { "dbgbvr0_el1",      CPENC(2,0,C0, C0, 4), 0 },
  { "dbgbvr1_el1",      CPENC(2,0,C0, C1, 4), 0 },
  { "dbgbvr2_el1",      CPENC(2,0,C0, C2, 4), 0 },
  { "dbgbvr3_el1",      CPENC(2,0,C0, C3, 4), 0 },
  { "dbgbvr4_el1",      CPENC(2,0,C0, C4, 4), 0 },
  { "dbgbvr5_el1",      CPENC(2,0,C0, C5, 4), 0 },
  { "dbgbvr6_el1",      CPENC(2,0,C0, C6, 4), 0 },
  { "dbgbvr7_el1",      CPENC(2,0,C0, C7, 4), 0 },
  { "dbgbvr8_el1",      CPENC(2,0,C0, C8, 4), 0 },
  { "dbgbvr9_el1",      CPENC(2,0,C0, C9, 4), 0 },
  { "dbgbvr10_el1",     CPENC(2,0,C0, C10,4), 0 },
  { "dbgbvr11_el1",     CPENC(2,0,C0, C11,4), 0 },
  { "dbgbvr12_el1",     CPENC(2,0,C0, C12,4), 0 },
  { "dbgbvr13_el1",     CPENC(2,0,C0, C13,4), 0 },
  { "dbgbvr14_el1",     CPENC(2,0,C0, C14,4), 0 },
  { "dbgbvr15_el1",     CPENC(2,0,C0, C15,4), 0 },
  { "dbgbcr0_el1",      CPENC(2,0,C0, C0, 5), 0 },
  { "dbgbcr1_el1",      CPENC(2,0,C0, C1, 5), 0 },
  { "dbgbcr2_el1",      CPENC(2,0,C0, C2, 5), 0 },
  { "dbgbcr3_el1",      CPENC(2,0,C0, C3, 5), 0 },
  { "dbgbcr4_el1",      CPENC(2,0,C0, C4, 5), 0 },
  { "dbgbcr5_el1",      CPENC(2,0,C0, C5, 5), 0 },
  { "dbgbcr6_el1",      CPENC(2,0,C0, C6, 5), 0 },
  { "dbgbcr7_el1",      CPENC(2,0,C0, C7, 5), 0 },
  { "dbgbcr8_el1",      CPENC(2,0,C0, C8, 5), 0 },
  { "dbgbcr9_el1",      CPENC(2,0,C0, C9, 5), 0 },
  { "dbgbcr10_el1",     CPENC(2,0,C0, C10,5), 0 },
  { "dbgbcr11_el1",     CPENC(2,0,C0, C11,5), 0 },
  { "dbgbcr12_el1",     CPENC(2,0,C0, C12,5), 0 },
  { "dbgbcr13_el1",     CPENC(2,0,C0, C13,5), 0 },
  { "dbgbcr14_el1",     CPENC(2,0,C0, C14,5), 0 },
  { "dbgbcr15_el1",     CPENC(2,0,C0, C15,5), 0 },
  /* Watchpoint value/control registers: CRm selects the watchpoint.  */
  { "dbgwvr0_el1",      CPENC(2,0,C0, C0, 6), 0 },
  { "dbgwvr1_el1",      CPENC(2,0,C0, C1, 6), 0 },
  { "dbgwvr2_el1",      CPENC(2,0,C0, C2, 6), 0 },
  { "dbgwvr3_el1",      CPENC(2,0,C0, C3, 6), 0 },
  { "dbgwvr4_el1",      CPENC(2,0,C0, C4, 6), 0 },
  { "dbgwvr5_el1",      CPENC(2,0,C0, C5, 6), 0 },
  { "dbgwvr6_el1",      CPENC(2,0,C0, C6, 6), 0 },
  { "dbgwvr7_el1",      CPENC(2,0,C0, C7, 6), 0 },
  { "dbgwvr8_el1",      CPENC(2,0,C0, C8, 6), 0 },
  { "dbgwvr9_el1",      CPENC(2,0,C0, C9, 6), 0 },
  { "dbgwvr10_el1",     CPENC(2,0,C0, C10,6), 0 },
  { "dbgwvr11_el1",     CPENC(2,0,C0, C11,6), 0 },
  { "dbgwvr12_el1",     CPENC(2,0,C0, C12,6), 0 },
  { "dbgwvr13_el1",     CPENC(2,0,C0, C13,6), 0 },
  { "dbgwvr14_el1",     CPENC(2,0,C0, C14,6), 0 },
  { "dbgwvr15_el1",     CPENC(2,0,C0, C15,6), 0 },
  { "dbgwcr0_el1",      CPENC(2,0,C0, C0, 7), 0 },
  { "dbgwcr1_el1",      CPENC(2,0,C0, C1, 7), 0 },
  { "dbgwcr2_el1",      CPENC(2,0,C0, C2, 7), 0 },
  { "dbgwcr3_el1",      CPENC(2,0,C0, C3, 7), 0 },
  { "dbgwcr4_el1",      CPENC(2,0,C0, C4, 7), 0 },
  { "dbgwcr5_el1",      CPENC(2,0,C0, C5, 7), 0 },
  { "dbgwcr6_el1",      CPENC(2,0,C0, C6, 7), 0 },
  { "dbgwcr7_el1",      CPENC(2,0,C0, C7, 7), 0 },
  { "dbgwcr8_el1",      CPENC(2,0,C0, C8, 7), 0 },
  { "dbgwcr9_el1",      CPENC(2,0,C0, C9, 7), 0 },
  { "dbgwcr10_el1",     CPENC(2,0,C0, C10,7), 0 },
  { "dbgwcr11_el1",     CPENC(2,0,C0, C11,7), 0 },
  { "dbgwcr12_el1",     CPENC(2,0,C0, C12,7), 0 },
  { "dbgwcr13_el1",     CPENC(2,0,C0, C13,7), 0 },
  { "dbgwcr14_el1",     CPENC(2,0,C0, C14,7), 0 },
  { "dbgwcr15_el1",     CPENC(2,0,C0, C15,7), 0 },
  { "mdrar_el1",        CPENC(2,0,C1, C0, 0), 0 }, /* r */
  { "oslar_el1",        CPENC(2,0,C1, C0, 4), 0 }, /* w */
  { "oslsr_el1",        CPENC(2,0,C1, C1, 4), 0 }, /* r */
  { "osdlr_el1",        CPENC(2,0,C1, C3, 4), 0 },
  { "dbgprcr_el1",      CPENC(2,0,C1, C4, 4), 0 },
  { "dbgclaimset_el1",  CPENC(2,0,C7, C8, 6), 0 },
  { "dbgclaimclr_el1",  CPENC(2,0,C7, C9, 6), 0 },
  { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
  /* Performance monitor registers.  */
  { "pmcr_el0",         CPENC(3,3,C9,C12, 0), 0 },
  { "pmcntenset_el0",   CPENC(3,3,C9,C12, 1), 0 },
  { "pmcntenclr_el0",   CPENC(3,3,C9,C12, 2), 0 },
  { "pmovsclr_el0",     CPENC(3,3,C9,C12, 3), 0 },
  { "pmswinc_el0",      CPENC(3,3,C9,C12, 4), 0 }, /* w */
  { "pmselr_el0",       CPENC(3,3,C9,C12, 5), 0 },
  { "pmceid0_el0",      CPENC(3,3,C9,C12, 6), 0 }, /* r */
  { "pmceid1_el0",      CPENC(3,3,C9,C12, 7), 0 }, /* r */
  { "pmccntr_el0",      CPENC(3,3,C9,C13, 0), 0 },
  { "pmxevtyper_el0",   CPENC(3,3,C9,C13, 1), 0 },
  { "pmxevcntr_el0",    CPENC(3,3,C9,C13, 2), 0 },
  { "pmuserenr_el0",    CPENC(3,3,C9,C14, 0), 0 },
  { "pmintenset_el1",   CPENC(3,0,C9,C14, 1), 0 },
  { "pmintenclr_el1",   CPENC(3,0,C9,C14, 2), 0 },
  { "pmovsset_el0",     CPENC(3,3,C9,C14, 3), 0 },
  /* Event counters: CRm = C8 + n/8, op2 = n % 8.  */
  { "pmevcntr0_el0",    CPENC(3,3,C14,C8, 0), 0 },
  { "pmevcntr1_el0",    CPENC(3,3,C14,C8, 1), 0 },
  { "pmevcntr2_el0",    CPENC(3,3,C14,C8, 2), 0 },
  { "pmevcntr3_el0",    CPENC(3,3,C14,C8, 3), 0 },
  { "pmevcntr4_el0",    CPENC(3,3,C14,C8, 4), 0 },
  { "pmevcntr5_el0",    CPENC(3,3,C14,C8, 5), 0 },
  { "pmevcntr6_el0",    CPENC(3,3,C14,C8, 6), 0 },
  { "pmevcntr7_el0",    CPENC(3,3,C14,C8, 7), 0 },
  { "pmevcntr8_el0",    CPENC(3,3,C14,C9, 0), 0 },
  { "pmevcntr9_el0",    CPENC(3,3,C14,C9, 1), 0 },
  { "pmevcntr10_el0",   CPENC(3,3,C14,C9, 2), 0 },
  { "pmevcntr11_el0",   CPENC(3,3,C14,C9, 3), 0 },
  { "pmevcntr12_el0",   CPENC(3,3,C14,C9, 4), 0 },
  { "pmevcntr13_el0",   CPENC(3,3,C14,C9, 5), 0 },
  { "pmevcntr14_el0",   CPENC(3,3,C14,C9, 6), 0 },
  { "pmevcntr15_el0",   CPENC(3,3,C14,C9, 7), 0 },
  { "pmevcntr16_el0",   CPENC(3,3,C14,C10,0), 0 },
  { "pmevcntr17_el0",   CPENC(3,3,C14,C10,1), 0 },
  { "pmevcntr18_el0",   CPENC(3,3,C14,C10,2), 0 },
  { "pmevcntr19_el0",   CPENC(3,3,C14,C10,3), 0 },
  { "pmevcntr20_el0",   CPENC(3,3,C14,C10,4), 0 },
  { "pmevcntr21_el0",   CPENC(3,3,C14,C10,5), 0 },
  { "pmevcntr22_el0",   CPENC(3,3,C14,C10,6), 0 },
  { "pmevcntr23_el0",   CPENC(3,3,C14,C10,7), 0 },
  { "pmevcntr24_el0",   CPENC(3,3,C14,C11,0), 0 },
  { "pmevcntr25_el0",   CPENC(3,3,C14,C11,1), 0 },
  { "pmevcntr26_el0",   CPENC(3,3,C14,C11,2), 0 },
  { "pmevcntr27_el0",   CPENC(3,3,C14,C11,3), 0 },
  { "pmevcntr28_el0",   CPENC(3,3,C14,C11,4), 0 },
  { "pmevcntr29_el0",   CPENC(3,3,C14,C11,5), 0 },
  { "pmevcntr30_el0",   CPENC(3,3,C14,C11,6), 0 },
  /* Event type selectors: CRm = C12 + n/8, op2 = n % 8.  */
  { "pmevtyper0_el0",   CPENC(3,3,C14,C12,0), 0 },
  { "pmevtyper1_el0",   CPENC(3,3,C14,C12,1), 0 },
  { "pmevtyper2_el0",   CPENC(3,3,C14,C12,2), 0 },
  { "pmevtyper3_el0",   CPENC(3,3,C14,C12,3), 0 },
  { "pmevtyper4_el0",   CPENC(3,3,C14,C12,4), 0 },
  { "pmevtyper5_el0",   CPENC(3,3,C14,C12,5), 0 },
  { "pmevtyper6_el0",   CPENC(3,3,C14,C12,6), 0 },
  { "pmevtyper7_el0",   CPENC(3,3,C14,C12,7), 0 },
  { "pmevtyper8_el0",   CPENC(3,3,C14,C13,0), 0 },
  { "pmevtyper9_el0",   CPENC(3,3,C14,C13,1), 0 },
  { "pmevtyper10_el0",  CPENC(3,3,C14,C13,2), 0 },
  { "pmevtyper11_el0",  CPENC(3,3,C14,C13,3), 0 },
  { "pmevtyper12_el0",  CPENC(3,3,C14,C13,4), 0 },
  { "pmevtyper13_el0",  CPENC(3,3,C14,C13,5), 0 },
  { "pmevtyper14_el0",  CPENC(3,3,C14,C13,6), 0 },
  { "pmevtyper15_el0",  CPENC(3,3,C14,C13,7), 0 },
  { "pmevtyper16_el0",  CPENC(3,3,C14,C14,0), 0 },
  { "pmevtyper17_el0",  CPENC(3,3,C14,C14,1), 0 },
  { "pmevtyper18_el0",  CPENC(3,3,C14,C14,2), 0 },
  { "pmevtyper19_el0",  CPENC(3,3,C14,C14,3), 0 },
  { "pmevtyper20_el0",  CPENC(3,3,C14,C14,4), 0 },
  { "pmevtyper21_el0",  CPENC(3,3,C14,C14,5), 0 },
  { "pmevtyper22_el0",  CPENC(3,3,C14,C14,6), 0 },
  { "pmevtyper23_el0",  CPENC(3,3,C14,C14,7), 0 },
  { "pmevtyper24_el0",  CPENC(3,3,C14,C15,0), 0 },
  { "pmevtyper25_el0",  CPENC(3,3,C14,C15,1), 0 },
  { "pmevtyper26_el0",  CPENC(3,3,C14,C15,2), 0 },
  { "pmevtyper27_el0",  CPENC(3,3,C14,C15,3), 0 },
  { "pmevtyper28_el0",  CPENC(3,3,C14,C15,4), 0 },
  { "pmevtyper29_el0",  CPENC(3,3,C14,C15,5), 0 },
  { "pmevtyper30_el0",  CPENC(3,3,C14,C15,6), 0 },
  { "pmccfiltr_el0",    CPENC(3,3,C14,C15,7), 0 },
  { 0, CPENC(0,0,0,0,0), 0 },
};
3105 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg
*reg
)
3107 return (reg
->flags
& F_DEPRECATED
) != 0;
/* Return TRUE if the system register REG is available when assembling
   for the feature set FEATURES.  Registers without the F_ARCHEXT flag
   belong to the base architecture and are always accepted; otherwise
   the register encoding is matched against the architecture extension
   that introduced it.  The CPENC/CPEN_ values below mirror entries in
   aarch64_sys_regs.  */
bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  /* Registers that are not architecture dependent are always valid.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  /* ttbr1_el2, contextidr_el2 and the cnthv_*_el2 timers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)         /* spsr_el12 */
       || reg->value == CPEN_ (5, C0, 1)      /* elr_el12 */
       || reg->value == CPENC (3, 5, C1, C0, 0)   /* sctlr_el12 */
       || reg->value == CPENC (3, 5, C1, C0, 2)   /* cpacr_el12 */
       || reg->value == CPENC (3, 5, C2, C0, 0)   /* ttbr0_el12 */
       || reg->value == CPENC (3, 5, C2, C0, 1)   /* ttbr1_el12 */
       || reg->value == CPENC (3, 5, C2, C0, 2)   /* tcr_el12 */
       || reg->value == CPENC (3, 5, C5, C1, 0)   /* afsr0_el12 */
       || reg->value == CPENC (3, 5, C5, C1, 1)   /* afsr1_el12 */
       || reg->value == CPENC (3, 5, C5, C2, 0)   /* esr_el12 */
       || reg->value == CPENC (3, 5, C6, C0, 0)   /* far_el12 */
       || reg->value == CPENC (3, 5, C10, C2, 0)  /* mair_el12 */
       || reg->value == CPENC (3, 5, C10, C3, 0)  /* amair_el12 */
       || reg->value == CPENC (3, 5, C12, C0, 0)  /* vbar_el12 */
       || reg->value == CPENC (3, 5, C13, C0, 1)  /* contextidr_el12 */
       || reg->value == CPENC (3, 5, C14, C1, 0)) /* cntkctl_el12 */
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)     /* cntp_tval_el02 */
       || reg->value == CPENC (3, 5, C14, C2, 1)  /* cntp_ctl_el02 */
       || reg->value == CPENC (3, 5, C14, C2, 2)  /* cntp_cval_el02 */
       || reg->value == CPENC (3, 5, C14, C3, 0)  /* cntv_tval_el02 */
       || reg->value == CPENC (3, 5, C14, C3, 1)  /* cntv_ctl_el02 */
       || reg->value == CPENC (3, 5, C14, C3, 2)) /* cntv_cval_el02 */
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */
  /* id_aa64mmfr2_el1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  return TRUE;
}
/* PSTATE fields accepted as the operand of the MSR (immediate)
   instruction.  Each entry is { name, encoded op1:op2 value, flags };
   the table is terminated by a null-name entry.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",   0x05, 0 },
  { "daifset", 0x1e, 0 },
  { "daifclr", 0x1f, 0 },
  { "pan",     0x04, F_ARCHEXT },
  { 0, CPENC(0,0,0,0,0), 0 },
};
3178 aarch64_pstatefield_supported_p (const aarch64_feature_set features
,
3179 const aarch64_sys_reg
*reg
)
3181 if (!(reg
->flags
& F_ARCHEXT
))
3184 /* PAN. Values are from aarch64_pstatefields. */
3185 if (reg
->value
== 0x04
3186 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
/* Instruction-cache maintenance operations (IC).  Each entry is
   { name, CPENS-packed encoding, has_xt }, where the last member is
   non-zero when the operation takes an Xt address-register operand.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic [] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS(3,C7,C5,1), 1 },
    { 0, CPENS(0,0,0,0), 0 }
};
/* Data-cache maintenance operations (DC).  Same entry layout as the
   IC table above; all of these take an Xt operand.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc [] =
{
    { "zva",   CPENS(3,C7,C4,1),  1 },
    { "ivac",  CPENS(0,C7,C6,1),  1 },
    { "isw",   CPENS(0,C7,C6,2),  1 },
    { "cvac",  CPENS(3,C7,C10,1), 1 },
    { "csw",   CPENS(0,C7,C10,2), 1 },
    { "cvau",  CPENS(3,C7,C11,1), 1 },
    { "civac", CPENS(3,C7,C14,1), 1 },
    { "cisw",  CPENS(0,C7,C14,2), 1 },
    { 0, CPENS(0,0,0,0), 0 }
};
/* Address-translation operations (AT).  Same entry layout as the IC
   table above; all of these take an Xt operand.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at [] =
{
    { "s1e1r",  CPENS(0,C7,C8,0), 1 },
    { "s1e1w",  CPENS(0,C7,C8,1), 1 },
    { "s1e0r",  CPENS(0,C7,C8,2), 1 },
    { "s1e0w",  CPENS(0,C7,C8,3), 1 },
    { "s12e1r", CPENS(4,C7,C8,4), 1 },
    { "s12e1w", CPENS(4,C7,C8,5), 1 },
    { "s12e0r", CPENS(4,C7,C8,6), 1 },
    { "s12e0w", CPENS(4,C7,C8,7), 1 },
    { "s1e2r",  CPENS(4,C7,C8,0), 1 },
    { "s1e2w",  CPENS(4,C7,C8,1), 1 },
    { "s1e3r",  CPENS(6,C7,C8,0), 1 },
    { "s1e3w",  CPENS(6,C7,C8,1), 1 },
    { 0, CPENS(0,0,0,0), 0 }
};
/* TLB maintenance operations (TLBI).  Same entry layout as the IC
   table above; the last member is non-zero for operations that take an
   Xt operand (the whole-TLB "all*"/"vmall*" forms take none).  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi [] =
{
    { "vmalle1",      CPENS(0,C8,C7,0), 0 },
    { "vae1",         CPENS(0,C8,C7,1), 1 },
    { "aside1",       CPENS(0,C8,C7,2), 1 },
    { "vaae1",        CPENS(0,C8,C7,3), 1 },
    { "vmalle1is",    CPENS(0,C8,C3,0), 0 },
    { "vae1is",       CPENS(0,C8,C3,1), 1 },
    { "aside1is",     CPENS(0,C8,C3,2), 1 },
    { "vaae1is",      CPENS(0,C8,C3,3), 1 },
    { "ipas2e1is",    CPENS(4,C8,C0,1), 1 },
    { "ipas2le1is",   CPENS(4,C8,C0,5), 1 },
    { "ipas2e1",      CPENS(4,C8,C4,1), 1 },
    { "ipas2le1",     CPENS(4,C8,C4,5), 1 },
    { "vae2",         CPENS(4,C8,C7,1), 1 },
    { "vae2is",       CPENS(4,C8,C3,1), 1 },
    { "vmalls12e1",   CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is", CPENS(4,C8,C3,6), 0 },
    { "vae3",         CPENS(6,C8,C7,1), 1 },
    { "vae3is",       CPENS(6,C8,C3,1), 1 },
    { "alle2",        CPENS(4,C8,C7,0), 0 },
    { "alle2is",      CPENS(4,C8,C3,0), 0 },
    { "alle1",        CPENS(4,C8,C7,4), 0 },
    { "alle1is",      CPENS(4,C8,C3,4), 0 },
    { "alle3",        CPENS(6,C8,C7,0), 0 },
    { "alle3is",      CPENS(6,C8,C3,0), 0 },
    { "vale1is",      CPENS(0,C8,C3,5), 1 },
    { "vale2is",      CPENS(4,C8,C3,5), 1 },
    { "vale3is",      CPENS(6,C8,C3,5), 1 },
    { "vaale1is",     CPENS(0,C8,C3,7), 1 },
    { "vale1",        CPENS(0,C8,C7,5), 1 },
    { "vale2",        CPENS(4,C8,C7,5), 1 },
    { "vale3",        CPENS(6,C8,C7,5), 1 },
    { "vaale1",       CPENS(0,C8,C7,7), 1 },
    { 0, CPENS(0,0,0,0), 0 }
};
/* Include the opcode description table as well as the operand description
   table.  */
#include "aarch64-tbl.h"