opcodes/aarch64-opc.c — AArch64 opcode support library (part of binutils-gdb)
[deliverable/binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2021 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Runtime switch for the DEBUG_TRACE/dump helpers in this file; non-zero
   enables verbose qualifier-matching dumps.  */
int debug_dump = false;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning.
   Indexed directly by the 5-bit encoding of the pattern operand,
   so the array must stay exactly 32 entries long.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning.
   Indexed directly by the 4-bit prfop encoding (16 entries).  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bool
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return (qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q);
110 }
111
112 static inline bool
113 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
114 {
115 return (qualifier >= AARCH64_OPND_QLF_S_B
116 && qualifier <= AARCH64_OPND_QLF_S_Q);
117 }
118
/* Classification of an instruction's qualifier sequence; used to decide
   which operand carries the significant size:Q information.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};

/* Indexed by enum data_pattern; gives the operand index whose qualifier
   determines the size:Q encoding for that pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
136
137 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
138 the data pattern.
139 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
140 corresponds to one of a sequence of operands. */
141
142 static enum data_pattern
143 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
144 {
145 if (vector_qualifier_p (qualifiers[0]))
146 {
147 /* e.g. v.4s, v.4s, v.4s
148 or v.4h, v.4h, v.h[3]. */
149 if (qualifiers[0] == qualifiers[1]
150 && vector_qualifier_p (qualifiers[2])
151 && (aarch64_get_qualifier_esize (qualifiers[0])
152 == aarch64_get_qualifier_esize (qualifiers[1]))
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[2])))
155 return DP_VECTOR_3SAME;
156 /* e.g. v.8h, v.8b, v.8b.
157 or v.4s, v.4h, v.h[2].
158 or v.8h, v.16b. */
159 if (vector_qualifier_p (qualifiers[1])
160 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
161 && (aarch64_get_qualifier_esize (qualifiers[0])
162 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
163 return DP_VECTOR_LONG;
164 /* e.g. v.8h, v.8h, v.8b. */
165 if (qualifiers[0] == qualifiers[1]
166 && vector_qualifier_p (qualifiers[2])
167 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
168 && (aarch64_get_qualifier_esize (qualifiers[0])
169 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[1])))
172 return DP_VECTOR_WIDE;
173 }
174 else if (fp_qualifier_p (qualifiers[0]))
175 {
176 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
177 if (vector_qualifier_p (qualifiers[1])
178 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
179 return DP_VECTOR_ACROSS_LANES;
180 }
181
182 return DP_UNKNOWN;
183 }
184
185 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
186 the AdvSIMD instructions. */
187 /* N.B. it is possible to do some optimization that doesn't call
188 get_data_pattern each time when we need to select an operand. We can
189 either buffer the caculated the result or statically generate the data,
190 however, it is not obvious that the optimization will bring significant
191 benefit. */
192
193 int
194 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
195 {
196 return
197 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
198 }
199 \f
/* Instruction bit-field descriptors: { lsb, width } pairs, indexed by the
   FLD_* enumerators (enum aarch64_field_kind) declared in aarch64-opc.h.
   The order here must match that enumeration exactly.  */
const aarch64_field fields[] =
{
    {  0,  0 },	/* NIL.  */
    {  0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    {  5, 19 },	/* imm19: e.g. in CBZ.  */
    {  5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29,  2 },	/* immlo: e.g. in ADRP.  */
    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
    {  0,  5 },	/* Rt: in load/store instructions.  */
    {  0,  5 },	/* Rd: in many integer instructions.  */
    {  5,  5 },	/* Rn: in many integer instructions.  */
    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
    { 10,  5 },	/* Ra: in fp instructions.  */
    {  5,  3 },	/* op2: in the system instructions.  */
    {  8,  4 },	/* CRm: in the system instructions.  */
    { 12,  4 },	/* CRn: in the system instructions.  */
    { 16,  3 },	/* op1: in the system instructions.  */
    { 19,  2 },	/* op0: in the system instructions.  */
    { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12,  4 },	/* cond: condition flags as a source operand.  */
    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12,  1 },	/* S: in load/store reg offset instructions.  */
    { 21,  2 },	/* hw: in move wide constant instructions.  */
    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22,  2 },	/* type: floating point type field in fp data inst.  */
    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 15,  6 },	/* imm6_2: in rmif instructions.  */
    { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    {  0,  4 },	/* imm4_2: in rmif instructions.  */
    { 10,  4 },	/* imm4_3: in adddg/subg instructions.  */
    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
    {  5, 16 },	/* imm16: in exception instructions.  */
    {  0, 16 },	/* imm16_2: in udf instruction.  */
    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22,  1 },	/* S: in LDRAA and LDRAB instructions.  */
    { 22,  1 },	/* N: in logical (immediate) instructions.  */
    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31,  1 },	/* sf: in integer data processing instructions.  */
    { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    {  4,  1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
    { 14,  1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
    { 16,  1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
    { 17,  1 },	/* SVE_N: SVE equivalent of N.  */
    {  0,  4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10,  3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    {  5,  4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10,  4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16,  4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16,  4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    {  5,  4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    {  0,  4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    {  5,  5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16,  5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    {  0,  5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    {  5,  5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16,  5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    {  0,  5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    {  5,  5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16,  5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    {  5,  5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    {  0,  5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    {  5,  1 },	/* SVE_i1: single-bit immediate.  */
    { 22,  1 },	/* SVE_i3h: high bit of 3-bit immediate.  */
    { 11,  1 },	/* SVE_i3l: low bit of 3-bit immediate.  */
    { 19,  2 },	/* SVE_i3h2: two high bits of 3bit immediate, bits [20,19].  */
    { 20,  1 },	/* SVE_i2h: high bit of 2bit immediate, bits.  */
    { 16,  3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16,  4 },	/* SVE_imm4: 4-bit immediate field.  */
    {  5,  5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16,  5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16,  6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14,  7 },	/* SVE_imm7: 7-bit immediate field.  */
    {  5,  8 },	/* SVE_imm8: 8-bit immediate field.  */
    {  5,  9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11,  6 },	/* SVE_immr: SVE equivalent of immr.  */
    {  5,  6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10,  2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    {  5,  5 },	/* SVE_pattern: vector pattern enumeration.  */
    {  0,  4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16,  1 },	/* SVE_rot1: 1-bit rotation amount.  */
    { 10,  2 },	/* SVE_rot2: 2-bit rotation amount.  */
    { 10,  1 },	/* SVE_rot3: 1-bit rotation amount at bit 10.  */
    { 22,  1 },	/* SVE_sz: 1-bit element size select.  */
    { 17,  2 },	/* SVE_size: 2-bit element size, bits [18,17].  */
    { 30,  1 },	/* SVE_sz2: 1-bit element size select.  */
    { 16,  4 },	/* SVE_tsz: triangular size select.  */
    { 22,  2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    {  8,  2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19,  2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14,  1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22,  1 },	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 11,  2 },	/* rotate1: FCMLA immediate rotate.  */
    { 13,  2 },	/* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12,  1 },	/* rotate3: FCADD immediate rotate.  */
    { 12,  2 },	/* SM3: Indexed element SM3 2 bits index immediate.  */
    { 22,  1 },	/* sz: 1-bit element size select.  */
    { 10,  2 },	/* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>.  */
};
331
332 enum aarch64_operand_class
333 aarch64_get_operand_class (enum aarch64_opnd type)
334 {
335 return aarch64_operands[type].op_class;
336 }
337
338 const char *
339 aarch64_get_operand_name (enum aarch64_opnd type)
340 {
341 return aarch64_operands[type].name;
342 }
343
344 /* Get operand description string.
345 This is usually for the diagnosis purpose. */
346 const char *
347 aarch64_get_operand_desc (enum aarch64_opnd type)
348 {
349 return aarch64_operands[type].desc;
350 }
351
/* Table of all conditional affixes, indexed by the 4-bit condition code.
   Each entry lists the base-architecture mnemonic(s) first, followed by
   the SVE alias name(s) for the same encoding.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
372
373 const aarch64_cond *
374 get_cond_from_value (aarch64_insn value)
375 {
376 assert (value < 16);
377 return &aarch64_conds[(unsigned int) value];
378 }
379
380 const aarch64_cond *
381 get_inverted_cond (const aarch64_cond *cond)
382 {
383 return &aarch64_conds[cond->value ^ 0x1];
384 }
385
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   The list is NULL-terminated.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
411
412 enum aarch64_modifier_kind
413 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
414 {
415 return desc - aarch64_operand_modifiers;
416 }
417
418 aarch64_insn
419 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
420 {
421 return aarch64_operand_modifiers[kind].value;
422 }
423
424 enum aarch64_modifier_kind
425 aarch64_get_operand_modifier_from_value (aarch64_insn value,
426 bool extend_p)
427 {
428 if (extend_p)
429 return AARCH64_MOD_UXTB + value;
430 else
431 return AARCH64_MOD_LSL - value;
432 }
433
434 bool
435 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
436 {
437 return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
438 }
439
440 static inline bool
441 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
442 {
443 return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
444 }
445
/* DMB/DSB/ISB barrier option names, indexed by the 4-bit CRm encoding.
   Reserved encodings print as their raw "#0xNN" form.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};

/* DSB nXS barrier option names; the value stores the alias #imm, whose
   top two bits are what is encoded in CRm<3:2> (see the CRm_dsb_nxs
   field above).  */
const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
{                       /*  CRm<3:2>  #imm  */
    { "oshnxs", 16 },    /*    00       16   */
    { "nshnxs", 20 },    /*    01       20   */
    { "ishnxs", 24 },    /*    10       24   */
    { "synxs",  28 },    /*    11       28   */
};
473
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.  */
  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
  { "c",	HINT_OPD_C },		/* BTI C.  */
  { "j",	HINT_OPD_J },		/* BTI J.  */
  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
  { NULL,	HINT_OPD_NULL },
};
491
/* PRFM prefetch operation names, indexed by the 5-bit prfop encoding.
   NULL entries are reserved encodings (printed numerically elsewhere).

   op -> op: load = 0 instruction = 1 store = 2
   l  -> level: 1-3
   t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1.  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
532 \f
533 /* Utilities on value constraint. */
534
/* Return 1 if LOW <= VALUE <= HIGH, 0 otherwise.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  return value <= high ? 1 : 0;
}
540
/* Return non-zero if VALUE is a multiple of ALIGN.  Note that for a
   negative VALUE, C99 truncating division makes VALUE % ALIGN zero
   exactly when VALUE is a multiple of ALIGN, as required.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return value % align ? 0 : 1;
}
547
/* Return 1 if VALUE fits in a signed two's-complement field of WIDTH
   bits, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t limit = (uint64_t) 1 << (width - 1);
    return (value >= -limit && value < limit) ? 1 : 0;
  }
}
561
/* Return 1 if VALUE fits in an unsigned field of WIDTH bits,
   i.e. 0 <= VALUE < 2^WIDTH.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t limit = (uint64_t) 1 << width;
    return (value >= 0 && value < limit) ? 1 : 0;
  }
}
575
576 /* Return 1 if OPERAND is SP or WSP. */
577 int
578 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
579 {
580 return ((aarch64_get_operand_class (operand->type)
581 == AARCH64_OPND_CLASS_INT_REG)
582 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
583 && operand->reg.regno == 31);
584 }
585
586 /* Return 1 if OPERAND is XZR or WZP. */
587 int
588 aarch64_zero_register_p (const aarch64_opnd_info *operand)
589 {
590 return ((aarch64_get_operand_class (operand->type)
591 == AARCH64_OPND_CLASS_INT_REG)
592 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
593 && operand->reg.regno == 31);
594 }
595
596 /* Return true if the operand *OPERAND that has the operand code
597 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
598 qualified by the qualifier TARGET. */
599
600 static inline int
601 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
602 aarch64_opnd_qualifier_t target)
603 {
604 switch (operand->qualifier)
605 {
606 case AARCH64_OPND_QLF_W:
607 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
608 return 1;
609 break;
610 case AARCH64_OPND_QLF_X:
611 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
612 return 1;
613 break;
614 case AARCH64_OPND_QLF_WSP:
615 if (target == AARCH64_OPND_QLF_W
616 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
617 return 1;
618 break;
619 case AARCH64_OPND_QLF_SP:
620 if (target == AARCH64_OPND_QLF_X
621 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
622 return 1;
623 break;
624 default:
625 break;
626 }
627
628 return 0;
629 }
630
631 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
632 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
633
634 Return NIL if more than one expected qualifiers are found. */
635
636 aarch64_opnd_qualifier_t
637 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
638 int idx,
639 const aarch64_opnd_qualifier_t known_qlf,
640 int known_idx)
641 {
642 int i, saved_i;
643
644 /* Special case.
645
646 When the known qualifier is NIL, we have to assume that there is only
647 one qualifier sequence in the *QSEQ_LIST and return the corresponding
648 qualifier directly. One scenario is that for instruction
649 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
650 which has only one possible valid qualifier sequence
651 NIL, S_D
652 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
653 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
654
655 Because the qualifier NIL has dual roles in the qualifier sequence:
656 it can mean no qualifier for the operand, or the qualifer sequence is
657 not in use (when all qualifiers in the sequence are NILs), we have to
658 handle this special case here. */
659 if (known_qlf == AARCH64_OPND_NIL)
660 {
661 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
662 return qseq_list[0][idx];
663 }
664
665 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
666 {
667 if (qseq_list[i][known_idx] == known_qlf)
668 {
669 if (saved_i != -1)
670 /* More than one sequences are found to have KNOWN_QLF at
671 KNOWN_IDX. */
672 return AARCH64_OPND_NIL;
673 saved_i = i;
674 }
675 }
676
677 return qseq_list[saved_i][idx];
678 }
679
/* Category of a qualifier; determines how the three data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,	/* data0/1/2 = esize, nelem, encoding value.  */
  OQK_VALUE_IN_RANGE,	/* data0/1 = lower/upper bound; data2 unused.  */
  OQK_MISC,		/* data fields unused.  */
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind
     (see enum operand_qualifier_kind above).  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
700
/* Indexed by the operand qualifier enumerators (aarch64_opnd_qualifier_t);
   the order must match that enumeration exactly.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
  {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
  {4, 1, 0x0, "2h", OQK_OPD_VARIANT},

  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc).  */
  {16, 0, 0, "tag", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.

     NOTE(review): the kind field below is 0, i.e. OQK_NIL, not OQK_MISC
     as the section comment suggests.  This is harmless as the code only
     tests for OQK_OPD_VARIANT and OQK_VALUE_IN_RANGE, but OQK_MISC was
     presumably intended — confirm before changing.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
762
763 static inline bool
764 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
765 {
766 return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
767 }
768
769 static inline bool
770 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
771 {
772 return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
773 }
774
775 const char*
776 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
777 {
778 return aarch64_opnd_qualifiers[qualifier].desc;
779 }
780
781 /* Given an operand qualifier, return the expected data element size
782 of a qualified operand. */
783 unsigned char
784 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
785 {
786 assert (operand_variant_qualifier_p (qualifier));
787 return aarch64_opnd_qualifiers[qualifier].data0;
788 }
789
790 unsigned char
791 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
792 {
793 assert (operand_variant_qualifier_p (qualifier));
794 return aarch64_opnd_qualifiers[qualifier].data1;
795 }
796
797 aarch64_insn
798 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
799 {
800 assert (operand_variant_qualifier_p (qualifier));
801 return aarch64_opnd_qualifiers[qualifier].data2;
802 }
803
804 static int
805 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
806 {
807 assert (qualifier_value_in_range_constraint_p (qualifier));
808 return aarch64_opnd_qualifiers[qualifier].data0;
809 }
810
811 static int
812 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
813 {
814 assert (qualifier_value_in_range_constraint_p (qualifier));
815 return aarch64_opnd_qualifiers[qualifier].data1;
816 }
817
#ifdef DEBUG_AARCH64
/* printf-style trace helper; prefixes "#### " and appends a newline.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}

/* Print one qualifier sequence (AARCH64_MAX_OPND_NUM entries) on a
   single trace line.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}

/* Trace the qualifiers currently attached to the operands in OPND
   against the candidate sequence QUALIFIER being matched.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
855
856 /* This function checks if the given instruction INSN is a destructive
857 instruction based on the usage of the registers. It does not recognize
858 unary destructive instructions. */
859 bool
860 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
861 {
862 int i = 0;
863 const enum aarch64_opnd *opnds = opcode->operands;
864
865 if (opnds[0] == AARCH64_OPND_NIL)
866 return false;
867
868 while (opnds[++i] != AARCH64_OPND_NIL)
869 if (opnds[i] == opnds[0])
870 return true;
871
872 return false;
873 }
874
875 /* TODO improve this, we can have an extra field at the runtime to
876 store the number of operands rather than calculating it every time. */
877
878 int
879 aarch64_num_of_operands (const aarch64_opcode *opcode)
880 {
881 int i = 0;
882 const enum aarch64_opnd *opnds = opcode->operands;
883 while (opnds[i++] != AARCH64_OPND_NIL)
884 ;
885 --i;
886 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
887 return i;
888 }
889
890 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
891 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
892
893 N.B. on the entry, it is very likely that only some operands in *INST
894 have had their qualifiers been established.
895
896 If STOP_AT is not -1, the function will only try to match
897 the qualifier sequence for operands before and including the operand
898 of index STOP_AT; and on success *RET will only be filled with the first
899 (STOP_AT+1) qualifiers.
900
901 A couple examples of the matching algorithm:
902
903 X,W,NIL should match
904 X,W,NIL
905
906 NIL,NIL should match
907 X ,NIL
908
909 Apart from serving the main encoding routine, this can also be called
910 during or after the operand decoding. */
911
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      /* N.B. *RET is not filled in this case; the caller must not rely
	 on it for a zero-operand opcode.  */
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  QUALIFIERS_LIST is advanced together with I so
     that after a successful `break' it still points at the matching
     sequence, which is re-read when filling *RET below.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  /* An all-NIL sequence at position 0 means the opcode takes no
	     qualifiers at all and the (vacuous) match succeeds; at any
	     later position it is just the list terminator.  */
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  QUALIFIERS_LIST still addresses the
	 sequence that matched (see the loop-increment note above).  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched prefix, then pad the remainder with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
1014
1015 /* Operand qualifier matching and resolving.
1016
1017 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1018 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1019
   If UPDATE_P, update the qualifier(s) in *INST after the matching
   succeeds.  */
1022
1023 static int
1024 match_operands_qualifier (aarch64_inst *inst, bool update_p)
1025 {
1026 int i, nops;
1027 aarch64_opnd_qualifier_seq_t qualifiers;
1028
1029 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1030 qualifiers))
1031 {
1032 DEBUG_TRACE ("matching FAIL");
1033 return 0;
1034 }
1035
1036 if (inst->opcode->flags & F_STRICT)
1037 {
1038 /* Require an exact qualifier match, even for NIL qualifiers. */
1039 nops = aarch64_num_of_operands (inst->opcode);
1040 for (i = 0; i < nops; ++i)
1041 if (inst->operands[i].qualifier != qualifiers[i])
1042 return false;
1043 }
1044
1045 /* Update the qualifiers. */
1046 if (update_p)
1047 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1048 {
1049 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1050 break;
1051 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1052 "update %s with %s for operand %d",
1053 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1054 aarch64_get_qualifier_name (qualifiers[i]), i);
1055 inst->operands[i].qualifier = qualifiers[i];
1056 }
1057
1058 DEBUG_TRACE ("matching SUCCESS");
1059 return 1;
1060 }
1061
1062 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1063 register by MOVZ.
1064
1065 IS32 indicates whether value is a 32-bit immediate or not.
1066 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1067 amount will be returned in *SHIFT_AMOUNT. */
1068
1069 bool
1070 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1071 {
1072 int amount;
1073
1074 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1075
1076 if (is32)
1077 {
1078 /* Allow all zeros or all ones in top 32-bits, so that
1079 32-bit constant expressions like ~0x80000000 are
1080 permitted. */
1081 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1082 /* Immediate out of range. */
1083 return false;
1084 value &= 0xffffffff;
1085 }
1086
1087 /* first, try movz then movn */
1088 amount = -1;
1089 if ((value & ((uint64_t) 0xffff << 0)) == value)
1090 amount = 0;
1091 else if ((value & ((uint64_t) 0xffff << 16)) == value)
1092 amount = 16;
1093 else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1094 amount = 32;
1095 else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1096 amount = 48;
1097
1098 if (amount == -1)
1099 {
1100 DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1101 return false;
1102 }
1103
1104 if (shift_amount != NULL)
1105 *shift_amount = amount;
1106
1107 DEBUG_TRACE ("exit true with amount %d", amount);
1108
1109 return true;
1110 }
1111
1112 /* Build the accepted values for immediate logical SIMD instructions.
1113
1114 The standard encodings of the immediate value are:
1115 N imms immr SIMD size R S
1116 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1117 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1118 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1119 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1120 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1121 0 11110s 00000r 2 UInt(r) UInt(s)
1122 where all-ones value of S is reserved.
1123
1124 Let's call E the SIMD size.
1125
1126 The immediate value is: S+1 bits '1' rotated to the right by R.
1127
1128 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1129 (remember S != E - 1). */
1130
1131 #define TOTAL_IMM_NB 5334
1132
1133 typedef struct
1134 {
1135 uint64_t imm;
1136 aarch64_insn encoding;
1137 } simd_imm_encoding;
1138
1139 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1140
1141 static int
1142 simd_imm_encoding_cmp(const void *i1, const void *i2)
1143 {
1144 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1145 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1146
1147 if (imm1->imm < imm2->imm)
1148 return -1;
1149 if (imm1->imm > imm2->imm)
1150 return +1;
1151 return 0;
1152 }
1153
/* Pack the standard encoding of an immediate bitfield:
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr      64        rrrrrr ssssss
   0         0sssss     0rrrrr      32        rrrrr  sssss
   0         10ssss     00rrrr      16        rrrr   ssss
   0         110sss     000rrr      8         rrr    sss
   0         1110ss     0000rr      4         rr     ss
   0         11110s     00000r      2         r      s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;		/* imm13<5:0>: S field.  */

  encoding |= r << 6;		/* imm13<11:6>: R field.  */
  encoding |= is64 << 12;	/* imm13<12>: the N bit.  */
  return encoding;
}
1167
/* Fill simd_immediates[] with every valid logical-immediate value together
   with its standard encoding, then sort the table by immediate value so
   that aarch64_logical_immediate_p can bsearch it.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* Element sizes 2, 4, 8, 16, 32 and 64 bits.  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* S_MASK is the fixed high-bit pattern that identifies the
	     element size in the imms field:
	     log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* S = E - 1 (all ones) is reserved, hence s < e - 1.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* Replicate the E-bit pattern across all 64 bits; each case
	       doubles the width, falling through until 64 is reached.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm <<  2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm <<  4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm <<  8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  /* 64*63 + 32*31 + ... + 2*1 entries in total (see TOTAL_IMM_NB).  */
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1233
1234 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1235 be accepted by logical (immediate) instructions
1236 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1237
1238 ESIZE is the number of bytes in the decoded immediate value.
1239 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1240 VALUE will be returned in *ENCODING. */
1241
bool
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  static bool initialized = false;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  /* Lazily build and sort the lookup table on first use.
     NOTE(review): this lazy initialization is not thread-safe; presumably
     libopcodes is used single-threaded here -- confirm.  */
  if (!initialized)
    {
      build_immediate_table ();
      initialized = true;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.
     The shift is split in two so that ESIZE == 8 shifts twice by 32
     (giving UPPER == 0) rather than once by 64, which would be
     undefined behaviour.  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return false;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* Binary-search the pre-sorted table for the replicated value.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
	    sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with false");
      return false;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with true");
  return true;
}
1285
1286 /* If 64-bit immediate IMM is in the format of
1287 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1288 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1289 of value "abcdefgh". Otherwise return -1. */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int ret = 0;
  int byte_idx;

  /* Walk the eight bytes from least to most significant; each byte must
     be either all-ones (contributes a 1 bit) or all-zeros.  */
  for (byte_idx = 0; byte_idx < 8; byte_idx++)
    switch ((imm >> (8 * byte_idx)) & 0xff)
      {
      case 0xff:
	ret |= 1 << byte_idx;
	break;
      case 0x00:
	break;
      default:
	/* A mixed byte: IMM is not in the expanded-imm8 format.  */
	return -1;
      }

  return ret;
}
1307
1308 /* Utility inline functions for operand_general_constraint_met_p. */
1309
1310 static inline void
1311 set_error (aarch64_operand_error *mismatch_detail,
1312 enum aarch64_operand_error_kind kind, int idx,
1313 const char* error)
1314 {
1315 if (mismatch_detail == NULL)
1316 return;
1317 mismatch_detail->kind = kind;
1318 mismatch_detail->index = idx;
1319 mismatch_detail->error = error;
1320 }
1321
1322 static inline void
1323 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1324 const char* error)
1325 {
1326 if (mismatch_detail == NULL)
1327 return;
1328 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1329 }
1330
1331 static inline void
1332 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1333 int idx, int lower_bound, int upper_bound,
1334 const char* error)
1335 {
1336 if (mismatch_detail == NULL)
1337 return;
1338 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1339 mismatch_detail->data[0] = lower_bound;
1340 mismatch_detail->data[1] = upper_bound;
1341 }
1342
1343 static inline void
1344 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1345 int idx, int lower_bound, int upper_bound)
1346 {
1347 if (mismatch_detail == NULL)
1348 return;
1349 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1350 _("immediate value"));
1351 }
1352
1353 static inline void
1354 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1355 int idx, int lower_bound, int upper_bound)
1356 {
1357 if (mismatch_detail == NULL)
1358 return;
1359 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1360 _("immediate offset"));
1361 }
1362
1363 static inline void
1364 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1365 int idx, int lower_bound, int upper_bound)
1366 {
1367 if (mismatch_detail == NULL)
1368 return;
1369 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1370 _("register number"));
1371 }
1372
1373 static inline void
1374 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1375 int idx, int lower_bound, int upper_bound)
1376 {
1377 if (mismatch_detail == NULL)
1378 return;
1379 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1380 _("register element index"));
1381 }
1382
1383 static inline void
1384 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1385 int idx, int lower_bound, int upper_bound)
1386 {
1387 if (mismatch_detail == NULL)
1388 return;
1389 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1390 _("shift amount"));
1391 }
1392
1393 /* Report that the MUL modifier in operand IDX should be in the range
1394 [LOWER_BOUND, UPPER_BOUND]. */
1395 static inline void
1396 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1397 int idx, int lower_bound, int upper_bound)
1398 {
1399 if (mismatch_detail == NULL)
1400 return;
1401 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1402 _("multiplier"));
1403 }
1404
1405 static inline void
1406 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1407 int alignment)
1408 {
1409 if (mismatch_detail == NULL)
1410 return;
1411 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1412 mismatch_detail->data[0] = alignment;
1413 }
1414
1415 static inline void
1416 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1417 int expected_num)
1418 {
1419 if (mismatch_detail == NULL)
1420 return;
1421 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1422 mismatch_detail->data[0] = expected_num;
1423 }
1424
1425 static inline void
1426 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1427 const char* error)
1428 {
1429 if (mismatch_detail == NULL)
1430 return;
1431 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1432 }
1433
1434 /* General constraint checking based on operand code.
1435
1436 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1437 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1438
1439 This function has to be called after the qualifiers for all operands
1440 have been resolved.
1441
1442 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1443 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1444 of error message during the disassembling where error message is not
1445 wanted. We avoid the dynamic construction of strings of error messages
1446 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1447 use a combination of error code, static string and some integer data to
1448 represent an error. */
1449
1450 static int
1451 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1452 enum aarch64_opnd type,
1453 const aarch64_opcode *opcode,
1454 aarch64_operand_error *mismatch_detail)
1455 {
1456 unsigned num, modifiers, shift;
1457 unsigned char size;
1458 int64_t imm, min_value, max_value;
1459 uint64_t uvalue, mask;
1460 const aarch64_opnd_info *opnd = opnds + idx;
1461 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1462
1463 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1464
1465 switch (aarch64_operands[type].op_class)
1466 {
1467 case AARCH64_OPND_CLASS_INT_REG:
1468 /* Check pair reg constraints for cas* instructions. */
1469 if (type == AARCH64_OPND_PAIRREG)
1470 {
1471 assert (idx == 1 || idx == 3);
1472 if (opnds[idx - 1].reg.regno % 2 != 0)
1473 {
1474 set_syntax_error (mismatch_detail, idx - 1,
1475 _("reg pair must start from even reg"));
1476 return 0;
1477 }
1478 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1479 {
1480 set_syntax_error (mismatch_detail, idx,
1481 _("reg pair must be contiguous"));
1482 return 0;
1483 }
1484 break;
1485 }
1486
1487 /* <Xt> may be optional in some IC and TLBI instructions. */
1488 if (type == AARCH64_OPND_Rt_SYS)
1489 {
1490 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1491 == AARCH64_OPND_CLASS_SYSTEM));
1492 if (opnds[1].present
1493 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1494 {
1495 set_other_error (mismatch_detail, idx, _("extraneous register"));
1496 return 0;
1497 }
1498 if (!opnds[1].present
1499 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1500 {
1501 set_other_error (mismatch_detail, idx, _("missing register"));
1502 return 0;
1503 }
1504 }
1505 switch (qualifier)
1506 {
1507 case AARCH64_OPND_QLF_WSP:
1508 case AARCH64_OPND_QLF_SP:
1509 if (!aarch64_stack_pointer_p (opnd))
1510 {
1511 set_other_error (mismatch_detail, idx,
1512 _("stack pointer register expected"));
1513 return 0;
1514 }
1515 break;
1516 default:
1517 break;
1518 }
1519 break;
1520
1521 case AARCH64_OPND_CLASS_SVE_REG:
1522 switch (type)
1523 {
1524 case AARCH64_OPND_SVE_Zm3_INDEX:
1525 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1526 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1527 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1528 case AARCH64_OPND_SVE_Zm4_INDEX:
1529 size = get_operand_fields_width (get_operand_from_code (type));
1530 shift = get_operand_specific_data (&aarch64_operands[type]);
1531 mask = (1 << shift) - 1;
1532 if (opnd->reg.regno > mask)
1533 {
1534 assert (mask == 7 || mask == 15);
1535 set_other_error (mismatch_detail, idx,
1536 mask == 15
1537 ? _("z0-z15 expected")
1538 : _("z0-z7 expected"));
1539 return 0;
1540 }
1541 mask = (1u << (size - shift)) - 1;
1542 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1543 {
1544 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1545 return 0;
1546 }
1547 break;
1548
1549 case AARCH64_OPND_SVE_Zn_INDEX:
1550 size = aarch64_get_qualifier_esize (opnd->qualifier);
1551 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1552 {
1553 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1554 0, 64 / size - 1);
1555 return 0;
1556 }
1557 break;
1558
1559 case AARCH64_OPND_SVE_ZnxN:
1560 case AARCH64_OPND_SVE_ZtxN:
1561 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1562 {
1563 set_other_error (mismatch_detail, idx,
1564 _("invalid register list"));
1565 return 0;
1566 }
1567 break;
1568
1569 default:
1570 break;
1571 }
1572 break;
1573
1574 case AARCH64_OPND_CLASS_PRED_REG:
1575 if (opnd->reg.regno >= 8
1576 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1577 {
1578 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1579 return 0;
1580 }
1581 break;
1582
1583 case AARCH64_OPND_CLASS_COND:
1584 if (type == AARCH64_OPND_COND1
1585 && (opnds[idx].cond->value & 0xe) == 0xe)
1586 {
1587 /* Not allow AL or NV. */
1588 set_syntax_error (mismatch_detail, idx, NULL);
1589 }
1590 break;
1591
1592 case AARCH64_OPND_CLASS_ADDRESS:
1593 /* Check writeback. */
1594 switch (opcode->iclass)
1595 {
1596 case ldst_pos:
1597 case ldst_unscaled:
1598 case ldstnapair_offs:
1599 case ldstpair_off:
1600 case ldst_unpriv:
1601 if (opnd->addr.writeback == 1)
1602 {
1603 set_syntax_error (mismatch_detail, idx,
1604 _("unexpected address writeback"));
1605 return 0;
1606 }
1607 break;
1608 case ldst_imm10:
1609 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1610 {
1611 set_syntax_error (mismatch_detail, idx,
1612 _("unexpected address writeback"));
1613 return 0;
1614 }
1615 break;
1616 case ldst_imm9:
1617 case ldstpair_indexed:
1618 case asisdlsep:
1619 case asisdlsop:
1620 if (opnd->addr.writeback == 0)
1621 {
1622 set_syntax_error (mismatch_detail, idx,
1623 _("address writeback expected"));
1624 return 0;
1625 }
1626 break;
1627 default:
1628 assert (opnd->addr.writeback == 0);
1629 break;
1630 }
1631 switch (type)
1632 {
1633 case AARCH64_OPND_ADDR_SIMM7:
1634 /* Scaled signed 7 bits immediate offset. */
1635 /* Get the size of the data element that is accessed, which may be
1636 different from that of the source register size,
1637 e.g. in strb/ldrb. */
1638 size = aarch64_get_qualifier_esize (opnd->qualifier);
1639 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1640 {
1641 set_offset_out_of_range_error (mismatch_detail, idx,
1642 -64 * size, 63 * size);
1643 return 0;
1644 }
1645 if (!value_aligned_p (opnd->addr.offset.imm, size))
1646 {
1647 set_unaligned_error (mismatch_detail, idx, size);
1648 return 0;
1649 }
1650 break;
1651 case AARCH64_OPND_ADDR_OFFSET:
1652 case AARCH64_OPND_ADDR_SIMM9:
1653 /* Unscaled signed 9 bits immediate offset. */
1654 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1655 {
1656 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1657 return 0;
1658 }
1659 break;
1660
1661 case AARCH64_OPND_ADDR_SIMM9_2:
1662 /* Unscaled signed 9 bits immediate offset, which has to be negative
1663 or unaligned. */
1664 size = aarch64_get_qualifier_esize (qualifier);
1665 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1666 && !value_aligned_p (opnd->addr.offset.imm, size))
1667 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1668 return 1;
1669 set_other_error (mismatch_detail, idx,
1670 _("negative or unaligned offset expected"));
1671 return 0;
1672
1673 case AARCH64_OPND_ADDR_SIMM10:
1674 /* Scaled signed 10 bits immediate offset. */
1675 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1676 {
1677 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1678 return 0;
1679 }
1680 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1681 {
1682 set_unaligned_error (mismatch_detail, idx, 8);
1683 return 0;
1684 }
1685 break;
1686
1687 case AARCH64_OPND_ADDR_SIMM11:
1688 /* Signed 11 bits immediate offset (multiple of 16). */
1689 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1690 {
1691 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1692 return 0;
1693 }
1694
1695 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1696 {
1697 set_unaligned_error (mismatch_detail, idx, 16);
1698 return 0;
1699 }
1700 break;
1701
1702 case AARCH64_OPND_ADDR_SIMM13:
1703 /* Signed 13 bits immediate offset (multiple of 16). */
1704 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1705 {
1706 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1707 return 0;
1708 }
1709
1710 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1711 {
1712 set_unaligned_error (mismatch_detail, idx, 16);
1713 return 0;
1714 }
1715 break;
1716
1717 case AARCH64_OPND_SIMD_ADDR_POST:
1718 /* AdvSIMD load/store multiple structures, post-index. */
1719 assert (idx == 1);
1720 if (opnd->addr.offset.is_reg)
1721 {
1722 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1723 return 1;
1724 else
1725 {
1726 set_other_error (mismatch_detail, idx,
1727 _("invalid register offset"));
1728 return 0;
1729 }
1730 }
1731 else
1732 {
1733 const aarch64_opnd_info *prev = &opnds[idx-1];
1734 unsigned num_bytes; /* total number of bytes transferred. */
1735 /* The opcode dependent area stores the number of elements in
1736 each structure to be loaded/stored. */
1737 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1738 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1739 /* Special handling of loading single structure to all lane. */
1740 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1741 * aarch64_get_qualifier_esize (prev->qualifier);
1742 else
1743 num_bytes = prev->reglist.num_regs
1744 * aarch64_get_qualifier_esize (prev->qualifier)
1745 * aarch64_get_qualifier_nelem (prev->qualifier);
1746 if ((int) num_bytes != opnd->addr.offset.imm)
1747 {
1748 set_other_error (mismatch_detail, idx,
1749 _("invalid post-increment amount"));
1750 return 0;
1751 }
1752 }
1753 break;
1754
1755 case AARCH64_OPND_ADDR_REGOFF:
1756 /* Get the size of the data element that is accessed, which may be
1757 different from that of the source register size,
1758 e.g. in strb/ldrb. */
1759 size = aarch64_get_qualifier_esize (opnd->qualifier);
1760 /* It is either no shift or shift by the binary logarithm of SIZE. */
1761 if (opnd->shifter.amount != 0
1762 && opnd->shifter.amount != (int)get_logsz (size))
1763 {
1764 set_other_error (mismatch_detail, idx,
1765 _("invalid shift amount"));
1766 return 0;
1767 }
1768 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1769 operators. */
1770 switch (opnd->shifter.kind)
1771 {
1772 case AARCH64_MOD_UXTW:
1773 case AARCH64_MOD_LSL:
1774 case AARCH64_MOD_SXTW:
1775 case AARCH64_MOD_SXTX: break;
1776 default:
1777 set_other_error (mismatch_detail, idx,
1778 _("invalid extend/shift operator"));
1779 return 0;
1780 }
1781 break;
1782
1783 case AARCH64_OPND_ADDR_UIMM12:
1784 imm = opnd->addr.offset.imm;
1785 /* Get the size of the data element that is accessed, which may be
1786 different from that of the source register size,
1787 e.g. in strb/ldrb. */
1788 size = aarch64_get_qualifier_esize (qualifier);
1789 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1790 {
1791 set_offset_out_of_range_error (mismatch_detail, idx,
1792 0, 4095 * size);
1793 return 0;
1794 }
1795 if (!value_aligned_p (opnd->addr.offset.imm, size))
1796 {
1797 set_unaligned_error (mismatch_detail, idx, size);
1798 return 0;
1799 }
1800 break;
1801
1802 case AARCH64_OPND_ADDR_PCREL14:
1803 case AARCH64_OPND_ADDR_PCREL19:
1804 case AARCH64_OPND_ADDR_PCREL21:
1805 case AARCH64_OPND_ADDR_PCREL26:
1806 imm = opnd->imm.value;
1807 if (operand_need_shift_by_two (get_operand_from_code (type)))
1808 {
	      /* The offset value in a PC-relative branch instruction is always
1810 4-byte aligned and is encoded without the lowest 2 bits. */
1811 if (!value_aligned_p (imm, 4))
1812 {
1813 set_unaligned_error (mismatch_detail, idx, 4);
1814 return 0;
1815 }
1816 /* Right shift by 2 so that we can carry out the following check
1817 canonically. */
1818 imm >>= 2;
1819 }
1820 size = get_operand_fields_width (get_operand_from_code (type));
1821 if (!value_fit_signed_field_p (imm, size))
1822 {
1823 set_other_error (mismatch_detail, idx,
1824 _("immediate out of range"));
1825 return 0;
1826 }
1827 break;
1828
1829 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1830 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1831 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1832 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1833 min_value = -8;
1834 max_value = 7;
1835 sve_imm_offset_vl:
1836 assert (!opnd->addr.offset.is_reg);
1837 assert (opnd->addr.preind);
1838 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1839 min_value *= num;
1840 max_value *= num;
1841 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1842 || (opnd->shifter.operator_present
1843 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1844 {
1845 set_other_error (mismatch_detail, idx,
1846 _("invalid addressing mode"));
1847 return 0;
1848 }
1849 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1850 {
1851 set_offset_out_of_range_error (mismatch_detail, idx,
1852 min_value, max_value);
1853 return 0;
1854 }
1855 if (!value_aligned_p (opnd->addr.offset.imm, num))
1856 {
1857 set_unaligned_error (mismatch_detail, idx, num);
1858 return 0;
1859 }
1860 break;
1861
1862 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1863 min_value = -32;
1864 max_value = 31;
1865 goto sve_imm_offset_vl;
1866
1867 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1868 min_value = -256;
1869 max_value = 255;
1870 goto sve_imm_offset_vl;
1871
1872 case AARCH64_OPND_SVE_ADDR_RI_U6:
1873 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1874 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1875 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1876 min_value = 0;
1877 max_value = 63;
1878 sve_imm_offset:
1879 assert (!opnd->addr.offset.is_reg);
1880 assert (opnd->addr.preind);
1881 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1882 min_value *= num;
1883 max_value *= num;
1884 if (opnd->shifter.operator_present
1885 || opnd->shifter.amount_present)
1886 {
1887 set_other_error (mismatch_detail, idx,
1888 _("invalid addressing mode"));
1889 return 0;
1890 }
1891 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1892 {
1893 set_offset_out_of_range_error (mismatch_detail, idx,
1894 min_value, max_value);
1895 return 0;
1896 }
1897 if (!value_aligned_p (opnd->addr.offset.imm, num))
1898 {
1899 set_unaligned_error (mismatch_detail, idx, num);
1900 return 0;
1901 }
1902 break;
1903
1904 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1905 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
1906 min_value = -8;
1907 max_value = 7;
1908 goto sve_imm_offset;
1909
1910 case AARCH64_OPND_SVE_ADDR_ZX:
1911 /* Everything is already ensured by parse_operands or
1912 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
1913 argument type). */
1914 assert (opnd->addr.offset.is_reg);
1915 assert (opnd->addr.preind);
1916 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
1917 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
1918 assert (opnd->shifter.operator_present == 0);
1919 break;
1920
1921 case AARCH64_OPND_SVE_ADDR_R:
1922 case AARCH64_OPND_SVE_ADDR_RR:
1923 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1924 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1925 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1926 case AARCH64_OPND_SVE_ADDR_RX:
1927 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1928 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1929 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1930 case AARCH64_OPND_SVE_ADDR_RZ:
1931 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1932 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1933 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1934 modifiers = 1 << AARCH64_MOD_LSL;
1935 sve_rr_operand:
1936 assert (opnd->addr.offset.is_reg);
1937 assert (opnd->addr.preind);
1938 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1939 && opnd->addr.offset.regno == 31)
1940 {
1941 set_other_error (mismatch_detail, idx,
1942 _("index register xzr is not allowed"));
1943 return 0;
1944 }
1945 if (((1 << opnd->shifter.kind) & modifiers) == 0
1946 || (opnd->shifter.amount
1947 != get_operand_specific_data (&aarch64_operands[type])))
1948 {
1949 set_other_error (mismatch_detail, idx,
1950 _("invalid addressing mode"));
1951 return 0;
1952 }
1953 break;
1954
1955 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1956 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1957 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1958 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1959 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1960 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1961 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1962 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1963 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1964 goto sve_rr_operand;
1965
1966 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1967 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1968 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1969 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1970 min_value = 0;
1971 max_value = 31;
1972 goto sve_imm_offset;
1973
1974 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1975 modifiers = 1 << AARCH64_MOD_LSL;
1976 sve_zz_operand:
1977 assert (opnd->addr.offset.is_reg);
1978 assert (opnd->addr.preind);
1979 if (((1 << opnd->shifter.kind) & modifiers) == 0
1980 || opnd->shifter.amount < 0
1981 || opnd->shifter.amount > 3)
1982 {
1983 set_other_error (mismatch_detail, idx,
1984 _("invalid addressing mode"));
1985 return 0;
1986 }
1987 break;
1988
1989 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1990 modifiers = (1 << AARCH64_MOD_SXTW);
1991 goto sve_zz_operand;
1992
1993 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1994 modifiers = 1 << AARCH64_MOD_UXTW;
1995 goto sve_zz_operand;
1996
1997 default:
1998 break;
1999 }
2000 break;
2001
2002 case AARCH64_OPND_CLASS_SIMD_REGLIST:
2003 if (type == AARCH64_OPND_LEt)
2004 {
2005 /* Get the upper bound for the element index. */
2006 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2007 if (!value_in_range_p (opnd->reglist.index, 0, num))
2008 {
2009 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2010 return 0;
2011 }
2012 }
2013 /* The opcode dependent area stores the number of elements in
2014 each structure to be loaded/stored. */
2015 num = get_opcode_dependent_value (opcode);
2016 switch (type)
2017 {
2018 case AARCH64_OPND_LVt:
2019 assert (num >= 1 && num <= 4);
2020 /* Unless LD1/ST1, the number of registers should be equal to that
2021 of the structure elements. */
2022 if (num != 1 && opnd->reglist.num_regs != num)
2023 {
2024 set_reg_list_error (mismatch_detail, idx, num);
2025 return 0;
2026 }
2027 break;
2028 case AARCH64_OPND_LVt_AL:
2029 case AARCH64_OPND_LEt:
2030 assert (num >= 1 && num <= 4);
2031 /* The number of registers should be equal to that of the structure
2032 elements. */
2033 if (opnd->reglist.num_regs != num)
2034 {
2035 set_reg_list_error (mismatch_detail, idx, num);
2036 return 0;
2037 }
2038 break;
2039 default:
2040 break;
2041 }
2042 break;
2043
2044 case AARCH64_OPND_CLASS_IMMEDIATE:
2045 /* Constraint check on immediate operand. */
2046 imm = opnd->imm.value;
2047 /* E.g. imm_0_31 constrains value to be 0..31. */
2048 if (qualifier_value_in_range_constraint_p (qualifier)
2049 && !value_in_range_p (imm, get_lower_bound (qualifier),
2050 get_upper_bound (qualifier)))
2051 {
2052 set_imm_out_of_range_error (mismatch_detail, idx,
2053 get_lower_bound (qualifier),
2054 get_upper_bound (qualifier));
2055 return 0;
2056 }
2057
2058 switch (type)
2059 {
2060 case AARCH64_OPND_AIMM:
2061 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2062 {
2063 set_other_error (mismatch_detail, idx,
2064 _("invalid shift operator"));
2065 return 0;
2066 }
2067 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2068 {
2069 set_other_error (mismatch_detail, idx,
2070 _("shift amount must be 0 or 12"));
2071 return 0;
2072 }
2073 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2074 {
2075 set_other_error (mismatch_detail, idx,
2076 _("immediate out of range"));
2077 return 0;
2078 }
2079 break;
2080
2081 case AARCH64_OPND_HALF:
2082 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2083 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2084 {
2085 set_other_error (mismatch_detail, idx,
2086 _("invalid shift operator"));
2087 return 0;
2088 }
2089 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2090 if (!value_aligned_p (opnd->shifter.amount, 16))
2091 {
2092 set_other_error (mismatch_detail, idx,
2093 _("shift amount must be a multiple of 16"));
2094 return 0;
2095 }
2096 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2097 {
2098 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2099 0, size * 8 - 16);
2100 return 0;
2101 }
2102 if (opnd->imm.value < 0)
2103 {
2104 set_other_error (mismatch_detail, idx,
2105 _("negative immediate value not allowed"));
2106 return 0;
2107 }
2108 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2109 {
2110 set_other_error (mismatch_detail, idx,
2111 _("immediate out of range"));
2112 return 0;
2113 }
2114 break;
2115
2116 case AARCH64_OPND_IMM_MOV:
2117 {
2118 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2119 imm = opnd->imm.value;
2120 assert (idx == 1);
2121 switch (opcode->op)
2122 {
2123 case OP_MOV_IMM_WIDEN:
2124 imm = ~imm;
2125 /* Fall through. */
2126 case OP_MOV_IMM_WIDE:
2127 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2128 {
2129 set_other_error (mismatch_detail, idx,
2130 _("immediate out of range"));
2131 return 0;
2132 }
2133 break;
2134 case OP_MOV_IMM_LOG:
2135 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2136 {
2137 set_other_error (mismatch_detail, idx,
2138 _("immediate out of range"));
2139 return 0;
2140 }
2141 break;
2142 default:
2143 assert (0);
2144 return 0;
2145 }
2146 }
2147 break;
2148
2149 case AARCH64_OPND_NZCV:
2150 case AARCH64_OPND_CCMP_IMM:
2151 case AARCH64_OPND_EXCEPTION:
2152 case AARCH64_OPND_UNDEFINED:
2153 case AARCH64_OPND_TME_UIMM16:
2154 case AARCH64_OPND_UIMM4:
2155 case AARCH64_OPND_UIMM4_ADDG:
2156 case AARCH64_OPND_UIMM7:
2157 case AARCH64_OPND_UIMM3_OP1:
2158 case AARCH64_OPND_UIMM3_OP2:
2159 case AARCH64_OPND_SVE_UIMM3:
2160 case AARCH64_OPND_SVE_UIMM7:
2161 case AARCH64_OPND_SVE_UIMM8:
2162 case AARCH64_OPND_SVE_UIMM8_53:
2163 size = get_operand_fields_width (get_operand_from_code (type));
2164 assert (size < 32);
2165 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2166 {
2167 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2168 (1u << size) - 1);
2169 return 0;
2170 }
2171 break;
2172
2173 case AARCH64_OPND_UIMM10:
2174 /* Scaled unsigned 10 bits immediate offset. */
2175 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2176 {
2177 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2178 return 0;
2179 }
2180
2181 if (!value_aligned_p (opnd->imm.value, 16))
2182 {
2183 set_unaligned_error (mismatch_detail, idx, 16);
2184 return 0;
2185 }
2186 break;
2187
2188 case AARCH64_OPND_SIMM5:
2189 case AARCH64_OPND_SVE_SIMM5:
2190 case AARCH64_OPND_SVE_SIMM5B:
2191 case AARCH64_OPND_SVE_SIMM6:
2192 case AARCH64_OPND_SVE_SIMM8:
2193 size = get_operand_fields_width (get_operand_from_code (type));
2194 assert (size < 32);
2195 if (!value_fit_signed_field_p (opnd->imm.value, size))
2196 {
2197 set_imm_out_of_range_error (mismatch_detail, idx,
2198 -(1 << (size - 1)),
2199 (1 << (size - 1)) - 1);
2200 return 0;
2201 }
2202 break;
2203
2204 case AARCH64_OPND_WIDTH:
2205 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2206 && opnds[0].type == AARCH64_OPND_Rd);
2207 size = get_upper_bound (qualifier);
2208 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2209 /* lsb+width <= reg.size */
2210 {
2211 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2212 size - opnds[idx-1].imm.value);
2213 return 0;
2214 }
2215 break;
2216
2217 case AARCH64_OPND_LIMM:
2218 case AARCH64_OPND_SVE_LIMM:
2219 {
2220 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2221 uint64_t uimm = opnd->imm.value;
2222 if (opcode->op == OP_BIC)
2223 uimm = ~uimm;
2224 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2225 {
2226 set_other_error (mismatch_detail, idx,
2227 _("immediate out of range"));
2228 return 0;
2229 }
2230 }
2231 break;
2232
2233 case AARCH64_OPND_IMM0:
2234 case AARCH64_OPND_FPIMM0:
2235 if (opnd->imm.value != 0)
2236 {
2237 set_other_error (mismatch_detail, idx,
2238 _("immediate zero expected"));
2239 return 0;
2240 }
2241 break;
2242
2243 case AARCH64_OPND_IMM_ROT1:
2244 case AARCH64_OPND_IMM_ROT2:
2245 case AARCH64_OPND_SVE_IMM_ROT2:
2246 if (opnd->imm.value != 0
2247 && opnd->imm.value != 90
2248 && opnd->imm.value != 180
2249 && opnd->imm.value != 270)
2250 {
2251 set_other_error (mismatch_detail, idx,
2252 _("rotate expected to be 0, 90, 180 or 270"));
2253 return 0;
2254 }
2255 break;
2256
2257 case AARCH64_OPND_IMM_ROT3:
2258 case AARCH64_OPND_SVE_IMM_ROT1:
2259 case AARCH64_OPND_SVE_IMM_ROT3:
2260 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2261 {
2262 set_other_error (mismatch_detail, idx,
2263 _("rotate expected to be 90 or 270"));
2264 return 0;
2265 }
2266 break;
2267
2268 case AARCH64_OPND_SHLL_IMM:
2269 assert (idx == 2);
2270 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2271 if (opnd->imm.value != size)
2272 {
2273 set_other_error (mismatch_detail, idx,
2274 _("invalid shift amount"));
2275 return 0;
2276 }
2277 break;
2278
2279 case AARCH64_OPND_IMM_VLSL:
2280 size = aarch64_get_qualifier_esize (qualifier);
2281 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2282 {
2283 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2284 size * 8 - 1);
2285 return 0;
2286 }
2287 break;
2288
2289 case AARCH64_OPND_IMM_VLSR:
2290 size = aarch64_get_qualifier_esize (qualifier);
2291 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2292 {
2293 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2294 return 0;
2295 }
2296 break;
2297
2298 case AARCH64_OPND_SIMD_IMM:
2299 case AARCH64_OPND_SIMD_IMM_SFT:
2300 /* Qualifier check. */
2301 switch (qualifier)
2302 {
2303 case AARCH64_OPND_QLF_LSL:
2304 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2305 {
2306 set_other_error (mismatch_detail, idx,
2307 _("invalid shift operator"));
2308 return 0;
2309 }
2310 break;
2311 case AARCH64_OPND_QLF_MSL:
2312 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2313 {
2314 set_other_error (mismatch_detail, idx,
2315 _("invalid shift operator"));
2316 return 0;
2317 }
2318 break;
2319 case AARCH64_OPND_QLF_NIL:
2320 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2321 {
2322 set_other_error (mismatch_detail, idx,
2323 _("shift is not permitted"));
2324 return 0;
2325 }
2326 break;
2327 default:
2328 assert (0);
2329 return 0;
2330 }
2331 /* Is the immediate valid? */
2332 assert (idx == 1);
2333 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2334 {
2335 /* uimm8 or simm8 */
2336 if (!value_in_range_p (opnd->imm.value, -128, 255))
2337 {
2338 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2339 return 0;
2340 }
2341 }
2342 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2343 {
2344 /* uimm64 is not
2345 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2346 ffffffffgggggggghhhhhhhh'. */
2347 set_other_error (mismatch_detail, idx,
2348 _("invalid value for immediate"));
2349 return 0;
2350 }
2351 /* Is the shift amount valid? */
2352 switch (opnd->shifter.kind)
2353 {
2354 case AARCH64_MOD_LSL:
2355 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2356 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2357 {
2358 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2359 (size - 1) * 8);
2360 return 0;
2361 }
2362 if (!value_aligned_p (opnd->shifter.amount, 8))
2363 {
2364 set_unaligned_error (mismatch_detail, idx, 8);
2365 return 0;
2366 }
2367 break;
2368 case AARCH64_MOD_MSL:
2369 /* Only 8 and 16 are valid shift amount. */
2370 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2371 {
2372 set_other_error (mismatch_detail, idx,
2373 _("shift amount must be 0 or 16"));
2374 return 0;
2375 }
2376 break;
2377 default:
2378 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2379 {
2380 set_other_error (mismatch_detail, idx,
2381 _("invalid shift operator"));
2382 return 0;
2383 }
2384 break;
2385 }
2386 break;
2387
2388 case AARCH64_OPND_FPIMM:
2389 case AARCH64_OPND_SIMD_FPIMM:
2390 case AARCH64_OPND_SVE_FPIMM8:
2391 if (opnd->imm.is_fp == 0)
2392 {
2393 set_other_error (mismatch_detail, idx,
2394 _("floating-point immediate expected"));
2395 return 0;
2396 }
2397 /* The value is expected to be an 8-bit floating-point constant with
2398 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2399 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2400 instruction). */
2401 if (!value_in_range_p (opnd->imm.value, 0, 255))
2402 {
2403 set_other_error (mismatch_detail, idx,
2404 _("immediate out of range"));
2405 return 0;
2406 }
2407 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2408 {
2409 set_other_error (mismatch_detail, idx,
2410 _("invalid shift operator"));
2411 return 0;
2412 }
2413 break;
2414
2415 case AARCH64_OPND_SVE_AIMM:
2416 min_value = 0;
2417 sve_aimm:
2418 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2419 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2420 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2421 uvalue = opnd->imm.value;
2422 shift = opnd->shifter.amount;
2423 if (size == 1)
2424 {
2425 if (shift != 0)
2426 {
2427 set_other_error (mismatch_detail, idx,
2428 _("no shift amount allowed for"
2429 " 8-bit constants"));
2430 return 0;
2431 }
2432 }
2433 else
2434 {
2435 if (shift != 0 && shift != 8)
2436 {
2437 set_other_error (mismatch_detail, idx,
2438 _("shift amount must be 0 or 8"));
2439 return 0;
2440 }
2441 if (shift == 0 && (uvalue & 0xff) == 0)
2442 {
2443 shift = 8;
2444 uvalue = (int64_t) uvalue / 256;
2445 }
2446 }
2447 mask >>= shift;
2448 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2449 {
2450 set_other_error (mismatch_detail, idx,
2451 _("immediate too big for element size"));
2452 return 0;
2453 }
2454 uvalue = (uvalue - min_value) & mask;
2455 if (uvalue > 0xff)
2456 {
2457 set_other_error (mismatch_detail, idx,
2458 _("invalid arithmetic immediate"));
2459 return 0;
2460 }
2461 break;
2462
2463 case AARCH64_OPND_SVE_ASIMM:
2464 min_value = -128;
2465 goto sve_aimm;
2466
2467 case AARCH64_OPND_SVE_I1_HALF_ONE:
2468 assert (opnd->imm.is_fp);
2469 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2470 {
2471 set_other_error (mismatch_detail, idx,
2472 _("floating-point value must be 0.5 or 1.0"));
2473 return 0;
2474 }
2475 break;
2476
2477 case AARCH64_OPND_SVE_I1_HALF_TWO:
2478 assert (opnd->imm.is_fp);
2479 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2480 {
2481 set_other_error (mismatch_detail, idx,
2482 _("floating-point value must be 0.5 or 2.0"));
2483 return 0;
2484 }
2485 break;
2486
2487 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2488 assert (opnd->imm.is_fp);
2489 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2490 {
2491 set_other_error (mismatch_detail, idx,
2492 _("floating-point value must be 0.0 or 1.0"));
2493 return 0;
2494 }
2495 break;
2496
2497 case AARCH64_OPND_SVE_INV_LIMM:
2498 {
2499 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2500 uint64_t uimm = ~opnd->imm.value;
2501 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2502 {
2503 set_other_error (mismatch_detail, idx,
2504 _("immediate out of range"));
2505 return 0;
2506 }
2507 }
2508 break;
2509
2510 case AARCH64_OPND_SVE_LIMM_MOV:
2511 {
2512 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2513 uint64_t uimm = opnd->imm.value;
2514 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2515 {
2516 set_other_error (mismatch_detail, idx,
2517 _("immediate out of range"));
2518 return 0;
2519 }
2520 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2521 {
2522 set_other_error (mismatch_detail, idx,
2523 _("invalid replicated MOV immediate"));
2524 return 0;
2525 }
2526 }
2527 break;
2528
2529 case AARCH64_OPND_SVE_PATTERN_SCALED:
2530 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2531 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2532 {
2533 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2534 return 0;
2535 }
2536 break;
2537
2538 case AARCH64_OPND_SVE_SHLIMM_PRED:
2539 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2540 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
2541 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2542 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2543 {
2544 set_imm_out_of_range_error (mismatch_detail, idx,
2545 0, 8 * size - 1);
2546 return 0;
2547 }
2548 break;
2549
2550 case AARCH64_OPND_SVE_SHRIMM_PRED:
2551 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2552 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2553 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2554 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
2555 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2556 {
2557 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2558 return 0;
2559 }
2560 break;
2561
2562 default:
2563 break;
2564 }
2565 break;
2566
2567 case AARCH64_OPND_CLASS_SYSTEM:
2568 switch (type)
2569 {
2570 case AARCH64_OPND_PSTATEFIELD:
2571 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2572 /* MSR UAO, #uimm4
2573 MSR PAN, #uimm4
2574 MSR SSBS,#uimm4
2575 The immediate must be #0 or #1. */
2576 if ((opnd->pstatefield == 0x03 /* UAO. */
2577 || opnd->pstatefield == 0x04 /* PAN. */
2578 || opnd->pstatefield == 0x19 /* SSBS. */
2579 || opnd->pstatefield == 0x1a) /* DIT. */
2580 && opnds[1].imm.value > 1)
2581 {
2582 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2583 return 0;
2584 }
2585 /* MSR SPSel, #uimm4
2586 Uses uimm4 as a control value to select the stack pointer: if
2587 bit 0 is set it selects the current exception level's stack
2588 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2589 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2590 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2591 {
2592 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2593 return 0;
2594 }
2595 break;
2596 default:
2597 break;
2598 }
2599 break;
2600
2601 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2602 /* Get the upper bound for the element index. */
2603 if (opcode->op == OP_FCMLA_ELEM)
	/* FCMLA index range depends on the vector size of other operands
	   and is halved because complex numbers take two elements.  */
2606 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2607 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2608 else
2609 num = 16;
2610 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2611 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2612
2613 /* Index out-of-range. */
2614 if (!value_in_range_p (opnd->reglane.index, 0, num))
2615 {
2616 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2617 return 0;
2618 }
2619 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2620 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2621 number is encoded in "size:M:Rm":
2622 size <Vm>
2623 00 RESERVED
2624 01 0:Rm
2625 10 M:Rm
2626 11 RESERVED */
2627 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2628 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2629 {
2630 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2631 return 0;
2632 }
2633 break;
2634
2635 case AARCH64_OPND_CLASS_MODIFIED_REG:
2636 assert (idx == 1 || idx == 2);
2637 switch (type)
2638 {
2639 case AARCH64_OPND_Rm_EXT:
2640 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2641 && opnd->shifter.kind != AARCH64_MOD_LSL)
2642 {
2643 set_other_error (mismatch_detail, idx,
2644 _("extend operator expected"));
2645 return 0;
2646 }
2647 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2648 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2649 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2650 case. */
2651 if (!aarch64_stack_pointer_p (opnds + 0)
2652 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2653 {
2654 if (!opnd->shifter.operator_present)
2655 {
2656 set_other_error (mismatch_detail, idx,
2657 _("missing extend operator"));
2658 return 0;
2659 }
2660 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2661 {
2662 set_other_error (mismatch_detail, idx,
2663 _("'LSL' operator not allowed"));
2664 return 0;
2665 }
2666 }
2667 assert (opnd->shifter.operator_present /* Default to LSL. */
2668 || opnd->shifter.kind == AARCH64_MOD_LSL);
2669 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2670 {
2671 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2672 return 0;
2673 }
2674 /* In the 64-bit form, the final register operand is written as Wm
2675 for all but the (possibly omitted) UXTX/LSL and SXTX
2676 operators.
2677 N.B. GAS allows X register to be used with any operator as a
2678 programming convenience. */
2679 if (qualifier == AARCH64_OPND_QLF_X
2680 && opnd->shifter.kind != AARCH64_MOD_LSL
2681 && opnd->shifter.kind != AARCH64_MOD_UXTX
2682 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2683 {
2684 set_other_error (mismatch_detail, idx, _("W register expected"));
2685 return 0;
2686 }
2687 break;
2688
2689 case AARCH64_OPND_Rm_SFT:
2690 /* ROR is not available to the shifted register operand in
2691 arithmetic instructions. */
2692 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2693 {
2694 set_other_error (mismatch_detail, idx,
2695 _("shift operator expected"));
2696 return 0;
2697 }
2698 if (opnd->shifter.kind == AARCH64_MOD_ROR
2699 && opcode->iclass != log_shift)
2700 {
2701 set_other_error (mismatch_detail, idx,
2702 _("'ROR' operator not allowed"));
2703 return 0;
2704 }
2705 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2706 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2707 {
2708 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2709 return 0;
2710 }
2711 break;
2712
2713 default:
2714 break;
2715 }
2716 break;
2717
2718 default:
2719 break;
2720 }
2721
2722 return 1;
2723 }
2724
2725 /* Main entrypoint for the operand constraint checking.
2726
2727 Return 1 if operands of *INST meet the constraint applied by the operand
2728 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2729 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2730 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2731 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2732 error kind when it is notified that an instruction does not pass the check).
2733
2734 Un-determined operand qualifiers may get established during the process. */
2735
2736 int
2737 aarch64_match_operands_constraint (aarch64_inst *inst,
2738 aarch64_operand_error *mismatch_detail)
2739 {
2740 int i;
2741
2742 DEBUG_TRACE ("enter");
2743
2744 /* Check for cases where a source register needs to be the same as the
2745 destination register. Do this before matching qualifiers since if
2746 an instruction has both invalid tying and invalid qualifiers,
2747 the error about qualifiers would suggest several alternative
2748 instructions that also have invalid tying. */
2749 i = inst->opcode->tied_operand;
2750 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2751 {
2752 if (mismatch_detail)
2753 {
2754 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2755 mismatch_detail->index = i;
2756 mismatch_detail->error = NULL;
2757 }
2758 return 0;
2759 }
2760
2761 /* Match operands' qualifier.
2762 *INST has already had qualifier establish for some, if not all, of
2763 its operands; we need to find out whether these established
2764 qualifiers match one of the qualifier sequence in
2765 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2766 with the corresponding qualifier in such a sequence.
2767 Only basic operand constraint checking is done here; the more thorough
2768 constraint checking will carried out by operand_general_constraint_met_p,
2769 which has be to called after this in order to get all of the operands'
2770 qualifiers established. */
2771 if (match_operands_qualifier (inst, true /* update_p */) == 0)
2772 {
2773 DEBUG_TRACE ("FAIL on operand qualifier matching");
2774 if (mismatch_detail)
2775 {
2776 /* Return an error type to indicate that it is the qualifier
2777 matching failure; we don't care about which operand as there
2778 are enough information in the opcode table to reproduce it. */
2779 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2780 mismatch_detail->index = -1;
2781 mismatch_detail->error = NULL;
2782 }
2783 return 0;
2784 }
2785
2786 /* Match operands' constraint. */
2787 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2788 {
2789 enum aarch64_opnd type = inst->opcode->operands[i];
2790 if (type == AARCH64_OPND_NIL)
2791 break;
2792 if (inst->operands[i].skip)
2793 {
2794 DEBUG_TRACE ("skip the incomplete operand %d", i);
2795 continue;
2796 }
2797 if (operand_general_constraint_met_p (inst->operands, i, type,
2798 inst->opcode, mismatch_detail) == 0)
2799 {
2800 DEBUG_TRACE ("FAIL on operand %d", i);
2801 return 0;
2802 }
2803 }
2804
2805 DEBUG_TRACE ("PASS");
2806
2807 return 1;
2808 }
2809
2810 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2811 Also updates the TYPE of each INST->OPERANDS with the corresponding
2812 value of OPCODE->OPERANDS.
2813
2814 Note that some operand qualifiers may need to be manually cleared by
2815 the caller before it further calls the aarch64_opcode_encode; by
2816 doing this, it helps the qualifier matching facilities work
2817 properly. */
2818
2819 const aarch64_opcode*
2820 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2821 {
2822 int i;
2823 const aarch64_opcode *old = inst->opcode;
2824
2825 inst->opcode = opcode;
2826
2827 /* Update the operand types. */
2828 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2829 {
2830 inst->operands[i].type = opcode->operands[i];
2831 if (opcode->operands[i] == AARCH64_OPND_NIL)
2832 break;
2833 }
2834
2835 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2836
2837 return old;
2838 }
2839
2840 int
2841 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2842 {
2843 int i;
2844 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2845 if (operands[i] == operand)
2846 return i;
2847 else if (operands[i] == AARCH64_OPND_NIL)
2848 break;
2849 return -1;
2850 }
2851 \f
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7), \
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30),  FOR31 }
/* Names of the general-purpose integer registers.  The first index
   selects what register number 31 means: bank 0 names it as the stack
   pointer, bank 1 as the zero register.  The second index selects the
   32-bit (Wn) or 64-bit (Xn) form:
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2882
2883 /* Return the integer register name.
2884 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2885
2886 static inline const char *
2887 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2888 {
2889 const int has_zr = sp_reg_p ? 0 : 1;
2890 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2891 return int_reg[has_zr][is_64][regno];
2892 }
2893
2894 /* Like get_int_reg_name, but IS_64 is always 1. */
2895
2896 static inline const char *
2897 get_64bit_int_reg_name (int regno, int sp_reg_p)
2898 {
2899 const int has_zr = sp_reg_p ? 0 : 1;
2900 return int_reg[has_zr][1][regno];
2901 }
2902
2903 /* Get the name of the integer offset register in OPND, using the shift type
2904 to decide whether it's a word or doubleword. */
2905
2906 static inline const char *
2907 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2908 {
2909 switch (opnd->shifter.kind)
2910 {
2911 case AARCH64_MOD_UXTW:
2912 case AARCH64_MOD_SXTW:
2913 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2914
2915 case AARCH64_MOD_LSL:
2916 case AARCH64_MOD_SXTX:
2917 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2918
2919 default:
2920 abort ();
2921 }
2922 }
2923
2924 /* Get the name of the SVE vector offset register in OPND, using the operand
2925 qualifier to decide whether the suffix should be .S or .D. */
2926
2927 static inline const char *
2928 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2929 {
2930 assert (qualifier == AARCH64_OPND_QLF_S_S
2931 || qualifier == AARCH64_OPND_QLF_S_D);
2932 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2933 }
2934
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union lets the raw bit pattern (I) and its floating-point
   interpretation share the same storage.  */

typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision (see
   expand_fp_imm), hence the 32-bit members here as well.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2954
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 is expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  const uint32_t sign = (imm8 >> 7) & 0x01;	/* imm8<7> */
  const uint32_t frac7 = imm8 & 0x7f;		/* imm8<6:0> */
  const uint32_t exp_top = frac7 >> 6;		/* imm8<6> */
  const uint32_t exp_repl4 = exp_top ? 0xf : 0;	/* Replicate(imm8<6>,4) */
  uint64_t result;

  if (size == 8)
    {
      /* Assemble the high 32 bits, then shift them into place.  */
      uint64_t hi = (sign << (63 - 32))		/* imm8<7> */
	| ((exp_top ^ 1) << (62 - 32))		/* NOT(imm8<6>) */
	| (exp_repl4 << (58 - 32))
	| (exp_top << (57 - 32))
	| (exp_top << (56 - 32))
	| (exp_top << (55 - 32))		/* Replicate(imm8<6>,7) */
	| (frac7 << (48 - 32));			/* imm8<6>:imm8<5:0> */
      result = hi << 32;
    }
  else if (size == 4 || size == 2)
    result = (sign << 31)			/* imm8<7> */
      | ((exp_top ^ 1) << 30)			/* NOT(imm8<6>) */
      | (exp_repl4 << 26)			/* Replicate(imm8<6>,4) */
      | (frac7 << 19);				/* imm8<6>:imm8<5:0> */
  else
    {
      /* An unsupported size.  */
      assert (0);
      result = 0;
    }

  return result;
}
2998
2999 /* Produce the string representation of the register list operand *OPND
3000 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
3001 the register name that comes before the register number, such as "v". */
3002 static void
3003 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
3004 const char *prefix)
3005 {
3006 const int num_regs = opnd->reglist.num_regs;
3007 const int first_reg = opnd->reglist.first_regno;
3008 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
3009 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
3010 char tb[8]; /* Temporary buffer. */
3011
3012 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
3013 assert (num_regs >= 1 && num_regs <= 4);
3014
3015 /* Prepare the index if any. */
3016 if (opnd->reglist.has_index)
3017 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3018 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
3019 else
3020 tb[0] = '\0';
3021
3022 /* The hyphenated form is preferred for disassembly if there are
3023 more than two registers in the list, and the register numbers
3024 are monotonically increasing in increments of one. */
3025 if (num_regs > 2 && last_reg > first_reg)
3026 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
3027 prefix, last_reg, qlf_name, tb);
3028 else
3029 {
3030 const int reg0 = first_reg;
3031 const int reg1 = (first_reg + 1) & 0x1f;
3032 const int reg2 = (first_reg + 2) & 0x1f;
3033 const int reg3 = (first_reg + 3) & 0x1f;
3034
3035 switch (num_regs)
3036 {
3037 case 1:
3038 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
3039 break;
3040 case 2:
3041 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
3042 prefix, reg1, qlf_name, tb);
3043 break;
3044 case 3:
3045 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
3046 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3047 prefix, reg2, qlf_name, tb);
3048 break;
3049 case 4:
3050 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
3051 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3052 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
3053 break;
3054 }
3055 }
3056 }
3057
3058 /* Print the register+immediate address in OPND to BUF, which has SIZE
3059 characters. BASE is the name of the base register. */
3060
3061 static void
3062 print_immediate_offset_address (char *buf, size_t size,
3063 const aarch64_opnd_info *opnd,
3064 const char *base)
3065 {
3066 if (opnd->addr.writeback)
3067 {
3068 if (opnd->addr.preind)
3069 {
3070 if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3071 snprintf (buf, size, "[%s]!", base);
3072 else
3073 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
3074 }
3075 else
3076 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
3077 }
3078 else
3079 {
3080 if (opnd->shifter.operator_present)
3081 {
3082 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3083 snprintf (buf, size, "[%s, #%d, mul vl]",
3084 base, opnd->addr.offset.imm);
3085 }
3086 else if (opnd->addr.offset.imm)
3087 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
3088 else
3089 snprintf (buf, size, "[%s]", base);
3090 }
3091 }
3092
3093 /* Produce the string representation of the register offset address operand
3094 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3095 the names of the base and offset registers. */
3096 static void
3097 print_register_offset_address (char *buf, size_t size,
3098 const aarch64_opnd_info *opnd,
3099 const char *base, const char *offset)
3100 {
3101 char tb[16]; /* Temporary buffer. */
3102 bool print_extend_p = true;
3103 bool print_amount_p = true;
3104 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3105
3106 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3107 || !opnd->shifter.amount_present))
3108 {
3109 /* Not print the shift/extend amount when the amount is zero and
3110 when it is not the special case of 8-bit load/store instruction. */
3111 print_amount_p = false;
3112 /* Likewise, no need to print the shift operator LSL in such a
3113 situation. */
3114 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3115 print_extend_p = false;
3116 }
3117
3118 /* Prepare for the extend/shift. */
3119 if (print_extend_p)
3120 {
3121 if (print_amount_p)
3122 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3123 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3124 (opnd->shifter.amount % 100));
3125 else
3126 snprintf (tb, sizeof (tb), ", %s", shift_name);
3127 }
3128 else
3129 tb[0] = '\0';
3130
3131 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3132 }
3133
3134 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3135 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3136 PC, PCREL_P and ADDRESS are used to pass in and return information about
3137 the PC-relative address calculation, where the PC value is passed in
3138 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3139 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3140 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3141
3142 The function serves both the disassembler and the assembler diagnostics
3143 issuer, which is the reason why it lives in this file. */
3144
3145 void
3146 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3147 const aarch64_opcode *opcode,
3148 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3149 bfd_vma *address, char** notes,
3150 aarch64_feature_set features)
3151 {
3152 unsigned int i, num_conds;
3153 const char *name = NULL;
3154 const aarch64_opnd_info *opnd = opnds + idx;
3155 enum aarch64_modifier_kind kind;
3156 uint64_t addr, enum_value;
3157
3158 buf[0] = '\0';
3159 if (pcrel_p)
3160 *pcrel_p = 0;
3161
3162 switch (opnd->type)
3163 {
3164 case AARCH64_OPND_Rd:
3165 case AARCH64_OPND_Rn:
3166 case AARCH64_OPND_Rm:
3167 case AARCH64_OPND_Rt:
3168 case AARCH64_OPND_Rt2:
3169 case AARCH64_OPND_Rs:
3170 case AARCH64_OPND_Ra:
3171 case AARCH64_OPND_Rt_LS64:
3172 case AARCH64_OPND_Rt_SYS:
3173 case AARCH64_OPND_PAIRREG:
3174 case AARCH64_OPND_SVE_Rm:
3175 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3176 the <ic_op>, therefore we use opnd->present to override the
3177 generic optional-ness information. */
3178 if (opnd->type == AARCH64_OPND_Rt_SYS)
3179 {
3180 if (!opnd->present)
3181 break;
3182 }
3183 /* Omit the operand, e.g. RET. */
3184 else if (optional_operand_p (opcode, idx)
3185 && (opnd->reg.regno
3186 == get_optional_operand_default_value (opcode)))
3187 break;
3188 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3189 || opnd->qualifier == AARCH64_OPND_QLF_X);
3190 snprintf (buf, size, "%s",
3191 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3192 break;
3193
3194 case AARCH64_OPND_Rd_SP:
3195 case AARCH64_OPND_Rn_SP:
3196 case AARCH64_OPND_Rt_SP:
3197 case AARCH64_OPND_SVE_Rn_SP:
3198 case AARCH64_OPND_Rm_SP:
3199 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3200 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3201 || opnd->qualifier == AARCH64_OPND_QLF_X
3202 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3203 snprintf (buf, size, "%s",
3204 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3205 break;
3206
3207 case AARCH64_OPND_Rm_EXT:
3208 kind = opnd->shifter.kind;
3209 assert (idx == 1 || idx == 2);
3210 if ((aarch64_stack_pointer_p (opnds)
3211 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3212 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3213 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3214 && kind == AARCH64_MOD_UXTW)
3215 || (opnd->qualifier == AARCH64_OPND_QLF_X
3216 && kind == AARCH64_MOD_UXTX)))
3217 {
3218 /* 'LSL' is the preferred form in this case. */
3219 kind = AARCH64_MOD_LSL;
3220 if (opnd->shifter.amount == 0)
3221 {
3222 /* Shifter omitted. */
3223 snprintf (buf, size, "%s",
3224 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3225 break;
3226 }
3227 }
3228 if (opnd->shifter.amount)
3229 snprintf (buf, size, "%s, %s #%" PRIi64,
3230 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3231 aarch64_operand_modifiers[kind].name,
3232 opnd->shifter.amount);
3233 else
3234 snprintf (buf, size, "%s, %s",
3235 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3236 aarch64_operand_modifiers[kind].name);
3237 break;
3238
3239 case AARCH64_OPND_Rm_SFT:
3240 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3241 || opnd->qualifier == AARCH64_OPND_QLF_X);
3242 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3243 snprintf (buf, size, "%s",
3244 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3245 else
3246 snprintf (buf, size, "%s, %s #%" PRIi64,
3247 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3248 aarch64_operand_modifiers[opnd->shifter.kind].name,
3249 opnd->shifter.amount);
3250 break;
3251
3252 case AARCH64_OPND_Fd:
3253 case AARCH64_OPND_Fn:
3254 case AARCH64_OPND_Fm:
3255 case AARCH64_OPND_Fa:
3256 case AARCH64_OPND_Ft:
3257 case AARCH64_OPND_Ft2:
3258 case AARCH64_OPND_Sd:
3259 case AARCH64_OPND_Sn:
3260 case AARCH64_OPND_Sm:
3261 case AARCH64_OPND_SVE_VZn:
3262 case AARCH64_OPND_SVE_Vd:
3263 case AARCH64_OPND_SVE_Vm:
3264 case AARCH64_OPND_SVE_Vn:
3265 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3266 opnd->reg.regno);
3267 break;
3268
3269 case AARCH64_OPND_Va:
3270 case AARCH64_OPND_Vd:
3271 case AARCH64_OPND_Vn:
3272 case AARCH64_OPND_Vm:
3273 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3274 aarch64_get_qualifier_name (opnd->qualifier));
3275 break;
3276
3277 case AARCH64_OPND_Ed:
3278 case AARCH64_OPND_En:
3279 case AARCH64_OPND_Em:
3280 case AARCH64_OPND_Em16:
3281 case AARCH64_OPND_SM3_IMM2:
3282 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3283 aarch64_get_qualifier_name (opnd->qualifier),
3284 opnd->reglane.index);
3285 break;
3286
3287 case AARCH64_OPND_VdD1:
3288 case AARCH64_OPND_VnD1:
3289 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3290 break;
3291
3292 case AARCH64_OPND_LVn:
3293 case AARCH64_OPND_LVt:
3294 case AARCH64_OPND_LVt_AL:
3295 case AARCH64_OPND_LEt:
3296 print_register_list (buf, size, opnd, "v");
3297 break;
3298
3299 case AARCH64_OPND_SVE_Pd:
3300 case AARCH64_OPND_SVE_Pg3:
3301 case AARCH64_OPND_SVE_Pg4_5:
3302 case AARCH64_OPND_SVE_Pg4_10:
3303 case AARCH64_OPND_SVE_Pg4_16:
3304 case AARCH64_OPND_SVE_Pm:
3305 case AARCH64_OPND_SVE_Pn:
3306 case AARCH64_OPND_SVE_Pt:
3307 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3308 snprintf (buf, size, "p%d", opnd->reg.regno);
3309 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3310 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3311 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3312 aarch64_get_qualifier_name (opnd->qualifier));
3313 else
3314 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3315 aarch64_get_qualifier_name (opnd->qualifier));
3316 break;
3317
3318 case AARCH64_OPND_SVE_Za_5:
3319 case AARCH64_OPND_SVE_Za_16:
3320 case AARCH64_OPND_SVE_Zd:
3321 case AARCH64_OPND_SVE_Zm_5:
3322 case AARCH64_OPND_SVE_Zm_16:
3323 case AARCH64_OPND_SVE_Zn:
3324 case AARCH64_OPND_SVE_Zt:
3325 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3326 snprintf (buf, size, "z%d", opnd->reg.regno);
3327 else
3328 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3329 aarch64_get_qualifier_name (opnd->qualifier));
3330 break;
3331
3332 case AARCH64_OPND_SVE_ZnxN:
3333 case AARCH64_OPND_SVE_ZtxN:
3334 print_register_list (buf, size, opnd, "z");
3335 break;
3336
3337 case AARCH64_OPND_SVE_Zm3_INDEX:
3338 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3339 case AARCH64_OPND_SVE_Zm3_11_INDEX:
3340 case AARCH64_OPND_SVE_Zm4_11_INDEX:
3341 case AARCH64_OPND_SVE_Zm4_INDEX:
3342 case AARCH64_OPND_SVE_Zn_INDEX:
3343 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3344 aarch64_get_qualifier_name (opnd->qualifier),
3345 opnd->reglane.index);
3346 break;
3347
3348 case AARCH64_OPND_CRn:
3349 case AARCH64_OPND_CRm:
3350 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3351 break;
3352
3353 case AARCH64_OPND_IDX:
3354 case AARCH64_OPND_MASK:
3355 case AARCH64_OPND_IMM:
3356 case AARCH64_OPND_IMM_2:
3357 case AARCH64_OPND_WIDTH:
3358 case AARCH64_OPND_UIMM3_OP1:
3359 case AARCH64_OPND_UIMM3_OP2:
3360 case AARCH64_OPND_BIT_NUM:
3361 case AARCH64_OPND_IMM_VLSL:
3362 case AARCH64_OPND_IMM_VLSR:
3363 case AARCH64_OPND_SHLL_IMM:
3364 case AARCH64_OPND_IMM0:
3365 case AARCH64_OPND_IMMR:
3366 case AARCH64_OPND_IMMS:
3367 case AARCH64_OPND_UNDEFINED:
3368 case AARCH64_OPND_FBITS:
3369 case AARCH64_OPND_TME_UIMM16:
3370 case AARCH64_OPND_SIMM5:
3371 case AARCH64_OPND_SVE_SHLIMM_PRED:
3372 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3373 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
3374 case AARCH64_OPND_SVE_SHRIMM_PRED:
3375 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3376 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3377 case AARCH64_OPND_SVE_SIMM5:
3378 case AARCH64_OPND_SVE_SIMM5B:
3379 case AARCH64_OPND_SVE_SIMM6:
3380 case AARCH64_OPND_SVE_SIMM8:
3381 case AARCH64_OPND_SVE_UIMM3:
3382 case AARCH64_OPND_SVE_UIMM7:
3383 case AARCH64_OPND_SVE_UIMM8:
3384 case AARCH64_OPND_SVE_UIMM8_53:
3385 case AARCH64_OPND_IMM_ROT1:
3386 case AARCH64_OPND_IMM_ROT2:
3387 case AARCH64_OPND_IMM_ROT3:
3388 case AARCH64_OPND_SVE_IMM_ROT1:
3389 case AARCH64_OPND_SVE_IMM_ROT2:
3390 case AARCH64_OPND_SVE_IMM_ROT3:
3391 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3392 break;
3393
3394 case AARCH64_OPND_SVE_I1_HALF_ONE:
3395 case AARCH64_OPND_SVE_I1_HALF_TWO:
3396 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3397 {
3398 single_conv_t c;
3399 c.i = opnd->imm.value;
3400 snprintf (buf, size, "#%.1f", c.f);
3401 break;
3402 }
3403
3404 case AARCH64_OPND_SVE_PATTERN:
3405 if (optional_operand_p (opcode, idx)
3406 && opnd->imm.value == get_optional_operand_default_value (opcode))
3407 break;
3408 enum_value = opnd->imm.value;
3409 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3410 if (aarch64_sve_pattern_array[enum_value])
3411 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3412 else
3413 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3414 break;
3415
3416 case AARCH64_OPND_SVE_PATTERN_SCALED:
3417 if (optional_operand_p (opcode, idx)
3418 && !opnd->shifter.operator_present
3419 && opnd->imm.value == get_optional_operand_default_value (opcode))
3420 break;
3421 enum_value = opnd->imm.value;
3422 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3423 if (aarch64_sve_pattern_array[opnd->imm.value])
3424 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3425 else
3426 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3427 if (opnd->shifter.operator_present)
3428 {
3429 size_t len = strlen (buf);
3430 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3431 aarch64_operand_modifiers[opnd->shifter.kind].name,
3432 opnd->shifter.amount);
3433 }
3434 break;
3435
3436 case AARCH64_OPND_SVE_PRFOP:
3437 enum_value = opnd->imm.value;
3438 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3439 if (aarch64_sve_prfop_array[enum_value])
3440 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3441 else
3442 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3443 break;
3444
3445 case AARCH64_OPND_IMM_MOV:
3446 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3447 {
3448 case 4: /* e.g. MOV Wd, #<imm32>. */
3449 {
3450 int imm32 = opnd->imm.value;
3451 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3452 }
3453 break;
3454 case 8: /* e.g. MOV Xd, #<imm64>. */
3455 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3456 opnd->imm.value, opnd->imm.value);
3457 break;
3458 default: assert (0);
3459 }
3460 break;
3461
3462 case AARCH64_OPND_FPIMM0:
3463 snprintf (buf, size, "#0.0");
3464 break;
3465
3466 case AARCH64_OPND_LIMM:
3467 case AARCH64_OPND_AIMM:
3468 case AARCH64_OPND_HALF:
3469 case AARCH64_OPND_SVE_INV_LIMM:
3470 case AARCH64_OPND_SVE_LIMM:
3471 case AARCH64_OPND_SVE_LIMM_MOV:
3472 if (opnd->shifter.amount)
3473 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3474 opnd->shifter.amount);
3475 else
3476 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3477 break;
3478
3479 case AARCH64_OPND_SIMD_IMM:
3480 case AARCH64_OPND_SIMD_IMM_SFT:
3481 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3482 || opnd->shifter.kind == AARCH64_MOD_NONE)
3483 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3484 else
3485 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3486 aarch64_operand_modifiers[opnd->shifter.kind].name,
3487 opnd->shifter.amount);
3488 break;
3489
3490 case AARCH64_OPND_SVE_AIMM:
3491 case AARCH64_OPND_SVE_ASIMM:
3492 if (opnd->shifter.amount)
3493 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3494 opnd->shifter.amount);
3495 else
3496 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3497 break;
3498
3499 case AARCH64_OPND_FPIMM:
3500 case AARCH64_OPND_SIMD_FPIMM:
3501 case AARCH64_OPND_SVE_FPIMM8:
3502 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3503 {
3504 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3505 {
3506 half_conv_t c;
3507 c.i = expand_fp_imm (2, opnd->imm.value);
3508 snprintf (buf, size, "#%.18e", c.f);
3509 }
3510 break;
3511 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3512 {
3513 single_conv_t c;
3514 c.i = expand_fp_imm (4, opnd->imm.value);
3515 snprintf (buf, size, "#%.18e", c.f);
3516 }
3517 break;
3518 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3519 {
3520 double_conv_t c;
3521 c.i = expand_fp_imm (8, opnd->imm.value);
3522 snprintf (buf, size, "#%.18e", c.d);
3523 }
3524 break;
3525 default: assert (0);
3526 }
3527 break;
3528
3529 case AARCH64_OPND_CCMP_IMM:
3530 case AARCH64_OPND_NZCV:
3531 case AARCH64_OPND_EXCEPTION:
3532 case AARCH64_OPND_UIMM4:
3533 case AARCH64_OPND_UIMM4_ADDG:
3534 case AARCH64_OPND_UIMM7:
3535 case AARCH64_OPND_UIMM10:
3536 if (optional_operand_p (opcode, idx)
3537 && (opnd->imm.value ==
3538 (int64_t) get_optional_operand_default_value (opcode)))
3539 /* Omit the operand, e.g. DCPS1. */
3540 break;
3541 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3542 break;
3543
3544 case AARCH64_OPND_COND:
3545 case AARCH64_OPND_COND1:
3546 snprintf (buf, size, "%s", opnd->cond->names[0]);
3547 num_conds = ARRAY_SIZE (opnd->cond->names);
3548 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3549 {
3550 size_t len = strlen (buf);
3551 if (i == 1)
3552 snprintf (buf + len, size - len, " // %s = %s",
3553 opnd->cond->names[0], opnd->cond->names[i]);
3554 else
3555 snprintf (buf + len, size - len, ", %s",
3556 opnd->cond->names[i]);
3557 }
3558 break;
3559
3560 case AARCH64_OPND_ADDR_ADRP:
3561 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3562 + opnd->imm.value;
3563 if (pcrel_p)
3564 *pcrel_p = 1;
3565 if (address)
3566 *address = addr;
3567 /* This is not necessary during the disassembling, as print_address_func
3568 in the disassemble_info will take care of the printing. But some
3569 other callers may be still interested in getting the string in *STR,
3570 so here we do snprintf regardless. */
3571 snprintf (buf, size, "#0x%" PRIx64, addr);
3572 break;
3573
3574 case AARCH64_OPND_ADDR_PCREL14:
3575 case AARCH64_OPND_ADDR_PCREL19:
3576 case AARCH64_OPND_ADDR_PCREL21:
3577 case AARCH64_OPND_ADDR_PCREL26:
3578 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3579 if (pcrel_p)
3580 *pcrel_p = 1;
3581 if (address)
3582 *address = addr;
3583 /* This is not necessary during the disassembling, as print_address_func
3584 in the disassemble_info will take care of the printing. But some
3585 other callers may be still interested in getting the string in *STR,
3586 so here we do snprintf regardless. */
3587 snprintf (buf, size, "#0x%" PRIx64, addr);
3588 break;
3589
3590 case AARCH64_OPND_ADDR_SIMPLE:
3591 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3592 case AARCH64_OPND_SIMD_ADDR_POST:
3593 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3594 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3595 {
3596 if (opnd->addr.offset.is_reg)
3597 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3598 else
3599 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3600 }
3601 else
3602 snprintf (buf, size, "[%s]", name);
3603 break;
3604
3605 case AARCH64_OPND_ADDR_REGOFF:
3606 case AARCH64_OPND_SVE_ADDR_R:
3607 case AARCH64_OPND_SVE_ADDR_RR:
3608 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3609 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3610 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3611 case AARCH64_OPND_SVE_ADDR_RX:
3612 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3613 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3614 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3615 print_register_offset_address
3616 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3617 get_offset_int_reg_name (opnd));
3618 break;
3619
3620 case AARCH64_OPND_SVE_ADDR_ZX:
3621 print_register_offset_address
3622 (buf, size, opnd,
3623 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3624 get_64bit_int_reg_name (opnd->addr.offset.regno, 0));
3625 break;
3626
3627 case AARCH64_OPND_SVE_ADDR_RZ:
3628 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3629 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3630 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3631 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3632 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3633 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3634 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3635 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3636 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3637 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3638 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3639 print_register_offset_address
3640 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3641 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3642 break;
3643
3644 case AARCH64_OPND_ADDR_SIMM7:
3645 case AARCH64_OPND_ADDR_SIMM9:
3646 case AARCH64_OPND_ADDR_SIMM9_2:
3647 case AARCH64_OPND_ADDR_SIMM10:
3648 case AARCH64_OPND_ADDR_SIMM11:
3649 case AARCH64_OPND_ADDR_SIMM13:
3650 case AARCH64_OPND_ADDR_OFFSET:
3651 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3652 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
3653 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3654 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3655 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3656 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3657 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3658 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3659 case AARCH64_OPND_SVE_ADDR_RI_U6:
3660 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3661 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3662 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3663 print_immediate_offset_address
3664 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3665 break;
3666
3667 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3668 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3669 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3670 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3671 print_immediate_offset_address
3672 (buf, size, opnd,
3673 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3674 break;
3675
3676 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3677 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3678 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3679 print_register_offset_address
3680 (buf, size, opnd,
3681 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3682 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3683 break;
3684
3685 case AARCH64_OPND_ADDR_UIMM12:
3686 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3687 if (opnd->addr.offset.imm)
3688 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3689 else
3690 snprintf (buf, size, "[%s]", name);
3691 break;
3692
3693 case AARCH64_OPND_SYSREG:
3694 for (i = 0; aarch64_sys_regs[i].name; ++i)
3695 {
3696 const aarch64_sys_reg *sr = aarch64_sys_regs + i;
3697
3698 bool exact_match
3699 = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
3700 || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
3701 && AARCH64_CPU_HAS_FEATURE (features, sr->features);
3702
3703 /* Try and find an exact match, But if that fails, return the first
3704 partial match that was found. */
3705 if (aarch64_sys_regs[i].value == opnd->sysreg.value
3706 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
3707 && (name == NULL || exact_match))
3708 {
3709 name = aarch64_sys_regs[i].name;
3710 if (exact_match)
3711 {
3712 if (notes)
3713 *notes = NULL;
3714 break;
3715 }
3716
3717 /* If we didn't match exactly, that means the presense of a flag
3718 indicates what we didn't want for this instruction. e.g. If
3719 F_REG_READ is there, that means we were looking for a write
3720 register. See aarch64_ext_sysreg. */
3721 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
3722 *notes = _("reading from a write-only register");
3723 else if (aarch64_sys_regs[i].flags & F_REG_READ)
3724 *notes = _("writing to a read-only register");
3725 }
3726 }
3727
3728 if (name)
3729 snprintf (buf, size, "%s", name);
3730 else
3731 {
3732 /* Implementation defined system register. */
3733 unsigned int value = opnd->sysreg.value;
3734 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3735 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3736 value & 0x7);
3737 }
3738 break;
3739
3740 case AARCH64_OPND_PSTATEFIELD:
3741 for (i = 0; aarch64_pstatefields[i].name; ++i)
3742 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3743 break;
3744 assert (aarch64_pstatefields[i].name);
3745 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3746 break;
3747
3748 case AARCH64_OPND_SYSREG_AT:
3749 case AARCH64_OPND_SYSREG_DC:
3750 case AARCH64_OPND_SYSREG_IC:
3751 case AARCH64_OPND_SYSREG_TLBI:
3752 case AARCH64_OPND_SYSREG_SR:
3753 snprintf (buf, size, "%s", opnd->sysins_op->name);
3754 break;
3755
3756 case AARCH64_OPND_BARRIER:
3757 case AARCH64_OPND_BARRIER_DSB_NXS:
3758 snprintf (buf, size, "%s", opnd->barrier->name);
3759 break;
3760
3761 case AARCH64_OPND_BARRIER_ISB:
3762 /* Operand can be omitted, e.g. in DCPS1. */
3763 if (! optional_operand_p (opcode, idx)
3764 || (opnd->barrier->value
3765 != get_optional_operand_default_value (opcode)))
3766 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3767 break;
3768
3769 case AARCH64_OPND_PRFOP:
3770 if (opnd->prfop->name != NULL)
3771 snprintf (buf, size, "%s", opnd->prfop->name);
3772 else
3773 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3774 break;
3775
3776 case AARCH64_OPND_BARRIER_PSB:
3777 snprintf (buf, size, "csync");
3778 break;
3779
3780 case AARCH64_OPND_BTI_TARGET:
3781 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
3782 snprintf (buf, size, "%s", opnd->hint_option->name);
3783 break;
3784
3785 default:
3786 assert (0);
3787 }
3788 }
3789 \f
/* Build the 16-bit system-register encoding from its five fields
   (op0, op1, CRn, CRm, op2); the shift-then-right-shift packs the
   fields as op0:op1:CRn:CRm:op2 with op2 in the low three bits.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers.  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions.  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

#define SYSREG(name, encoding, flags, features) \
  { name, encoding, flags, features }

#define SR_CORE(n,e,f) SYSREG (n,e,f,0)

#define SR_FEAT(n,e,f,feat) \
  SYSREG ((n), (e), (f) | F_ARCHEXT, AARCH64_FEATURE_##feat)

#define SR_FEAT2(n,e,f,fe1,fe2) \
  SYSREG ((n), (e), (f) | F_ARCHEXT, \
	  AARCH64_FEATURE_##fe1 | AARCH64_FEATURE_##fe2)

#define SR_RNG(n,e,f)	 SR_FEAT2(n,e,f,RNG,V8_5)
#define SR_V8_1_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_1)
#define SR_V8_4_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_4)

#define SR_V8_A(n,e,f)	  SR_FEAT (n,e,f,V8_A)
#define SR_V8_R(n,e,f)	  SR_FEAT (n,e,f,V8_R)
#define SR_V8_1(n,e,f)	  SR_FEAT (n,e,f,V8_1)
#define SR_V8_2(n,e,f)	  SR_FEAT (n,e,f,V8_2)
#define SR_V8_3(n,e,f)	  SR_FEAT (n,e,f,V8_3)
/* Note: this macro was previously defined twice with identical bodies;
   the redundant duplicate definition has been removed.  */
#define SR_V8_4(n,e,f)	  SR_FEAT (n,e,f,V8_4)
#define SR_PAN(n,e,f)	  SR_FEAT (n,e,f,PAN)
#define SR_RAS(n,e,f)	  SR_FEAT (n,e,f,RAS)
#define SR_SSBS(n,e,f)	  SR_FEAT (n,e,f,SSBS)
#define SR_SVE(n,e,f)	  SR_FEAT (n,e,f,SVE)
#define SR_ID_PFR2(n,e,f) SR_FEAT (n,e,f,ID_PFR2)
#define SR_PROFILE(n,e,f) SR_FEAT (n,e,f,PROFILE)
#define SR_MEMTAG(n,e,f)  SR_FEAT (n,e,f,MEMTAG)
#define SR_SCXTNUM(n,e,f) SR_FEAT (n,e,f,SCXTNUM)

#define SR_EXPAND_ELx(f,x) \
  f (x, 1),  \
  f (x, 2),  \
  f (x, 3),  \
  f (x, 4),  \
  f (x, 5),  \
  f (x, 6),  \
  f (x, 7),  \
  f (x, 8),  \
  f (x, 9),  \
  f (x, 10), \
  f (x, 11), \
  f (x, 12), \
  f (x, 13), \
  f (x, 14), \
  f (x, 15),

#define SR_EXPAND_EL12(f) \
  SR_EXPAND_ELx (f,1) \
  SR_EXPAND_ELx (f,2)
3866
/* TODO: one more issue still needs to be resolved:
   1. handle cpu-implementation-defined system registers.

   Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
   respectively.  If neither of these are set then the register is read-write.  */
3872 const aarch64_sys_reg aarch64_sys_regs [] =
3873 {
3874 SR_CORE ("spsr_el1", CPEN_ (0,C0,0), 0), /* = spsr_svc. */
3875 SR_V8_1 ("spsr_el12", CPEN_ (5,C0,0), 0),
3876 SR_CORE ("elr_el1", CPEN_ (0,C0,1), 0),
3877 SR_V8_1 ("elr_el12", CPEN_ (5,C0,1), 0),
3878 SR_CORE ("sp_el0", CPEN_ (0,C1,0), 0),
3879 SR_CORE ("spsel", CPEN_ (0,C2,0), 0),
3880 SR_CORE ("daif", CPEN_ (3,C2,1), 0),
3881 SR_CORE ("currentel", CPEN_ (0,C2,2), F_REG_READ),
3882 SR_PAN ("pan", CPEN_ (0,C2,3), 0),
3883 SR_V8_2 ("uao", CPEN_ (0,C2,4), 0),
3884 SR_CORE ("nzcv", CPEN_ (3,C2,0), 0),
3885 SR_SSBS ("ssbs", CPEN_ (3,C2,6), 0),
3886 SR_CORE ("fpcr", CPEN_ (3,C4,0), 0),
3887 SR_CORE ("fpsr", CPEN_ (3,C4,1), 0),
3888 SR_CORE ("dspsr_el0", CPEN_ (3,C5,0), 0),
3889 SR_CORE ("dlr_el0", CPEN_ (3,C5,1), 0),
3890 SR_CORE ("spsr_el2", CPEN_ (4,C0,0), 0), /* = spsr_hyp. */
3891 SR_CORE ("elr_el2", CPEN_ (4,C0,1), 0),
3892 SR_CORE ("sp_el1", CPEN_ (4,C1,0), 0),
3893 SR_CORE ("spsr_irq", CPEN_ (4,C3,0), 0),
3894 SR_CORE ("spsr_abt", CPEN_ (4,C3,1), 0),
3895 SR_CORE ("spsr_und", CPEN_ (4,C3,2), 0),
3896 SR_CORE ("spsr_fiq", CPEN_ (4,C3,3), 0),
3897 SR_CORE ("spsr_el3", CPEN_ (6,C0,0), 0),
3898 SR_CORE ("elr_el3", CPEN_ (6,C0,1), 0),
3899 SR_CORE ("sp_el2", CPEN_ (6,C1,0), 0),
3900 SR_CORE ("spsr_svc", CPEN_ (0,C0,0), F_DEPRECATED), /* = spsr_el1. */
3901 SR_CORE ("spsr_hyp", CPEN_ (4,C0,0), F_DEPRECATED), /* = spsr_el2. */
3902 SR_CORE ("midr_el1", CPENC (3,0,C0,C0,0), F_REG_READ),
3903 SR_CORE ("ctr_el0", CPENC (3,3,C0,C0,1), F_REG_READ),
3904 SR_CORE ("mpidr_el1", CPENC (3,0,C0,C0,5), F_REG_READ),
3905 SR_CORE ("revidr_el1", CPENC (3,0,C0,C0,6), F_REG_READ),
3906 SR_CORE ("aidr_el1", CPENC (3,1,C0,C0,7), F_REG_READ),
3907 SR_CORE ("dczid_el0", CPENC (3,3,C0,C0,7), F_REG_READ),
3908 SR_CORE ("id_dfr0_el1", CPENC (3,0,C0,C1,2), F_REG_READ),
3909 SR_CORE ("id_pfr0_el1", CPENC (3,0,C0,C1,0), F_REG_READ),
3910 SR_CORE ("id_pfr1_el1", CPENC (3,0,C0,C1,1), F_REG_READ),
3911 SR_ID_PFR2 ("id_pfr2_el1", CPENC (3,0,C0,C3,4), F_REG_READ),
3912 SR_CORE ("id_afr0_el1", CPENC (3,0,C0,C1,3), F_REG_READ),
3913 SR_CORE ("id_mmfr0_el1", CPENC (3,0,C0,C1,4), F_REG_READ),
3914 SR_CORE ("id_mmfr1_el1", CPENC (3,0,C0,C1,5), F_REG_READ),
3915 SR_CORE ("id_mmfr2_el1", CPENC (3,0,C0,C1,6), F_REG_READ),
3916 SR_CORE ("id_mmfr3_el1", CPENC (3,0,C0,C1,7), F_REG_READ),
3917 SR_CORE ("id_mmfr4_el1", CPENC (3,0,C0,C2,6), F_REG_READ),
3918 SR_CORE ("id_isar0_el1", CPENC (3,0,C0,C2,0), F_REG_READ),
3919 SR_CORE ("id_isar1_el1", CPENC (3,0,C0,C2,1), F_REG_READ),
3920 SR_CORE ("id_isar2_el1", CPENC (3,0,C0,C2,2), F_REG_READ),
3921 SR_CORE ("id_isar3_el1", CPENC (3,0,C0,C2,3), F_REG_READ),
3922 SR_CORE ("id_isar4_el1", CPENC (3,0,C0,C2,4), F_REG_READ),
3923 SR_CORE ("id_isar5_el1", CPENC (3,0,C0,C2,5), F_REG_READ),
3924 SR_CORE ("mvfr0_el1", CPENC (3,0,C0,C3,0), F_REG_READ),
3925 SR_CORE ("mvfr1_el1", CPENC (3,0,C0,C3,1), F_REG_READ),
3926 SR_CORE ("mvfr2_el1", CPENC (3,0,C0,C3,2), F_REG_READ),
3927 SR_CORE ("ccsidr_el1", CPENC (3,1,C0,C0,0), F_REG_READ),
3928 SR_CORE ("id_aa64pfr0_el1", CPENC (3,0,C0,C4,0), F_REG_READ),
3929 SR_CORE ("id_aa64pfr1_el1", CPENC (3,0,C0,C4,1), F_REG_READ),
3930 SR_CORE ("id_aa64dfr0_el1", CPENC (3,0,C0,C5,0), F_REG_READ),
3931 SR_CORE ("id_aa64dfr1_el1", CPENC (3,0,C0,C5,1), F_REG_READ),
3932 SR_CORE ("id_aa64isar0_el1", CPENC (3,0,C0,C6,0), F_REG_READ),
3933 SR_CORE ("id_aa64isar1_el1", CPENC (3,0,C0,C6,1), F_REG_READ),
3934 SR_CORE ("id_aa64mmfr0_el1", CPENC (3,0,C0,C7,0), F_REG_READ),
3935 SR_CORE ("id_aa64mmfr1_el1", CPENC (3,0,C0,C7,1), F_REG_READ),
3936 SR_CORE ("id_aa64mmfr2_el1", CPENC (3,0,C0,C7,2), F_REG_READ),
3937 SR_CORE ("id_aa64afr0_el1", CPENC (3,0,C0,C5,4), F_REG_READ),
3938 SR_CORE ("id_aa64afr1_el1", CPENC (3,0,C0,C5,5), F_REG_READ),
3939 SR_SVE ("id_aa64zfr0_el1", CPENC (3,0,C0,C4,4), F_REG_READ),
3940 SR_CORE ("clidr_el1", CPENC (3,1,C0,C0,1), F_REG_READ),
3941 SR_CORE ("csselr_el1", CPENC (3,2,C0,C0,0), 0),
3942 SR_CORE ("vpidr_el2", CPENC (3,4,C0,C0,0), 0),
3943 SR_CORE ("vmpidr_el2", CPENC (3,4,C0,C0,5), 0),
3944 SR_CORE ("sctlr_el1", CPENC (3,0,C1,C0,0), 0),
3945 SR_CORE ("sctlr_el2", CPENC (3,4,C1,C0,0), 0),
3946 SR_CORE ("sctlr_el3", CPENC (3,6,C1,C0,0), 0),
3947 SR_V8_1 ("sctlr_el12", CPENC (3,5,C1,C0,0), 0),
3948 SR_CORE ("actlr_el1", CPENC (3,0,C1,C0,1), 0),
3949 SR_CORE ("actlr_el2", CPENC (3,4,C1,C0,1), 0),
3950 SR_CORE ("actlr_el3", CPENC (3,6,C1,C0,1), 0),
3951 SR_CORE ("cpacr_el1", CPENC (3,0,C1,C0,2), 0),
3952 SR_V8_1 ("cpacr_el12", CPENC (3,5,C1,C0,2), 0),
3953 SR_CORE ("cptr_el2", CPENC (3,4,C1,C1,2), 0),
3954 SR_CORE ("cptr_el3", CPENC (3,6,C1,C1,2), 0),
3955 SR_CORE ("scr_el3", CPENC (3,6,C1,C1,0), 0),
3956 SR_CORE ("hcr_el2", CPENC (3,4,C1,C1,0), 0),
3957 SR_CORE ("mdcr_el2", CPENC (3,4,C1,C1,1), 0),
3958 SR_CORE ("mdcr_el3", CPENC (3,6,C1,C3,1), 0),
3959 SR_CORE ("hstr_el2", CPENC (3,4,C1,C1,3), 0),
3960 SR_CORE ("hacr_el2", CPENC (3,4,C1,C1,7), 0),
3961 SR_SVE ("zcr_el1", CPENC (3,0,C1,C2,0), 0),
3962 SR_SVE ("zcr_el12", CPENC (3,5,C1,C2,0), 0),
3963 SR_SVE ("zcr_el2", CPENC (3,4,C1,C2,0), 0),
3964 SR_SVE ("zcr_el3", CPENC (3,6,C1,C2,0), 0),
3965 SR_SVE ("zidr_el1", CPENC (3,0,C0,C0,7), 0),
3966 SR_CORE ("ttbr0_el1", CPENC (3,0,C2,C0,0), 0),
3967 SR_CORE ("ttbr1_el1", CPENC (3,0,C2,C0,1), 0),
3968 SR_V8_A ("ttbr0_el2", CPENC (3,4,C2,C0,0), 0),
3969 SR_V8_1_A ("ttbr1_el2", CPENC (3,4,C2,C0,1), 0),
3970 SR_CORE ("ttbr0_el3", CPENC (3,6,C2,C0,0), 0),
3971 SR_V8_1 ("ttbr0_el12", CPENC (3,5,C2,C0,0), 0),
3972 SR_V8_1 ("ttbr1_el12", CPENC (3,5,C2,C0,1), 0),
3973 SR_V8_A ("vttbr_el2", CPENC (3,4,C2,C1,0), 0),
3974 SR_CORE ("tcr_el1", CPENC (3,0,C2,C0,2), 0),
3975 SR_CORE ("tcr_el2", CPENC (3,4,C2,C0,2), 0),
3976 SR_CORE ("tcr_el3", CPENC (3,6,C2,C0,2), 0),
3977 SR_V8_1 ("tcr_el12", CPENC (3,5,C2,C0,2), 0),
3978 SR_CORE ("vtcr_el2", CPENC (3,4,C2,C1,2), 0),
3979 SR_V8_3 ("apiakeylo_el1", CPENC (3,0,C2,C1,0), 0),
3980 SR_V8_3 ("apiakeyhi_el1", CPENC (3,0,C2,C1,1), 0),
3981 SR_V8_3 ("apibkeylo_el1", CPENC (3,0,C2,C1,2), 0),
3982 SR_V8_3 ("apibkeyhi_el1", CPENC (3,0,C2,C1,3), 0),
3983 SR_V8_3 ("apdakeylo_el1", CPENC (3,0,C2,C2,0), 0),
3984 SR_V8_3 ("apdakeyhi_el1", CPENC (3,0,C2,C2,1), 0),
3985 SR_V8_3 ("apdbkeylo_el1", CPENC (3,0,C2,C2,2), 0),
3986 SR_V8_3 ("apdbkeyhi_el1", CPENC (3,0,C2,C2,3), 0),
3987 SR_V8_3 ("apgakeylo_el1", CPENC (3,0,C2,C3,0), 0),
3988 SR_V8_3 ("apgakeyhi_el1", CPENC (3,0,C2,C3,1), 0),
3989 SR_CORE ("afsr0_el1", CPENC (3,0,C5,C1,0), 0),
3990 SR_CORE ("afsr1_el1", CPENC (3,0,C5,C1,1), 0),
3991 SR_CORE ("afsr0_el2", CPENC (3,4,C5,C1,0), 0),
3992 SR_CORE ("afsr1_el2", CPENC (3,4,C5,C1,1), 0),
3993 SR_CORE ("afsr0_el3", CPENC (3,6,C5,C1,0), 0),
3994 SR_V8_1 ("afsr0_el12", CPENC (3,5,C5,C1,0), 0),
3995 SR_CORE ("afsr1_el3", CPENC (3,6,C5,C1,1), 0),
3996 SR_V8_1 ("afsr1_el12", CPENC (3,5,C5,C1,1), 0),
3997 SR_CORE ("esr_el1", CPENC (3,0,C5,C2,0), 0),
3998 SR_CORE ("esr_el2", CPENC (3,4,C5,C2,0), 0),
3999 SR_CORE ("esr_el3", CPENC (3,6,C5,C2,0), 0),
4000 SR_V8_1 ("esr_el12", CPENC (3,5,C5,C2,0), 0),
4001 SR_RAS ("vsesr_el2", CPENC (3,4,C5,C2,3), 0),
4002 SR_CORE ("fpexc32_el2", CPENC (3,4,C5,C3,0), 0),
4003 SR_RAS ("erridr_el1", CPENC (3,0,C5,C3,0), F_REG_READ),
4004 SR_RAS ("errselr_el1", CPENC (3,0,C5,C3,1), 0),
4005 SR_RAS ("erxfr_el1", CPENC (3,0,C5,C4,0), F_REG_READ),
4006 SR_RAS ("erxctlr_el1", CPENC (3,0,C5,C4,1), 0),
4007 SR_RAS ("erxstatus_el1", CPENC (3,0,C5,C4,2), 0),
4008 SR_RAS ("erxaddr_el1", CPENC (3,0,C5,C4,3), 0),
4009 SR_RAS ("erxmisc0_el1", CPENC (3,0,C5,C5,0), 0),
4010 SR_RAS ("erxmisc1_el1", CPENC (3,0,C5,C5,1), 0),
4011 SR_RAS ("erxmisc2_el1", CPENC (3,0,C5,C5,2), 0),
4012 SR_RAS ("erxmisc3_el1", CPENC (3,0,C5,C5,3), 0),
4013 SR_RAS ("erxpfgcdn_el1", CPENC (3,0,C5,C4,6), 0),
4014 SR_RAS ("erxpfgctl_el1", CPENC (3,0,C5,C4,5), 0),
4015 SR_RAS ("erxpfgf_el1", CPENC (3,0,C5,C4,4), F_REG_READ),
4016 SR_CORE ("far_el1", CPENC (3,0,C6,C0,0), 0),
4017 SR_CORE ("far_el2", CPENC (3,4,C6,C0,0), 0),
4018 SR_CORE ("far_el3", CPENC (3,6,C6,C0,0), 0),
4019 SR_V8_1 ("far_el12", CPENC (3,5,C6,C0,0), 0),
4020 SR_CORE ("hpfar_el2", CPENC (3,4,C6,C0,4), 0),
4021 SR_CORE ("par_el1", CPENC (3,0,C7,C4,0), 0),
4022 SR_CORE ("mair_el1", CPENC (3,0,C10,C2,0), 0),
4023 SR_CORE ("mair_el2", CPENC (3,4,C10,C2,0), 0),
4024 SR_CORE ("mair_el3", CPENC (3,6,C10,C2,0), 0),
4025 SR_V8_1 ("mair_el12", CPENC (3,5,C10,C2,0), 0),
4026 SR_CORE ("amair_el1", CPENC (3,0,C10,C3,0), 0),
4027 SR_CORE ("amair_el2", CPENC (3,4,C10,C3,0), 0),
4028 SR_CORE ("amair_el3", CPENC (3,6,C10,C3,0), 0),
4029 SR_V8_1 ("amair_el12", CPENC (3,5,C10,C3,0), 0),
4030 SR_CORE ("vbar_el1", CPENC (3,0,C12,C0,0), 0),
4031 SR_CORE ("vbar_el2", CPENC (3,4,C12,C0,0), 0),
4032 SR_CORE ("vbar_el3", CPENC (3,6,C12,C0,0), 0),
4033 SR_V8_1 ("vbar_el12", CPENC (3,5,C12,C0,0), 0),
4034 SR_CORE ("rvbar_el1", CPENC (3,0,C12,C0,1), F_REG_READ),
4035 SR_CORE ("rvbar_el2", CPENC (3,4,C12,C0,1), F_REG_READ),
4036 SR_CORE ("rvbar_el3", CPENC (3,6,C12,C0,1), F_REG_READ),
4037 SR_CORE ("rmr_el1", CPENC (3,0,C12,C0,2), 0),
4038 SR_CORE ("rmr_el2", CPENC (3,4,C12,C0,2), 0),
4039 SR_CORE ("rmr_el3", CPENC (3,6,C12,C0,2), 0),
4040 SR_CORE ("isr_el1", CPENC (3,0,C12,C1,0), F_REG_READ),
4041 SR_RAS ("disr_el1", CPENC (3,0,C12,C1,1), 0),
4042 SR_RAS ("vdisr_el2", CPENC (3,4,C12,C1,1), 0),
4043 SR_CORE ("contextidr_el1", CPENC (3,0,C13,C0,1), 0),
4044 SR_V8_1 ("contextidr_el2", CPENC (3,4,C13,C0,1), 0),
4045 SR_V8_1 ("contextidr_el12", CPENC (3,5,C13,C0,1), 0),
4046 SR_RNG ("rndr", CPENC (3,3,C2,C4,0), F_REG_READ),
4047 SR_RNG ("rndrrs", CPENC (3,3,C2,C4,1), F_REG_READ),
4048 SR_MEMTAG ("tco", CPENC (3,3,C4,C2,7), 0),
4049 SR_MEMTAG ("tfsre0_el1", CPENC (3,0,C5,C6,1), 0),
4050 SR_MEMTAG ("tfsr_el1", CPENC (3,0,C5,C6,0), 0),
4051 SR_MEMTAG ("tfsr_el2", CPENC (3,4,C5,C6,0), 0),
4052 SR_MEMTAG ("tfsr_el3", CPENC (3,6,C5,C6,0), 0),
4053 SR_MEMTAG ("tfsr_el12", CPENC (3,5,C5,C6,0), 0),
4054 SR_MEMTAG ("rgsr_el1", CPENC (3,0,C1,C0,5), 0),
4055 SR_MEMTAG ("gcr_el1", CPENC (3,0,C1,C0,6), 0),
4056 SR_MEMTAG ("gmid_el1", CPENC (3,1,C0,C0,4), F_REG_READ),
4057 SR_CORE ("tpidr_el0", CPENC (3,3,C13,C0,2), 0),
4058 SR_CORE ("tpidrro_el0", CPENC (3,3,C13,C0,3), 0),
4059 SR_CORE ("tpidr_el1", CPENC (3,0,C13,C0,4), 0),
4060 SR_CORE ("tpidr_el2", CPENC (3,4,C13,C0,2), 0),
4061 SR_CORE ("tpidr_el3", CPENC (3,6,C13,C0,2), 0),
4062 SR_SCXTNUM ("scxtnum_el0", CPENC (3,3,C13,C0,7), 0),
4063 SR_SCXTNUM ("scxtnum_el1", CPENC (3,0,C13,C0,7), 0),
4064 SR_SCXTNUM ("scxtnum_el2", CPENC (3,4,C13,C0,7), 0),
4065 SR_SCXTNUM ("scxtnum_el12", CPENC (3,5,C13,C0,7), 0),
4066 SR_SCXTNUM ("scxtnum_el3", CPENC (3,6,C13,C0,7), 0),
4067 SR_CORE ("teecr32_el1", CPENC (2,2,C0, C0,0), 0), /* See section 3.9.7.1. */
4068 SR_CORE ("cntfrq_el0", CPENC (3,3,C14,C0,0), 0),
4069 SR_CORE ("cntpct_el0", CPENC (3,3,C14,C0,1), F_REG_READ),
4070 SR_CORE ("cntvct_el0", CPENC (3,3,C14,C0,2), F_REG_READ),
4071 SR_CORE ("cntvoff_el2", CPENC (3,4,C14,C0,3), 0),
4072 SR_CORE ("cntkctl_el1", CPENC (3,0,C14,C1,0), 0),
4073 SR_V8_1 ("cntkctl_el12", CPENC (3,5,C14,C1,0), 0),
4074 SR_CORE ("cnthctl_el2", CPENC (3,4,C14,C1,0), 0),
4075 SR_CORE ("cntp_tval_el0", CPENC (3,3,C14,C2,0), 0),
4076 SR_V8_1 ("cntp_tval_el02", CPENC (3,5,C14,C2,0), 0),
4077 SR_CORE ("cntp_ctl_el0", CPENC (3,3,C14,C2,1), 0),
4078 SR_V8_1 ("cntp_ctl_el02", CPENC (3,5,C14,C2,1), 0),
4079 SR_CORE ("cntp_cval_el0", CPENC (3,3,C14,C2,2), 0),
4080 SR_V8_1 ("cntp_cval_el02", CPENC (3,5,C14,C2,2), 0),
4081 SR_CORE ("cntv_tval_el0", CPENC (3,3,C14,C3,0), 0),
4082 SR_V8_1 ("cntv_tval_el02", CPENC (3,5,C14,C3,0), 0),
4083 SR_CORE ("cntv_ctl_el0", CPENC (3,3,C14,C3,1), 0),
4084 SR_V8_1 ("cntv_ctl_el02", CPENC (3,5,C14,C3,1), 0),
4085 SR_CORE ("cntv_cval_el0", CPENC (3,3,C14,C3,2), 0),
4086 SR_V8_1 ("cntv_cval_el02", CPENC (3,5,C14,C3,2), 0),
4087 SR_CORE ("cnthp_tval_el2", CPENC (3,4,C14,C2,0), 0),
4088 SR_CORE ("cnthp_ctl_el2", CPENC (3,4,C14,C2,1), 0),
4089 SR_CORE ("cnthp_cval_el2", CPENC (3,4,C14,C2,2), 0),
4090 SR_CORE ("cntps_tval_el1", CPENC (3,7,C14,C2,0), 0),
4091 SR_CORE ("cntps_ctl_el1", CPENC (3,7,C14,C2,1), 0),
4092 SR_CORE ("cntps_cval_el1", CPENC (3,7,C14,C2,2), 0),
4093 SR_V8_1 ("cnthv_tval_el2", CPENC (3,4,C14,C3,0), 0),
4094 SR_V8_1 ("cnthv_ctl_el2", CPENC (3,4,C14,C3,1), 0),
4095 SR_V8_1 ("cnthv_cval_el2", CPENC (3,4,C14,C3,2), 0),
4096 SR_CORE ("dacr32_el2", CPENC (3,4,C3,C0,0), 0),
4097 SR_CORE ("ifsr32_el2", CPENC (3,4,C5,C0,1), 0),
4098 SR_CORE ("teehbr32_el1", CPENC (2,2,C1,C0,0), 0),
4099 SR_CORE ("sder32_el3", CPENC (3,6,C1,C1,1), 0),
4100 SR_CORE ("mdscr_el1", CPENC (2,0,C0,C2,2), 0),
4101 SR_CORE ("mdccsr_el0", CPENC (2,3,C0,C1,0), F_REG_READ),
4102 SR_CORE ("mdccint_el1", CPENC (2,0,C0,C2,0), 0),
4103 SR_CORE ("dbgdtr_el0", CPENC (2,3,C0,C4,0), 0),
4104 SR_CORE ("dbgdtrrx_el0", CPENC (2,3,C0,C5,0), F_REG_READ),
4105 SR_CORE ("dbgdtrtx_el0", CPENC (2,3,C0,C5,0), F_REG_WRITE),
4106 SR_CORE ("osdtrrx_el1", CPENC (2,0,C0,C0,2), 0),
4107 SR_CORE ("osdtrtx_el1", CPENC (2,0,C0,C3,2), 0),
4108 SR_CORE ("oseccr_el1", CPENC (2,0,C0,C6,2), 0),
4109 SR_CORE ("dbgvcr32_el2", CPENC (2,4,C0,C7,0), 0),
4110 SR_CORE ("dbgbvr0_el1", CPENC (2,0,C0,C0,4), 0),
4111 SR_CORE ("dbgbvr1_el1", CPENC (2,0,C0,C1,4), 0),
4112 SR_CORE ("dbgbvr2_el1", CPENC (2,0,C0,C2,4), 0),
4113 SR_CORE ("dbgbvr3_el1", CPENC (2,0,C0,C3,4), 0),
4114 SR_CORE ("dbgbvr4_el1", CPENC (2,0,C0,C4,4), 0),
4115 SR_CORE ("dbgbvr5_el1", CPENC (2,0,C0,C5,4), 0),
4116 SR_CORE ("dbgbvr6_el1", CPENC (2,0,C0,C6,4), 0),
4117 SR_CORE ("dbgbvr7_el1", CPENC (2,0,C0,C7,4), 0),
4118 SR_CORE ("dbgbvr8_el1", CPENC (2,0,C0,C8,4), 0),
4119 SR_CORE ("dbgbvr9_el1", CPENC (2,0,C0,C9,4), 0),
4120 SR_CORE ("dbgbvr10_el1", CPENC (2,0,C0,C10,4), 0),
4121 SR_CORE ("dbgbvr11_el1", CPENC (2,0,C0,C11,4), 0),
4122 SR_CORE ("dbgbvr12_el1", CPENC (2,0,C0,C12,4), 0),
4123 SR_CORE ("dbgbvr13_el1", CPENC (2,0,C0,C13,4), 0),
4124 SR_CORE ("dbgbvr14_el1", CPENC (2,0,C0,C14,4), 0),
4125 SR_CORE ("dbgbvr15_el1", CPENC (2,0,C0,C15,4), 0),
4126 SR_CORE ("dbgbcr0_el1", CPENC (2,0,C0,C0,5), 0),
4127 SR_CORE ("dbgbcr1_el1", CPENC (2,0,C0,C1,5), 0),
4128 SR_CORE ("dbgbcr2_el1", CPENC (2,0,C0,C2,5), 0),
4129 SR_CORE ("dbgbcr3_el1", CPENC (2,0,C0,C3,5), 0),
4130 SR_CORE ("dbgbcr4_el1", CPENC (2,0,C0,C4,5), 0),
4131 SR_CORE ("dbgbcr5_el1", CPENC (2,0,C0,C5,5), 0),
4132 SR_CORE ("dbgbcr6_el1", CPENC (2,0,C0,C6,5), 0),
4133 SR_CORE ("dbgbcr7_el1", CPENC (2,0,C0,C7,5), 0),
4134 SR_CORE ("dbgbcr8_el1", CPENC (2,0,C0,C8,5), 0),
4135 SR_CORE ("dbgbcr9_el1", CPENC (2,0,C0,C9,5), 0),
4136 SR_CORE ("dbgbcr10_el1", CPENC (2,0,C0,C10,5), 0),
4137 SR_CORE ("dbgbcr11_el1", CPENC (2,0,C0,C11,5), 0),
4138 SR_CORE ("dbgbcr12_el1", CPENC (2,0,C0,C12,5), 0),
4139 SR_CORE ("dbgbcr13_el1", CPENC (2,0,C0,C13,5), 0),
4140 SR_CORE ("dbgbcr14_el1", CPENC (2,0,C0,C14,5), 0),
4141 SR_CORE ("dbgbcr15_el1", CPENC (2,0,C0,C15,5), 0),
4142 SR_CORE ("dbgwvr0_el1", CPENC (2,0,C0,C0,6), 0),
4143 SR_CORE ("dbgwvr1_el1", CPENC (2,0,C0,C1,6), 0),
4144 SR_CORE ("dbgwvr2_el1", CPENC (2,0,C0,C2,6), 0),
4145 SR_CORE ("dbgwvr3_el1", CPENC (2,0,C0,C3,6), 0),
4146 SR_CORE ("dbgwvr4_el1", CPENC (2,0,C0,C4,6), 0),
4147 SR_CORE ("dbgwvr5_el1", CPENC (2,0,C0,C5,6), 0),
4148 SR_CORE ("dbgwvr6_el1", CPENC (2,0,C0,C6,6), 0),
4149 SR_CORE ("dbgwvr7_el1", CPENC (2,0,C0,C7,6), 0),
4150 SR_CORE ("dbgwvr8_el1", CPENC (2,0,C0,C8,6), 0),
4151 SR_CORE ("dbgwvr9_el1", CPENC (2,0,C0,C9,6), 0),
4152 SR_CORE ("dbgwvr10_el1", CPENC (2,0,C0,C10,6), 0),
4153 SR_CORE ("dbgwvr11_el1", CPENC (2,0,C0,C11,6), 0),
4154 SR_CORE ("dbgwvr12_el1", CPENC (2,0,C0,C12,6), 0),
4155 SR_CORE ("dbgwvr13_el1", CPENC (2,0,C0,C13,6), 0),
4156 SR_CORE ("dbgwvr14_el1", CPENC (2,0,C0,C14,6), 0),
4157 SR_CORE ("dbgwvr15_el1", CPENC (2,0,C0,C15,6), 0),
4158 SR_CORE ("dbgwcr0_el1", CPENC (2,0,C0,C0,7), 0),
4159 SR_CORE ("dbgwcr1_el1", CPENC (2,0,C0,C1,7), 0),
4160 SR_CORE ("dbgwcr2_el1", CPENC (2,0,C0,C2,7), 0),
4161 SR_CORE ("dbgwcr3_el1", CPENC (2,0,C0,C3,7), 0),
4162 SR_CORE ("dbgwcr4_el1", CPENC (2,0,C0,C4,7), 0),
4163 SR_CORE ("dbgwcr5_el1", CPENC (2,0,C0,C5,7), 0),
4164 SR_CORE ("dbgwcr6_el1", CPENC (2,0,C0,C6,7), 0),
4165 SR_CORE ("dbgwcr7_el1", CPENC (2,0,C0,C7,7), 0),
4166 SR_CORE ("dbgwcr8_el1", CPENC (2,0,C0,C8,7), 0),
4167 SR_CORE ("dbgwcr9_el1", CPENC (2,0,C0,C9,7), 0),
4168 SR_CORE ("dbgwcr10_el1", CPENC (2,0,C0,C10,7), 0),
4169 SR_CORE ("dbgwcr11_el1", CPENC (2,0,C0,C11,7), 0),
4170 SR_CORE ("dbgwcr12_el1", CPENC (2,0,C0,C12,7), 0),
4171 SR_CORE ("dbgwcr13_el1", CPENC (2,0,C0,C13,7), 0),
4172 SR_CORE ("dbgwcr14_el1", CPENC (2,0,C0,C14,7), 0),
4173 SR_CORE ("dbgwcr15_el1", CPENC (2,0,C0,C15,7), 0),
4174 SR_CORE ("mdrar_el1", CPENC (2,0,C1,C0,0), F_REG_READ),
4175 SR_CORE ("oslar_el1", CPENC (2,0,C1,C0,4), F_REG_WRITE),
4176 SR_CORE ("oslsr_el1", CPENC (2,0,C1,C1,4), F_REG_READ),
4177 SR_CORE ("osdlr_el1", CPENC (2,0,C1,C3,4), 0),
4178 SR_CORE ("dbgprcr_el1", CPENC (2,0,C1,C4,4), 0),
4179 SR_CORE ("dbgclaimset_el1", CPENC (2,0,C7,C8,6), 0),
4180 SR_CORE ("dbgclaimclr_el1", CPENC (2,0,C7,C9,6), 0),
4181 SR_CORE ("dbgauthstatus_el1", CPENC (2,0,C7,C14,6), F_REG_READ),
4182 SR_PROFILE ("pmblimitr_el1", CPENC (3,0,C9,C10,0), 0),
4183 SR_PROFILE ("pmbptr_el1", CPENC (3,0,C9,C10,1), 0),
4184 SR_PROFILE ("pmbsr_el1", CPENC (3,0,C9,C10,3), 0),
4185 SR_PROFILE ("pmbidr_el1", CPENC (3,0,C9,C10,7), F_REG_READ),
4186 SR_PROFILE ("pmscr_el1", CPENC (3,0,C9,C9,0), 0),
4187 SR_PROFILE ("pmsicr_el1", CPENC (3,0,C9,C9,2), 0),
4188 SR_PROFILE ("pmsirr_el1", CPENC (3,0,C9,C9,3), 0),
4189 SR_PROFILE ("pmsfcr_el1", CPENC (3,0,C9,C9,4), 0),
4190 SR_PROFILE ("pmsevfr_el1", CPENC (3,0,C9,C9,5), 0),
4191 SR_PROFILE ("pmslatfr_el1", CPENC (3,0,C9,C9,6), 0),
4192 SR_PROFILE ("pmsidr_el1", CPENC (3,0,C9,C9,7), 0),
4193 SR_PROFILE ("pmscr_el2", CPENC (3,4,C9,C9,0), 0),
4194 SR_PROFILE ("pmscr_el12", CPENC (3,5,C9,C9,0), 0),
4195 SR_CORE ("pmcr_el0", CPENC (3,3,C9,C12,0), 0),
4196 SR_CORE ("pmcntenset_el0", CPENC (3,3,C9,C12,1), 0),
4197 SR_CORE ("pmcntenclr_el0", CPENC (3,3,C9,C12,2), 0),
4198 SR_CORE ("pmovsclr_el0", CPENC (3,3,C9,C12,3), 0),
4199 SR_CORE ("pmswinc_el0", CPENC (3,3,C9,C12,4), F_REG_WRITE),
4200 SR_CORE ("pmselr_el0", CPENC (3,3,C9,C12,5), 0),
4201 SR_CORE ("pmceid0_el0", CPENC (3,3,C9,C12,6), F_REG_READ),
4202 SR_CORE ("pmceid1_el0", CPENC (3,3,C9,C12,7), F_REG_READ),
4203 SR_CORE ("pmccntr_el0", CPENC (3,3,C9,C13,0), 0),
4204 SR_CORE ("pmxevtyper_el0", CPENC (3,3,C9,C13,1), 0),
4205 SR_CORE ("pmxevcntr_el0", CPENC (3,3,C9,C13,2), 0),
4206 SR_CORE ("pmuserenr_el0", CPENC (3,3,C9,C14,0), 0),
4207 SR_CORE ("pmintenset_el1", CPENC (3,0,C9,C14,1), 0),
4208 SR_CORE ("pmintenclr_el1", CPENC (3,0,C9,C14,2), 0),
4209 SR_CORE ("pmovsset_el0", CPENC (3,3,C9,C14,3), 0),
4210 SR_CORE ("pmevcntr0_el0", CPENC (3,3,C14,C8,0), 0),
4211 SR_CORE ("pmevcntr1_el0", CPENC (3,3,C14,C8,1), 0),
4212 SR_CORE ("pmevcntr2_el0", CPENC (3,3,C14,C8,2), 0),
4213 SR_CORE ("pmevcntr3_el0", CPENC (3,3,C14,C8,3), 0),
4214 SR_CORE ("pmevcntr4_el0", CPENC (3,3,C14,C8,4), 0),
4215 SR_CORE ("pmevcntr5_el0", CPENC (3,3,C14,C8,5), 0),
4216 SR_CORE ("pmevcntr6_el0", CPENC (3,3,C14,C8,6), 0),
4217 SR_CORE ("pmevcntr7_el0", CPENC (3,3,C14,C8,7), 0),
4218 SR_CORE ("pmevcntr8_el0", CPENC (3,3,C14,C9,0), 0),
4219 SR_CORE ("pmevcntr9_el0", CPENC (3,3,C14,C9,1), 0),
4220 SR_CORE ("pmevcntr10_el0", CPENC (3,3,C14,C9,2), 0),
4221 SR_CORE ("pmevcntr11_el0", CPENC (3,3,C14,C9,3), 0),
4222 SR_CORE ("pmevcntr12_el0", CPENC (3,3,C14,C9,4), 0),
4223 SR_CORE ("pmevcntr13_el0", CPENC (3,3,C14,C9,5), 0),
4224 SR_CORE ("pmevcntr14_el0", CPENC (3,3,C14,C9,6), 0),
4225 SR_CORE ("pmevcntr15_el0", CPENC (3,3,C14,C9,7), 0),
4226 SR_CORE ("pmevcntr16_el0", CPENC (3,3,C14,C10,0), 0),
4227 SR_CORE ("pmevcntr17_el0", CPENC (3,3,C14,C10,1), 0),
4228 SR_CORE ("pmevcntr18_el0", CPENC (3,3,C14,C10,2), 0),
4229 SR_CORE ("pmevcntr19_el0", CPENC (3,3,C14,C10,3), 0),
4230 SR_CORE ("pmevcntr20_el0", CPENC (3,3,C14,C10,4), 0),
4231 SR_CORE ("pmevcntr21_el0", CPENC (3,3,C14,C10,5), 0),
4232 SR_CORE ("pmevcntr22_el0", CPENC (3,3,C14,C10,6), 0),
4233 SR_CORE ("pmevcntr23_el0", CPENC (3,3,C14,C10,7), 0),
4234 SR_CORE ("pmevcntr24_el0", CPENC (3,3,C14,C11,0), 0),
4235 SR_CORE ("pmevcntr25_el0", CPENC (3,3,C14,C11,1), 0),
4236 SR_CORE ("pmevcntr26_el0", CPENC (3,3,C14,C11,2), 0),
4237 SR_CORE ("pmevcntr27_el0", CPENC (3,3,C14,C11,3), 0),
4238 SR_CORE ("pmevcntr28_el0", CPENC (3,3,C14,C11,4), 0),
4239 SR_CORE ("pmevcntr29_el0", CPENC (3,3,C14,C11,5), 0),
4240 SR_CORE ("pmevcntr30_el0", CPENC (3,3,C14,C11,6), 0),
4241 SR_CORE ("pmevtyper0_el0", CPENC (3,3,C14,C12,0), 0),
4242 SR_CORE ("pmevtyper1_el0", CPENC (3,3,C14,C12,1), 0),
4243 SR_CORE ("pmevtyper2_el0", CPENC (3,3,C14,C12,2), 0),
4244 SR_CORE ("pmevtyper3_el0", CPENC (3,3,C14,C12,3), 0),
4245 SR_CORE ("pmevtyper4_el0", CPENC (3,3,C14,C12,4), 0),
4246 SR_CORE ("pmevtyper5_el0", CPENC (3,3,C14,C12,5), 0),
4247 SR_CORE ("pmevtyper6_el0", CPENC (3,3,C14,C12,6), 0),
4248 SR_CORE ("pmevtyper7_el0", CPENC (3,3,C14,C12,7), 0),
4249 SR_CORE ("pmevtyper8_el0", CPENC (3,3,C14,C13,0), 0),
4250 SR_CORE ("pmevtyper9_el0", CPENC (3,3,C14,C13,1), 0),
4251 SR_CORE ("pmevtyper10_el0", CPENC (3,3,C14,C13,2), 0),
4252 SR_CORE ("pmevtyper11_el0", CPENC (3,3,C14,C13,3), 0),
4253 SR_CORE ("pmevtyper12_el0", CPENC (3,3,C14,C13,4), 0),
4254 SR_CORE ("pmevtyper13_el0", CPENC (3,3,C14,C13,5), 0),
4255 SR_CORE ("pmevtyper14_el0", CPENC (3,3,C14,C13,6), 0),
4256 SR_CORE ("pmevtyper15_el0", CPENC (3,3,C14,C13,7), 0),
4257 SR_CORE ("pmevtyper16_el0", CPENC (3,3,C14,C14,0), 0),
4258 SR_CORE ("pmevtyper17_el0", CPENC (3,3,C14,C14,1), 0),
4259 SR_CORE ("pmevtyper18_el0", CPENC (3,3,C14,C14,2), 0),
4260 SR_CORE ("pmevtyper19_el0", CPENC (3,3,C14,C14,3), 0),
4261 SR_CORE ("pmevtyper20_el0", CPENC (3,3,C14,C14,4), 0),
4262 SR_CORE ("pmevtyper21_el0", CPENC (3,3,C14,C14,5), 0),
4263 SR_CORE ("pmevtyper22_el0", CPENC (3,3,C14,C14,6), 0),
4264 SR_CORE ("pmevtyper23_el0", CPENC (3,3,C14,C14,7), 0),
4265 SR_CORE ("pmevtyper24_el0", CPENC (3,3,C14,C15,0), 0),
4266 SR_CORE ("pmevtyper25_el0", CPENC (3,3,C14,C15,1), 0),
4267 SR_CORE ("pmevtyper26_el0", CPENC (3,3,C14,C15,2), 0),
4268 SR_CORE ("pmevtyper27_el0", CPENC (3,3,C14,C15,3), 0),
4269 SR_CORE ("pmevtyper28_el0", CPENC (3,3,C14,C15,4), 0),
4270 SR_CORE ("pmevtyper29_el0", CPENC (3,3,C14,C15,5), 0),
4271 SR_CORE ("pmevtyper30_el0", CPENC (3,3,C14,C15,6), 0),
4272 SR_CORE ("pmccfiltr_el0", CPENC (3,3,C14,C15,7), 0),
4273
4274 SR_V8_4 ("dit", CPEN_ (3,C2,5), 0),
4275 SR_V8_4 ("vstcr_el2", CPENC (3,4,C2,C6,2), 0),
4276 SR_V8_4_A ("vsttbr_el2", CPENC (3,4,C2,C6,0), 0),
4277 SR_V8_4 ("cnthvs_tval_el2", CPENC (3,4,C14,C4,0), 0),
4278 SR_V8_4 ("cnthvs_cval_el2", CPENC (3,4,C14,C4,2), 0),
4279 SR_V8_4 ("cnthvs_ctl_el2", CPENC (3,4,C14,C4,1), 0),
4280 SR_V8_4 ("cnthps_tval_el2", CPENC (3,4,C14,C5,0), 0),
4281 SR_V8_4 ("cnthps_cval_el2", CPENC (3,4,C14,C5,2), 0),
4282 SR_V8_4 ("cnthps_ctl_el2", CPENC (3,4,C14,C5,1), 0),
4283 SR_V8_4 ("sder32_el2", CPENC (3,4,C1,C3,1), 0),
4284 SR_V8_4 ("vncr_el2", CPENC (3,4,C2,C2,0), 0),
4285
4286 SR_CORE ("mpam0_el1", CPENC (3,0,C10,C5,1), 0),
4287 SR_CORE ("mpam1_el1", CPENC (3,0,C10,C5,0), 0),
4288 SR_CORE ("mpam1_el12", CPENC (3,5,C10,C5,0), 0),
4289 SR_CORE ("mpam2_el2", CPENC (3,4,C10,C5,0), 0),
4290 SR_CORE ("mpam3_el3", CPENC (3,6,C10,C5,0), 0),
4291 SR_CORE ("mpamhcr_el2", CPENC (3,4,C10,C4,0), 0),
4292 SR_CORE ("mpamidr_el1", CPENC (3,0,C10,C4,4), F_REG_READ),
4293 SR_CORE ("mpamvpm0_el2", CPENC (3,4,C10,C6,0), 0),
4294 SR_CORE ("mpamvpm1_el2", CPENC (3,4,C10,C6,1), 0),
4295 SR_CORE ("mpamvpm2_el2", CPENC (3,4,C10,C6,2), 0),
4296 SR_CORE ("mpamvpm3_el2", CPENC (3,4,C10,C6,3), 0),
4297 SR_CORE ("mpamvpm4_el2", CPENC (3,4,C10,C6,4), 0),
4298 SR_CORE ("mpamvpm5_el2", CPENC (3,4,C10,C6,5), 0),
4299 SR_CORE ("mpamvpm6_el2", CPENC (3,4,C10,C6,6), 0),
4300 SR_CORE ("mpamvpm7_el2", CPENC (3,4,C10,C6,7), 0),
4301 SR_CORE ("mpamvpmv_el2", CPENC (3,4,C10,C4,1), 0),
4302
4303 SR_V8_R ("mpuir_el1", CPENC (3,0,C0,C0,4), F_REG_READ),
4304 SR_V8_R ("mpuir_el2", CPENC (3,4,C0,C0,4), F_REG_READ),
4305 SR_V8_R ("prbar_el1", CPENC (3,0,C6,C8,0), 0),
4306 SR_V8_R ("prbar_el2", CPENC (3,4,C6,C8,0), 0),
4307
4308 /* Encode a v8-R MPU protection-region base/limit register:
     EL X (1 or 2) selects op1 = (X-1) << 2, i.e. 0 or 4, matching the
     prbar_el1 / prbar_el2 encodings above; region N selects
     CRm = 8 | (N >> 1) and op2 bit 2 = N & 1 (two regions per CRm value);
     LAR picks op2 bit 0 (0 = PRBAR, 1 = PRLAR, cf. prbar_el1 vs
     prlar_el1).  Arguments are fully parenthesized so that expression
     arguments expand correctly.  */
4309 #define ENC_BARLAR(x,n,lar) \
  CPENC (3, (((x) - 1) << 2), C6, (8 | ((n) >> 1)), ((((n) & 1) << 2) | (lar)))
4310
     /* Build one prbarN_elX / prlarN_elX table entry.  N and X must be
	literals: they are stringized into the register name.  */
4311 #define PRBARn_ELx(x,n) SR_V8_R ("prbar" #n "_el" #x, ENC_BARLAR (x,n,0), 0)
4312 #define PRLARn_ELx(x,n) SR_V8_R ("prlar" #n "_el" #x, ENC_BARLAR (x,n,1), 0)
4313
4314 SR_EXPAND_EL12 (PRBARn_ELx)
4315 SR_V8_R ("prenr_el1", CPENC (3,0,C6,C1,1), 0),
4316 SR_V8_R ("prenr_el2", CPENC (3,4,C6,C1,1), 0),
4317 SR_V8_R ("prlar_el1", CPENC (3,0,C6,C8,1), 0),
4318 SR_V8_R ("prlar_el2", CPENC (3,4,C6,C8,1), 0),
4319 SR_EXPAND_EL12 (PRLARn_ELx)
4320 SR_V8_R ("prselr_el1", CPENC (3,0,C6,C2,1), 0),
4321 SR_V8_R ("prselr_el2", CPENC (3,4,C6,C2,1), 0),
4322 SR_V8_R ("vsctlr_el2", CPENC (3,4,C2,C0,0), 0),
4323
4324 SR_CORE("trbbaser_el1", CPENC (3,0,C9,C11,2), 0),
4325 SR_CORE("trbidr_el1", CPENC (3,0,C9,C11,7), F_REG_READ),
4326 SR_CORE("trblimitr_el1", CPENC (3,0,C9,C11,0), 0),
4327 SR_CORE("trbmar_el1", CPENC (3,0,C9,C11,4), 0),
4328 SR_CORE("trbptr_el1", CPENC (3,0,C9,C11,1), 0),
4329 SR_CORE("trbsr_el1", CPENC (3,0,C9,C11,3), 0),
4330 SR_CORE("trbtrg_el1", CPENC (3,0,C9,C11,6), 0),
4331
4332 SR_CORE ("trcextinselr0", CPENC (2,1,C0,C8,4), 0),
4333 SR_CORE ("trcextinselr1", CPENC (2,1,C0,C9,4), 0),
4334 SR_CORE ("trcextinselr2", CPENC (2,1,C0,C10,4), 0),
4335 SR_CORE ("trcextinselr3", CPENC (2,1,C0,C11,4), 0),
4336 SR_CORE ("trcrsr", CPENC (2,1,C0,C10,0), 0),
4337
4338 SR_CORE ("trcauthstatus", CPENC (2,1,C7,C14,6), F_REG_READ),
4339 SR_CORE ("trccidr0", CPENC (2,1,C7,C12,7), F_REG_READ),
4340 SR_CORE ("trccidr1", CPENC (2,1,C7,C13,7), F_REG_READ),
4341 SR_CORE ("trccidr2", CPENC (2,1,C7,C14,7), F_REG_READ),
4342 SR_CORE ("trccidr3", CPENC (2,1,C7,C15,7), F_REG_READ),
4343 SR_CORE ("trcdevaff0", CPENC (2,1,C7,C10,6), F_REG_READ),
4344 SR_CORE ("trcdevaff1", CPENC (2,1,C7,C11,6), F_REG_READ),
4345 SR_CORE ("trcdevarch", CPENC (2,1,C7,C15,6), F_REG_READ),
4346 SR_CORE ("trcdevid", CPENC (2,1,C7,C2,7), F_REG_READ),
4347 SR_CORE ("trcdevtype", CPENC (2,1,C7,C3,7), F_REG_READ),
4348 SR_CORE ("trcidr0", CPENC (2,1,C0,C8,7), F_REG_READ),
4349 SR_CORE ("trcidr1", CPENC (2,1,C0,C9,7), F_REG_READ),
4350 SR_CORE ("trcidr2", CPENC (2,1,C0,C10,7), F_REG_READ),
4351 SR_CORE ("trcidr3", CPENC (2,1,C0,C11,7), F_REG_READ),
4352 SR_CORE ("trcidr4", CPENC (2,1,C0,C12,7), F_REG_READ),
4353 SR_CORE ("trcidr5", CPENC (2,1,C0,C13,7), F_REG_READ),
4354 SR_CORE ("trcidr6", CPENC (2,1,C0,C14,7), F_REG_READ),
4355 SR_CORE ("trcidr7", CPENC (2,1,C0,C15,7), F_REG_READ),
4356 SR_CORE ("trcidr8", CPENC (2,1,C0,C0,6), F_REG_READ),
4357 SR_CORE ("trcidr9", CPENC (2,1,C0,C1,6), F_REG_READ),
4358 SR_CORE ("trcidr10", CPENC (2,1,C0,C2,6), F_REG_READ),
4359 SR_CORE ("trcidr11", CPENC (2,1,C0,C3,6), F_REG_READ),
4360 SR_CORE ("trcidr12", CPENC (2,1,C0,C4,6), F_REG_READ),
4361 SR_CORE ("trcidr13", CPENC (2,1,C0,C5,6), F_REG_READ),
4362 SR_CORE ("trclsr", CPENC (2,1,C7,C13,6), F_REG_READ),
4363 SR_CORE ("trcoslsr", CPENC (2,1,C1,C1,4), F_REG_READ),
4364 SR_CORE ("trcpdsr", CPENC (2,1,C1,C5,4), F_REG_READ),
4365 SR_CORE ("trcpidr0", CPENC (2,1,C7,C8,7), F_REG_READ),
4366 SR_CORE ("trcpidr1", CPENC (2,1,C7,C9,7), F_REG_READ),
4367 SR_CORE ("trcpidr2", CPENC (2,1,C7,C10,7), F_REG_READ),
4368 SR_CORE ("trcpidr3", CPENC (2,1,C7,C11,7), F_REG_READ),
4369 SR_CORE ("trcpidr4", CPENC (2,1,C7,C4,7), F_REG_READ),
4370 SR_CORE ("trcpidr5", CPENC (2,1,C7,C5,7), F_REG_READ),
4371 SR_CORE ("trcpidr6", CPENC (2,1,C7,C6,7), F_REG_READ),
4372 SR_CORE ("trcpidr7", CPENC (2,1,C7,C7,7), F_REG_READ),
4373 SR_CORE ("trcstatr", CPENC (2,1,C0,C3,0), F_REG_READ),
4374 SR_CORE ("trcacatr0", CPENC (2,1,C2,C0,2), 0),
4375 SR_CORE ("trcacatr1", CPENC (2,1,C2,C2,2), 0),
4376 SR_CORE ("trcacatr2", CPENC (2,1,C2,C4,2), 0),
4377 SR_CORE ("trcacatr3", CPENC (2,1,C2,C6,2), 0),
4378 SR_CORE ("trcacatr4", CPENC (2,1,C2,C8,2), 0),
4379 SR_CORE ("trcacatr5", CPENC (2,1,C2,C10,2), 0),
4380 SR_CORE ("trcacatr6", CPENC (2,1,C2,C12,2), 0),
4381 SR_CORE ("trcacatr7", CPENC (2,1,C2,C14,2), 0),
4382 SR_CORE ("trcacatr8", CPENC (2,1,C2,C0,3), 0),
4383 SR_CORE ("trcacatr9", CPENC (2,1,C2,C2,3), 0),
4384 SR_CORE ("trcacatr10", CPENC (2,1,C2,C4,3), 0),
4385 SR_CORE ("trcacatr11", CPENC (2,1,C2,C6,3), 0),
4386 SR_CORE ("trcacatr12", CPENC (2,1,C2,C8,3), 0),
4387 SR_CORE ("trcacatr13", CPENC (2,1,C2,C10,3), 0),
4388 SR_CORE ("trcacatr14", CPENC (2,1,C2,C12,3), 0),
4389 SR_CORE ("trcacatr15", CPENC (2,1,C2,C14,3), 0),
4390 SR_CORE ("trcacvr0", CPENC (2,1,C2,C0,0), 0),
4391 SR_CORE ("trcacvr1", CPENC (2,1,C2,C2,0), 0),
4392 SR_CORE ("trcacvr2", CPENC (2,1,C2,C4,0), 0),
4393 SR_CORE ("trcacvr3", CPENC (2,1,C2,C6,0), 0),
4394 SR_CORE ("trcacvr4", CPENC (2,1,C2,C8,0), 0),
4395 SR_CORE ("trcacvr5", CPENC (2,1,C2,C10,0), 0),
4396 SR_CORE ("trcacvr6", CPENC (2,1,C2,C12,0), 0),
4397 SR_CORE ("trcacvr7", CPENC (2,1,C2,C14,0), 0),
4398 SR_CORE ("trcacvr8", CPENC (2,1,C2,C0,1), 0),
4399 SR_CORE ("trcacvr9", CPENC (2,1,C2,C2,1), 0),
4400 SR_CORE ("trcacvr10", CPENC (2,1,C2,C4,1), 0),
4401 SR_CORE ("trcacvr11", CPENC (2,1,C2,C6,1), 0),
4402 SR_CORE ("trcacvr12", CPENC (2,1,C2,C8,1), 0),
4403 SR_CORE ("trcacvr13", CPENC (2,1,C2,C10,1), 0),
4404 SR_CORE ("trcacvr14", CPENC (2,1,C2,C12,1), 0),
4405 SR_CORE ("trcacvr15", CPENC (2,1,C2,C14,1), 0),
4406 SR_CORE ("trcauxctlr", CPENC (2,1,C0,C6,0), 0),
4407 SR_CORE ("trcbbctlr", CPENC (2,1,C0,C15,0), 0),
4408 SR_CORE ("trcccctlr", CPENC (2,1,C0,C14,0), 0),
4409 SR_CORE ("trccidcctlr0", CPENC (2,1,C3,C0,2), 0),
4410 SR_CORE ("trccidcctlr1", CPENC (2,1,C3,C1,2), 0),
4411 SR_CORE ("trccidcvr0", CPENC (2,1,C3,C0,0), 0),
4412 SR_CORE ("trccidcvr1", CPENC (2,1,C3,C2,0), 0),
4413 SR_CORE ("trccidcvr2", CPENC (2,1,C3,C4,0), 0),
4414 SR_CORE ("trccidcvr3", CPENC (2,1,C3,C6,0), 0),
4415 SR_CORE ("trccidcvr4", CPENC (2,1,C3,C8,0), 0),
4416 SR_CORE ("trccidcvr5", CPENC (2,1,C3,C10,0), 0),
4417 SR_CORE ("trccidcvr6", CPENC (2,1,C3,C12,0), 0),
4418 SR_CORE ("trccidcvr7", CPENC (2,1,C3,C14,0), 0),
4419 SR_CORE ("trcclaimclr", CPENC (2,1,C7,C9,6), 0),
4420 SR_CORE ("trcclaimset", CPENC (2,1,C7,C8,6), 0),
4421 SR_CORE ("trccntctlr0", CPENC (2,1,C0,C4,5), 0),
4422 SR_CORE ("trccntctlr1", CPENC (2,1,C0,C5,5), 0),
4423 SR_CORE ("trccntctlr2", CPENC (2,1,C0,C6,5), 0),
4424 SR_CORE ("trccntctlr3", CPENC (2,1,C0,C7,5), 0),
4425 SR_CORE ("trccntrldvr0", CPENC (2,1,C0,C0,5), 0),
4426 SR_CORE ("trccntrldvr1", CPENC (2,1,C0,C1,5), 0),
4427 SR_CORE ("trccntrldvr2", CPENC (2,1,C0,C2,5), 0),
4428 SR_CORE ("trccntrldvr3", CPENC (2,1,C0,C3,5), 0),
4429 SR_CORE ("trccntvr0", CPENC (2,1,C0,C8,5), 0),
4430 SR_CORE ("trccntvr1", CPENC (2,1,C0,C9,5), 0),
4431 SR_CORE ("trccntvr2", CPENC (2,1,C0,C10,5), 0),
4432 SR_CORE ("trccntvr3", CPENC (2,1,C0,C11,5), 0),
4433 SR_CORE ("trcconfigr", CPENC (2,1,C0,C4,0), 0),
4434 SR_CORE ("trcdvcmr0", CPENC (2,1,C2,C0,6), 0),
4435 SR_CORE ("trcdvcmr1", CPENC (2,1,C2,C4,6), 0),
4436 SR_CORE ("trcdvcmr2", CPENC (2,1,C2,C8,6), 0),
4437 SR_CORE ("trcdvcmr3", CPENC (2,1,C2,C12,6), 0),
4438 SR_CORE ("trcdvcmr4", CPENC (2,1,C2,C0,7), 0),
4439 SR_CORE ("trcdvcmr5", CPENC (2,1,C2,C4,7), 0),
4440 SR_CORE ("trcdvcmr6", CPENC (2,1,C2,C8,7), 0),
4441 SR_CORE ("trcdvcmr7", CPENC (2,1,C2,C12,7), 0),
4442 SR_CORE ("trcdvcvr0", CPENC (2,1,C2,C0,4), 0),
4443 SR_CORE ("trcdvcvr1", CPENC (2,1,C2,C4,4), 0),
4444 SR_CORE ("trcdvcvr2", CPENC (2,1,C2,C8,4), 0),
4445 SR_CORE ("trcdvcvr3", CPENC (2,1,C2,C12,4), 0),
4446 SR_CORE ("trcdvcvr4", CPENC (2,1,C2,C0,5), 0),
4447 SR_CORE ("trcdvcvr5", CPENC (2,1,C2,C4,5), 0),
4448 SR_CORE ("trcdvcvr6", CPENC (2,1,C2,C8,5), 0),
4449 SR_CORE ("trcdvcvr7", CPENC (2,1,C2,C12,5), 0),
4450 SR_CORE ("trceventctl0r", CPENC (2,1,C0,C8,0), 0),
4451 SR_CORE ("trceventctl1r", CPENC (2,1,C0,C9,0), 0),
     /* trcextinselr0..trcextinselr3 are already listed earlier in this
	table; only the un-indexed alias "trcextinselr" (same encoding as
	trcextinselr0) is added here.  Duplicate entries were redundant:
	name lookup uses the first match.  */
4453 SR_CORE ("trcextinselr", CPENC (2,1,C0,C8,4), 0),
     /* One entry per IMP DEF trace register; the table previously listed
	trcimspec0 twice.  */
4457 SR_CORE ("trcimspec0", CPENC (2,1,C0,C0,7), 0),
4459 SR_CORE ("trcimspec1", CPENC (2,1,C0,C1,7), 0),
4460 SR_CORE ("trcimspec2", CPENC (2,1,C0,C2,7), 0),
4461 SR_CORE ("trcimspec3", CPENC (2,1,C0,C3,7), 0),
4462 SR_CORE ("trcimspec4", CPENC (2,1,C0,C4,7), 0),
4463 SR_CORE ("trcimspec5", CPENC (2,1,C0,C5,7), 0),
4464 SR_CORE ("trcimspec6", CPENC (2,1,C0,C6,7), 0),
4465 SR_CORE ("trcimspec7", CPENC (2,1,C0,C7,7), 0),
4466 SR_CORE ("trcitctrl", CPENC (2,1,C7,C0,4), 0),
4467 SR_CORE ("trcpdcr", CPENC (2,1,C1,C4,4), 0),
4468 SR_CORE ("trcprgctlr", CPENC (2,1,C0,C1,0), 0),
4469 SR_CORE ("trcprocselr", CPENC (2,1,C0,C2,0), 0),
4470 SR_CORE ("trcqctlr", CPENC (2,1,C0,C1,1), 0),
4471 SR_CORE ("trcrsctlr2", CPENC (2,1,C1,C2,0), 0),
4472 SR_CORE ("trcrsctlr3", CPENC (2,1,C1,C3,0), 0),
4473 SR_CORE ("trcrsctlr4", CPENC (2,1,C1,C4,0), 0),
4474 SR_CORE ("trcrsctlr5", CPENC (2,1,C1,C5,0), 0),
4475 SR_CORE ("trcrsctlr6", CPENC (2,1,C1,C6,0), 0),
4476 SR_CORE ("trcrsctlr7", CPENC (2,1,C1,C7,0), 0),
4477 SR_CORE ("trcrsctlr8", CPENC (2,1,C1,C8,0), 0),
4478 SR_CORE ("trcrsctlr9", CPENC (2,1,C1,C9,0), 0),
4479 SR_CORE ("trcrsctlr10", CPENC (2,1,C1,C10,0), 0),
4480 SR_CORE ("trcrsctlr11", CPENC (2,1,C1,C11,0), 0),
4481 SR_CORE ("trcrsctlr12", CPENC (2,1,C1,C12,0), 0),
4482 SR_CORE ("trcrsctlr13", CPENC (2,1,C1,C13,0), 0),
4483 SR_CORE ("trcrsctlr14", CPENC (2,1,C1,C14,0), 0),
4484 SR_CORE ("trcrsctlr15", CPENC (2,1,C1,C15,0), 0),
4485 SR_CORE ("trcrsctlr16", CPENC (2,1,C1,C0,1), 0),
4486 SR_CORE ("trcrsctlr17", CPENC (2,1,C1,C1,1), 0),
4487 SR_CORE ("trcrsctlr18", CPENC (2,1,C1,C2,1), 0),
4488 SR_CORE ("trcrsctlr19", CPENC (2,1,C1,C3,1), 0),
4489 SR_CORE ("trcrsctlr20", CPENC (2,1,C1,C4,1), 0),
4490 SR_CORE ("trcrsctlr21", CPENC (2,1,C1,C5,1), 0),
4491 SR_CORE ("trcrsctlr22", CPENC (2,1,C1,C6,1), 0),
4492 SR_CORE ("trcrsctlr23", CPENC (2,1,C1,C7,1), 0),
4493 SR_CORE ("trcrsctlr24", CPENC (2,1,C1,C8,1), 0),
4494 SR_CORE ("trcrsctlr25", CPENC (2,1,C1,C9,1), 0),
4495 SR_CORE ("trcrsctlr26", CPENC (2,1,C1,C10,1), 0),
4496 SR_CORE ("trcrsctlr27", CPENC (2,1,C1,C11,1), 0),
4497 SR_CORE ("trcrsctlr28", CPENC (2,1,C1,C12,1), 0),
4498 SR_CORE ("trcrsctlr29", CPENC (2,1,C1,C13,1), 0),
4499 SR_CORE ("trcrsctlr30", CPENC (2,1,C1,C14,1), 0),
4500 SR_CORE ("trcrsctlr31", CPENC (2,1,C1,C15,1), 0),
4501 SR_CORE ("trcseqevr0", CPENC (2,1,C0,C0,4), 0),
4502 SR_CORE ("trcseqevr1", CPENC (2,1,C0,C1,4), 0),
4503 SR_CORE ("trcseqevr2", CPENC (2,1,C0,C2,4), 0),
4504 SR_CORE ("trcseqrstevr", CPENC (2,1,C0,C6,4), 0),
4505 SR_CORE ("trcseqstr", CPENC (2,1,C0,C7,4), 0),
4506 SR_CORE ("trcssccr0", CPENC (2,1,C1,C0,2), 0),
4507 SR_CORE ("trcssccr1", CPENC (2,1,C1,C1,2), 0),
4508 SR_CORE ("trcssccr2", CPENC (2,1,C1,C2,2), 0),
4509 SR_CORE ("trcssccr3", CPENC (2,1,C1,C3,2), 0),
4510 SR_CORE ("trcssccr4", CPENC (2,1,C1,C4,2), 0),
4511 SR_CORE ("trcssccr5", CPENC (2,1,C1,C5,2), 0),
4512 SR_CORE ("trcssccr6", CPENC (2,1,C1,C6,2), 0),
4513 SR_CORE ("trcssccr7", CPENC (2,1,C1,C7,2), 0),
4514 SR_CORE ("trcsscsr0", CPENC (2,1,C1,C8,2), 0),
4515 SR_CORE ("trcsscsr1", CPENC (2,1,C1,C9,2), 0),
4516 SR_CORE ("trcsscsr2", CPENC (2,1,C1,C10,2), 0),
4517 SR_CORE ("trcsscsr3", CPENC (2,1,C1,C11,2), 0),
4518 SR_CORE ("trcsscsr4", CPENC (2,1,C1,C12,2), 0),
4519 SR_CORE ("trcsscsr5", CPENC (2,1,C1,C13,2), 0),
4520 SR_CORE ("trcsscsr6", CPENC (2,1,C1,C14,2), 0),
4521 SR_CORE ("trcsscsr7", CPENC (2,1,C1,C15,2), 0),
4522 SR_CORE ("trcsspcicr0", CPENC (2,1,C1,C0,3), 0),
4523 SR_CORE ("trcsspcicr1", CPENC (2,1,C1,C1,3), 0),
4524 SR_CORE ("trcsspcicr2", CPENC (2,1,C1,C2,3), 0),
4525 SR_CORE ("trcsspcicr3", CPENC (2,1,C1,C3,3), 0),
4526 SR_CORE ("trcsspcicr4", CPENC (2,1,C1,C4,3), 0),
4527 SR_CORE ("trcsspcicr5", CPENC (2,1,C1,C5,3), 0),
4528 SR_CORE ("trcsspcicr6", CPENC (2,1,C1,C6,3), 0),
4529 SR_CORE ("trcsspcicr7", CPENC (2,1,C1,C7,3), 0),
4530 SR_CORE ("trcstallctlr", CPENC (2,1,C0,C11,0), 0),
4531 SR_CORE ("trcsyncpr", CPENC (2,1,C0,C13,0), 0),
4532 SR_CORE ("trctraceidr", CPENC (2,1,C0,C0,1), 0),
4533 SR_CORE ("trctsctlr", CPENC (2,1,C0,C12,0), 0),
4534 SR_CORE ("trcvdarcctlr", CPENC (2,1,C0,C10,2), 0),
4535 SR_CORE ("trcvdctlr", CPENC (2,1,C0,C8,2), 0),
4536 SR_CORE ("trcvdsacctlr", CPENC (2,1,C0,C9,2), 0),
4537 SR_CORE ("trcvictlr", CPENC (2,1,C0,C0,2), 0),
4538 SR_CORE ("trcviiectlr", CPENC (2,1,C0,C1,2), 0),
4539 SR_CORE ("trcvipcssctlr", CPENC (2,1,C0,C3,2), 0),
4540 SR_CORE ("trcvissctlr", CPENC (2,1,C0,C2,2), 0),
4541 SR_CORE ("trcvmidcctlr0", CPENC (2,1,C3,C2,2), 0),
4542 SR_CORE ("trcvmidcctlr1", CPENC (2,1,C3,C3,2), 0),
4543 SR_CORE ("trcvmidcvr0", CPENC (2,1,C3,C0,1), 0),
4544 SR_CORE ("trcvmidcvr1", CPENC (2,1,C3,C2,1), 0),
4545 SR_CORE ("trcvmidcvr2", CPENC (2,1,C3,C4,1), 0),
4546 SR_CORE ("trcvmidcvr3", CPENC (2,1,C3,C6,1), 0),
4547 SR_CORE ("trcvmidcvr4", CPENC (2,1,C3,C8,1), 0),
4548 SR_CORE ("trcvmidcvr5", CPENC (2,1,C3,C10,1), 0),
4549 SR_CORE ("trcvmidcvr6", CPENC (2,1,C3,C12,1), 0),
4550 SR_CORE ("trcvmidcvr7", CPENC (2,1,C3,C14,1), 0),
4551 SR_CORE ("trclar", CPENC (2,1,C7,C12,6), F_REG_WRITE),
4552 SR_CORE ("trcoslar", CPENC (2,1,C1,C0,4), F_REG_WRITE),
4553
4554 SR_CORE ("csrcr_el0", CPENC (2,3,C8,C0,0), 0),
4555 SR_CORE ("csrptr_el0", CPENC (2,3,C8,C0,1), 0),
4556 SR_CORE ("csridr_el0", CPENC (2,3,C8,C0,2), F_REG_READ),
4557 SR_CORE ("csrptridx_el0", CPENC (2,3,C8,C0,3), F_REG_READ),
4558 SR_CORE ("csrcr_el1", CPENC (2,0,C8,C0,0), 0),
4559 SR_CORE ("csrcr_el12", CPENC (2,5,C8,C0,0), 0),
4560 SR_CORE ("csrptr_el1", CPENC (2,0,C8,C0,1), 0),
4561 SR_CORE ("csrptr_el12", CPENC (2,5,C8,C0,1), 0),
4562 SR_CORE ("csrptridx_el1", CPENC (2,0,C8,C0,3), F_REG_READ),
4563 SR_CORE ("csrcr_el2", CPENC (2,4,C8,C0,0), 0),
4564 SR_CORE ("csrptr_el2", CPENC (2,4,C8,C0,1), 0),
4565 SR_CORE ("csrptridx_el2", CPENC (2,4,C8,C0,3), F_REG_READ),
4566
4567 SR_CORE ("lorc_el1", CPENC (3,0,C10,C4,3), 0),
4568 SR_CORE ("lorea_el1", CPENC (3,0,C10,C4,1), 0),
4569 SR_CORE ("lorn_el1", CPENC (3,0,C10,C4,2), 0),
4570 SR_CORE ("lorsa_el1", CPENC (3,0,C10,C4,0), 0),
4571 SR_CORE ("icc_ctlr_el3", CPENC (3,6,C12,C12,4), 0),
4572 SR_CORE ("icc_sre_el1", CPENC (3,0,C12,C12,5), 0),
4573 SR_CORE ("icc_sre_el2", CPENC (3,4,C12,C9,5), 0),
4574 SR_CORE ("icc_sre_el3", CPENC (3,6,C12,C12,5), 0),
4575 SR_CORE ("ich_vtr_el2", CPENC (3,4,C12,C11,1), F_REG_READ),
4576
4577 SR_CORE ("brbcr_el1", CPENC (2,1,C9,C0,0), 0),
4578 SR_CORE ("brbcr_el12", CPENC (2,5,C9,C0,0), 0),
4579 SR_CORE ("brbfcr_el1", CPENC (2,1,C9,C0,1), 0),
4580 SR_CORE ("brbts_el1", CPENC (2,1,C9,C0,2), 0),
4581 SR_CORE ("brbinfinj_el1", CPENC (2,1,C9,C1,0), 0),
4582 SR_CORE ("brbsrcinj_el1", CPENC (2,1,C9,C1,1), 0),
4583 SR_CORE ("brbtgtinj_el1", CPENC (2,1,C9,C1,2), 0),
4584 SR_CORE ("brbidr0_el1", CPENC (2,1,C9,C2,0), F_REG_READ),
4585 SR_CORE ("brbcr_el2", CPENC (2,4,C9,C0,0), 0),
4586 SR_CORE ("brbsrc0_el1", CPENC (2,1,C8,C0,1), F_REG_READ),
4587 SR_CORE ("brbsrc1_el1", CPENC (2,1,C8,C1,1), F_REG_READ),
4588 SR_CORE ("brbsrc2_el1", CPENC (2,1,C8,C2,1), F_REG_READ),
4589 SR_CORE ("brbsrc3_el1", CPENC (2,1,C8,C3,1), F_REG_READ),
4590 SR_CORE ("brbsrc4_el1", CPENC (2,1,C8,C4,1), F_REG_READ),
4591 SR_CORE ("brbsrc5_el1", CPENC (2,1,C8,C5,1), F_REG_READ),
4592 SR_CORE ("brbsrc6_el1", CPENC (2,1,C8,C6,1), F_REG_READ),
4593 SR_CORE ("brbsrc7_el1", CPENC (2,1,C8,C7,1), F_REG_READ),
4594 SR_CORE ("brbsrc8_el1", CPENC (2,1,C8,C8,1), F_REG_READ),
4595 SR_CORE ("brbsrc9_el1", CPENC (2,1,C8,C9,1), F_REG_READ),
4596 SR_CORE ("brbsrc10_el1", CPENC (2,1,C8,C10,1), F_REG_READ),
4597 SR_CORE ("brbsrc11_el1", CPENC (2,1,C8,C11,1), F_REG_READ),
4598 SR_CORE ("brbsrc12_el1", CPENC (2,1,C8,C12,1), F_REG_READ),
4599 SR_CORE ("brbsrc13_el1", CPENC (2,1,C8,C13,1), F_REG_READ),
4600 SR_CORE ("brbsrc14_el1", CPENC (2,1,C8,C14,1), F_REG_READ),
4601 SR_CORE ("brbsrc15_el1", CPENC (2,1,C8,C15,1), F_REG_READ),
4602 SR_CORE ("brbsrc16_el1", CPENC (2,1,C8,C0,5), F_REG_READ),
4603 SR_CORE ("brbsrc17_el1", CPENC (2,1,C8,C1,5), F_REG_READ),
4604 SR_CORE ("brbsrc18_el1", CPENC (2,1,C8,C2,5), F_REG_READ),
4605 SR_CORE ("brbsrc19_el1", CPENC (2,1,C8,C3,5), F_REG_READ),
4606 SR_CORE ("brbsrc20_el1", CPENC (2,1,C8,C4,5), F_REG_READ),
4607 SR_CORE ("brbsrc21_el1", CPENC (2,1,C8,C5,5), F_REG_READ),
4608 SR_CORE ("brbsrc22_el1", CPENC (2,1,C8,C6,5), F_REG_READ),
4609 SR_CORE ("brbsrc23_el1", CPENC (2,1,C8,C7,5), F_REG_READ),
4610 SR_CORE ("brbsrc24_el1", CPENC (2,1,C8,C8,5), F_REG_READ),
4611 SR_CORE ("brbsrc25_el1", CPENC (2,1,C8,C9,5), F_REG_READ),
4612 SR_CORE ("brbsrc26_el1", CPENC (2,1,C8,C10,5), F_REG_READ),
4613 SR_CORE ("brbsrc27_el1", CPENC (2,1,C8,C11,5), F_REG_READ),
4614 SR_CORE ("brbsrc28_el1", CPENC (2,1,C8,C12,5), F_REG_READ),
4615 SR_CORE ("brbsrc29_el1", CPENC (2,1,C8,C13,5), F_REG_READ),
4616 SR_CORE ("brbsrc30_el1", CPENC (2,1,C8,C14,5), F_REG_READ),
4617 SR_CORE ("brbsrc31_el1", CPENC (2,1,C8,C15,5), F_REG_READ),
4618 SR_CORE ("brbtgt0_el1", CPENC (2,1,C8,C0,2), F_REG_READ),
4619 SR_CORE ("brbtgt1_el1", CPENC (2,1,C8,C1,2), F_REG_READ),
4620 SR_CORE ("brbtgt2_el1", CPENC (2,1,C8,C2,2), F_REG_READ),
4621 SR_CORE ("brbtgt3_el1", CPENC (2,1,C8,C3,2), F_REG_READ),
4622 SR_CORE ("brbtgt4_el1", CPENC (2,1,C8,C4,2), F_REG_READ),
4623 SR_CORE ("brbtgt5_el1", CPENC (2,1,C8,C5,2), F_REG_READ),
4624 SR_CORE ("brbtgt6_el1", CPENC (2,1,C8,C6,2), F_REG_READ),
4625 SR_CORE ("brbtgt7_el1", CPENC (2,1,C8,C7,2), F_REG_READ),
4626 SR_CORE ("brbtgt8_el1", CPENC (2,1,C8,C8,2), F_REG_READ),
4627 SR_CORE ("brbtgt9_el1", CPENC (2,1,C8,C9,2), F_REG_READ),
4628 SR_CORE ("brbtgt10_el1", CPENC (2,1,C8,C10,2), F_REG_READ),
4629 SR_CORE ("brbtgt11_el1", CPENC (2,1,C8,C11,2), F_REG_READ),
4630 SR_CORE ("brbtgt12_el1", CPENC (2,1,C8,C12,2), F_REG_READ),
4631 SR_CORE ("brbtgt13_el1", CPENC (2,1,C8,C13,2), F_REG_READ),
4632 SR_CORE ("brbtgt14_el1", CPENC (2,1,C8,C14,2), F_REG_READ),
4633 SR_CORE ("brbtgt15_el1", CPENC (2,1,C8,C15,2), F_REG_READ),
4634 SR_CORE ("brbtgt16_el1", CPENC (2,1,C8,C0,6), F_REG_READ),
4635 SR_CORE ("brbtgt17_el1", CPENC (2,1,C8,C1,6), F_REG_READ),
4636 SR_CORE ("brbtgt18_el1", CPENC (2,1,C8,C2,6), F_REG_READ),
4637 SR_CORE ("brbtgt19_el1", CPENC (2,1,C8,C3,6), F_REG_READ),
4638 SR_CORE ("brbtgt20_el1", CPENC (2,1,C8,C4,6), F_REG_READ),
4639 SR_CORE ("brbtgt21_el1", CPENC (2,1,C8,C5,6), F_REG_READ),
4640 SR_CORE ("brbtgt22_el1", CPENC (2,1,C8,C6,6), F_REG_READ),
4641 SR_CORE ("brbtgt23_el1", CPENC (2,1,C8,C7,6), F_REG_READ),
4642 SR_CORE ("brbtgt24_el1", CPENC (2,1,C8,C8,6), F_REG_READ),
4643 SR_CORE ("brbtgt25_el1", CPENC (2,1,C8,C9,6), F_REG_READ),
4644 SR_CORE ("brbtgt26_el1", CPENC (2,1,C8,C10,6), F_REG_READ),
4645 SR_CORE ("brbtgt27_el1", CPENC (2,1,C8,C11,6), F_REG_READ),
4646 SR_CORE ("brbtgt28_el1", CPENC (2,1,C8,C12,6), F_REG_READ),
4647 SR_CORE ("brbtgt29_el1", CPENC (2,1,C8,C13,6), F_REG_READ),
4648 SR_CORE ("brbtgt30_el1", CPENC (2,1,C8,C14,6), F_REG_READ),
4649 SR_CORE ("brbtgt31_el1", CPENC (2,1,C8,C15,6), F_REG_READ),
4650 SR_CORE ("brbinf0_el1", CPENC (2,1,C8,C0,0), F_REG_READ),
4651 SR_CORE ("brbinf1_el1", CPENC (2,1,C8,C1,0), F_REG_READ),
4652 SR_CORE ("brbinf2_el1", CPENC (2,1,C8,C2,0), F_REG_READ),
4653 SR_CORE ("brbinf3_el1", CPENC (2,1,C8,C3,0), F_REG_READ),
4654 SR_CORE ("brbinf4_el1", CPENC (2,1,C8,C4,0), F_REG_READ),
4655 SR_CORE ("brbinf5_el1", CPENC (2,1,C8,C5,0), F_REG_READ),
4656 SR_CORE ("brbinf6_el1", CPENC (2,1,C8,C6,0), F_REG_READ),
4657 SR_CORE ("brbinf7_el1", CPENC (2,1,C8,C7,0), F_REG_READ),
4658 SR_CORE ("brbinf8_el1", CPENC (2,1,C8,C8,0), F_REG_READ),
4659 SR_CORE ("brbinf9_el1", CPENC (2,1,C8,C9,0), F_REG_READ),
4660 SR_CORE ("brbinf10_el1", CPENC (2,1,C8,C10,0), F_REG_READ),
4661 SR_CORE ("brbinf11_el1", CPENC (2,1,C8,C11,0), F_REG_READ),
4662 SR_CORE ("brbinf12_el1", CPENC (2,1,C8,C12,0), F_REG_READ),
4663 SR_CORE ("brbinf13_el1", CPENC (2,1,C8,C13,0), F_REG_READ),
4664 SR_CORE ("brbinf14_el1", CPENC (2,1,C8,C14,0), F_REG_READ),
4665 SR_CORE ("brbinf15_el1", CPENC (2,1,C8,C15,0), F_REG_READ),
4666 SR_CORE ("brbinf16_el1", CPENC (2,1,C8,C0,4), F_REG_READ),
4667 SR_CORE ("brbinf17_el1", CPENC (2,1,C8,C1,4), F_REG_READ),
4668 SR_CORE ("brbinf18_el1", CPENC (2,1,C8,C2,4), F_REG_READ),
4669 SR_CORE ("brbinf19_el1", CPENC (2,1,C8,C3,4), F_REG_READ),
4670 SR_CORE ("brbinf20_el1", CPENC (2,1,C8,C4,4), F_REG_READ),
4671 SR_CORE ("brbinf21_el1", CPENC (2,1,C8,C5,4), F_REG_READ),
4672 SR_CORE ("brbinf22_el1", CPENC (2,1,C8,C6,4), F_REG_READ),
4673 SR_CORE ("brbinf23_el1", CPENC (2,1,C8,C7,4), F_REG_READ),
4674 SR_CORE ("brbinf24_el1", CPENC (2,1,C8,C8,4), F_REG_READ),
4675 SR_CORE ("brbinf25_el1", CPENC (2,1,C8,C9,4), F_REG_READ),
4676 SR_CORE ("brbinf26_el1", CPENC (2,1,C8,C10,4), F_REG_READ),
4677 SR_CORE ("brbinf27_el1", CPENC (2,1,C8,C11,4), F_REG_READ),
4678 SR_CORE ("brbinf28_el1", CPENC (2,1,C8,C12,4), F_REG_READ),
4679 SR_CORE ("brbinf29_el1", CPENC (2,1,C8,C13,4), F_REG_READ),
4680 SR_CORE ("brbinf30_el1", CPENC (2,1,C8,C14,4), F_REG_READ),
4681 SR_CORE ("brbinf31_el1", CPENC (2,1,C8,C15,4), F_REG_READ),
4682
4683 SR_CORE ("accdata_el1", CPENC (3,0,C13,C0,5), 0),
4684
4685 SR_CORE ("mfar_el3", CPENC (3,6,C6,C0,5), F_REG_READ),
4686 SR_CORE ("gpccr_el3", CPENC (3,6,C2,C1,6), 0),
4687 SR_CORE ("gptbr_el3", CPENC (3,6,C2,C1,4), 0),
4688
4689 { 0, CPENC (0,0,0,0,0), 0, 0 }
4690 };
4691
4692 bool
4693 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
4694 {
4695 return (reg_flags & F_DEPRECATED) != 0;
4696 }
4697
/* The CPENC below is fairly misleading, the fields
   here are not in CPENC form.  They are in op2op1 form.  The fields are encoded
   by ins_pstatefield, which just shifts the value by the width of the fields
   in a loop.  So if you CPENC them only the first value will be set, the rest
   are masked out to 0.  As an example.  op2 = 3, op1=2. CPENC would produce a
   value of 0b110000000001000000 (0x30040) while what you want is
   0b011010 (0x1a).  */
/* PSTATE field names accepted by MSR (immediate); second column is the
   op2:op1 encoding described above.  The all-zero entry terminates the
   table.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  SR_CORE ("spsel",	  0x05,	0),
  SR_CORE ("daifset",	  0x1e,	0),
  SR_CORE ("daifclr",	  0x1f,	0),
  SR_PAN  ("pan",	  0x04, 0),
  SR_V8_2 ("uao",	  0x03, 0),
  SR_SSBS ("ssbs",	  0x19, 0),
  SR_V8_4 ("dit",	  0x1a, 0),
  SR_MEMTAG ("tco",	  0x1c, 0),
  { 0,	  CPENC (0,0,0,0,0), 0, 0 },
};
4717
4718 bool
4719 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4720 const aarch64_sys_reg *reg)
4721 {
4722 if (!(reg->flags & F_ARCHEXT))
4723 return true;
4724
4725 return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
4726 }
4727
/* IC (instruction cache maintenance) operand names.  F_HASXT marks
   operations that take an address in a general-purpose register.
   The all-zero entry terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
4735
/* DC (data cache maintenance) operand names.  F_HASXT marks operations
   that take a general-purpose register argument; F_ARCHEXT marks
   operations gated on an architecture extension (checked by
   aarch64_sys_ins_reg_supported_p).  The all-zero entry terminates the
   table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
    { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT },
    { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
    { "igvac",      CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT },
    { "igsw",       CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
    { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT },
    { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
    { "cgvac",      CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
    { "cgdvac",     CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
    { "cgsw",       CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
    { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "cgvap",      CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
    { "cgdvap",     CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
    { "cvadp",      CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
    { "cgvadp",     CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
    { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
    { "cigvac",     CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
    { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
    { "cigsw",      CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
    { "cigdsw",     CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
    { "cipapa",     CPENS (6, C7, C14, 1), F_HASXT },
    { "cigdpapa",   CPENS (6, C7, C14, 5), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
4770
/* AT (address translation) operand names.  All take a general-purpose
   register (F_HASXT); the *rp/*wp entries require an architecture
   extension (F_ARCHEXT).  The all-zero entry terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0, CPENS(0,0,0,0), 0 }
};
4789
/* TLBI (TLB invalidate) operand names.  F_HASXT marks operations taking
   a general-purpose register argument; F_ARCHEXT marks operations gated
   on an architecture extension (checked by
   aarch64_sys_ins_reg_supported_p).  The all-zero entry terminates the
   table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },

    /* Outer-shareable variants (F_ARCHEXT-gated).  */
    { "vmalle1os",    CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os",     CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os",      CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os",      CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os",      CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* Range-based variants (F_ARCHEXT-gated).  */
    { "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { "rpaos",      CPENS (6, C8, C4, 3), F_HASXT },
    { "rpalos",     CPENS (6, C8, C4, 7), F_HASXT },
    { "paallos",    CPENS (6, C8, C1, 4), 0},
    { "paall",      CPENS (6, C8, C7, 4), 0},

    { 0, CPENS(0,0,0,0), 0 }
};
4880
/* Speculation restriction (SYS SR) operand names.  The all-zero entry
   terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

    { 0,       CPENS(0,0,0,0), 0 }
};
4891
4892 bool
4893 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4894 {
4895 return (sys_ins_reg->flags & F_HASXT) != 0;
4896 }
4897
/* Return true if the system instruction operand named REG_NAME with
   encoding REG_VALUE and flags REG_FLAGS is available when the feature
   set FEATURES is enabled.  REG_FEATURES, when non-empty, gives the
   feature bits declared by the operand's own table entry; operands
   without F_ARCHEXT are always available.  The CPENS fallback lists
   below cover extension-gated operands whose table entries carry no
   explicit feature bits.  */
extern bool
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const char *reg_name,
				 aarch64_insn reg_value,
				 uint32_t reg_flags,
				 aarch64_feature_set reg_features)
{
  /* Armv8-R has no EL3.  */
  if (AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_R))
    {
      const char *suffix = strrchr (reg_name, '_');
      if (suffix && !strcmp (suffix, "_el3"))
	return false;
    }

  /* Operands without F_ARCHEXT belong to the base architecture.  */
  if (!(reg_flags & F_ARCHEXT))
    return true;

  /* Prefer the feature bits declared in the operand's table entry.  */
  if (reg_features
      && AARCH64_CPU_HAS_ALL_FEATURES (features, reg_features))
    return true;

  /* ARMv8.4 TLB instructions.  */
  if ((reg_value == CPENS (0, C8, C1, 0)
       || reg_value == CPENS (0, C8, C1, 1)
       || reg_value == CPENS (0, C8, C1, 2)
       || reg_value == CPENS (0, C8, C1, 3)
       || reg_value == CPENS (0, C8, C1, 5)
       || reg_value == CPENS (0, C8, C1, 7)
       || reg_value == CPENS (4, C8, C4, 0)
       || reg_value == CPENS (4, C8, C4, 4)
       || reg_value == CPENS (4, C8, C1, 1)
       || reg_value == CPENS (4, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 6)
       || reg_value == CPENS (6, C8, C1, 1)
       || reg_value == CPENS (6, C8, C1, 5)
       || reg_value == CPENS (4, C8, C1, 0)
       || reg_value == CPENS (4, C8, C1, 4)
       || reg_value == CPENS (6, C8, C1, 0)
       || reg_value == CPENS (0, C8, C6, 1)
       || reg_value == CPENS (0, C8, C6, 3)
       || reg_value == CPENS (0, C8, C6, 5)
       || reg_value == CPENS (0, C8, C6, 7)
       || reg_value == CPENS (0, C8, C2, 1)
       || reg_value == CPENS (0, C8, C2, 3)
       || reg_value == CPENS (0, C8, C2, 5)
       || reg_value == CPENS (0, C8, C2, 7)
       || reg_value == CPENS (0, C8, C5, 1)
       || reg_value == CPENS (0, C8, C5, 3)
       || reg_value == CPENS (0, C8, C5, 5)
       || reg_value == CPENS (0, C8, C5, 7)
       || reg_value == CPENS (4, C8, C0, 2)
       || reg_value == CPENS (4, C8, C0, 6)
       || reg_value == CPENS (4, C8, C4, 2)
       || reg_value == CPENS (4, C8, C4, 6)
       || reg_value == CPENS (4, C8, C4, 3)
       || reg_value == CPENS (4, C8, C4, 7)
       || reg_value == CPENS (4, C8, C6, 1)
       || reg_value == CPENS (4, C8, C6, 5)
       || reg_value == CPENS (4, C8, C2, 1)
       || reg_value == CPENS (4, C8, C2, 5)
       || reg_value == CPENS (4, C8, C5, 1)
       || reg_value == CPENS (4, C8, C5, 5)
       || reg_value == CPENS (6, C8, C6, 1)
       || reg_value == CPENS (6, C8, C6, 5)
       || reg_value == CPENS (6, C8, C2, 1)
       || reg_value == CPENS (6, C8, C2, 5)
       || reg_value == CPENS (6, C8, C5, 1)
       || reg_value == CPENS (6, C8, C5, 5))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return true;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C12, 1)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return true;

  /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
  if (reg_value == CPENS (3, C7, C13, 1)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
    return true;

  /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension.  */
  if ((reg_value == CPENS (0, C7, C6, 3)
       || reg_value == CPENS (0, C7, C6, 4)
       || reg_value == CPENS (0, C7, C10, 4)
       || reg_value == CPENS (0, C7, C14, 4)
       || reg_value == CPENS (3, C7, C10, 3)
       || reg_value == CPENS (3, C7, C12, 3)
       || reg_value == CPENS (3, C7, C13, 3)
       || reg_value == CPENS (3, C7, C14, 3)
       || reg_value == CPENS (3, C7, C4, 3)
       || reg_value == CPENS (0, C7, C6, 5)
       || reg_value == CPENS (0, C7, C6, 6)
       || reg_value == CPENS (0, C7, C10, 6)
       || reg_value == CPENS (0, C7, C14, 6)
       || reg_value == CPENS (3, C7, C10, 5)
       || reg_value == CPENS (3, C7, C12, 5)
       || reg_value == CPENS (3, C7, C13, 5)
       || reg_value == CPENS (3, C7, C14, 5)
       || reg_value == CPENS (3, C7, C4, 4))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
    return true;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg_value == CPENS (0, C7, C9, 0)
       || reg_value == CPENS (0, C7, C9, 1))
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return true;

  /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
  if (reg_value == CPENS (3, C7, C3, 0)
      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
    return true;

  /* No matching extension is enabled.  */
  return false;
}
5015
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* Extract bit number BT of instruction word INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of INSN.  The mask is built
   from an unsigned constant: with a plain int `1', a field 31 bits wide
   would left-shift into the sign bit, which is undefined behavior.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1u << (((HI) - (LO)) + 1)) - 1))
5035
5036 static enum err_type
5037 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
5038 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
5039 bool encoding ATTRIBUTE_UNUSED,
5040 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5041 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5042 {
5043 int t = BITS (insn, 4, 0);
5044 int n = BITS (insn, 9, 5);
5045 int t2 = BITS (insn, 14, 10);
5046
5047 if (BIT (insn, 23))
5048 {
5049 /* Write back enabled. */
5050 if ((t == n || t2 == n) && n != 31)
5051 return ERR_UND;
5052 }
5053
5054 if (BIT (insn, 22))
5055 {
5056 /* Load */
5057 if (t == t2)
5058 return ERR_UND;
5059 }
5060
5061 return ERR_OK;
5062 }
5063
5064 /* Verifier for vector by element 3 operands functions where the
5065 conditions `if sz:L == 11 then UNDEFINED` holds. */
5066
5067 static enum err_type
5068 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
5069 bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
5070 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5071 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5072 {
5073 const aarch64_insn undef_pattern = 0x3;
5074 aarch64_insn value;
5075
5076 assert (inst->opcode);
5077 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
5078 value = encoding ? inst->value : insn;
5079 assert (value);
5080
5081 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
5082 return ERR_UND;
5083
5084 return ERR_OK;
5085 }
5086
5087 /* Initialize an instruction sequence insn_sequence with the instruction INST.
5088 If INST is NULL the given insn_sequence is cleared and the sequence is left
5089 uninitialized. */
5090
5091 void
5092 init_insn_sequence (const struct aarch64_inst *inst,
5093 aarch64_instr_sequence *insn_sequence)
5094 {
5095 int num_req_entries = 0;
5096 insn_sequence->next_insn = 0;
5097 insn_sequence->num_insns = num_req_entries;
5098 if (insn_sequence->instr)
5099 XDELETE (insn_sequence->instr);
5100 insn_sequence->instr = NULL;
5101
5102 if (inst)
5103 {
5104 insn_sequence->instr = XNEW (aarch64_inst);
5105 memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
5106 }
5107
5108 /* Handle all the cases here. May need to think of something smarter than
5109 a giant if/else chain if this grows. At that time, a lookup table may be
5110 best. */
5111 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
5112 num_req_entries = 1;
5113
5114 if (insn_sequence->current_insns)
5115 XDELETEVEC (insn_sequence->current_insns);
5116 insn_sequence->current_insns = NULL;
5117
5118 if (num_req_entries != 0)
5119 {
5120 size_t size = num_req_entries * sizeof (aarch64_inst);
5121 insn_sequence->current_insns
5122 = (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
5123 memset (insn_sequence->current_insns, 0, size);
5124 }
5125 }
5126
5127
5128 /* This function verifies that the instruction INST adheres to its specified
5129 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
5130 returned and MISMATCH_DETAIL contains the reason why verification failed.
5131
5132 The function is called both during assembly and disassembly. If assembling
5133 then ENCODING will be TRUE, else FALSE. If dissassembling PC will be set
5134 and will contain the PC of the current instruction w.r.t to the section.
5135
5136 If ENCODING and PC=0 then you are at a start of a section. The constraints
5137 are verified against the given state insn_sequence which is updated as it
5138 transitions through the verification. */
5139
5140 enum err_type
5141 verify_constraints (const struct aarch64_inst *inst,
5142 const aarch64_insn insn ATTRIBUTE_UNUSED,
5143 bfd_vma pc,
5144 bool encoding,
5145 aarch64_operand_error *mismatch_detail,
5146 aarch64_instr_sequence *insn_sequence)
5147 {
5148 assert (inst);
5149 assert (inst->opcode);
5150
5151 const struct aarch64_opcode *opcode = inst->opcode;
5152 if (!opcode->constraints && !insn_sequence->instr)
5153 return ERR_OK;
5154
5155 assert (insn_sequence);
5156
5157 enum err_type res = ERR_OK;
5158
5159 /* This instruction puts a constraint on the insn_sequence. */
5160 if (opcode->flags & F_SCAN)
5161 {
5162 if (insn_sequence->instr)
5163 {
5164 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5165 mismatch_detail->error = _("instruction opens new dependency "
5166 "sequence without ending previous one");
5167 mismatch_detail->index = -1;
5168 mismatch_detail->non_fatal = true;
5169 res = ERR_VFI;
5170 }
5171
5172 init_insn_sequence (inst, insn_sequence);
5173 return res;
5174 }
5175
5176 /* Verify constraints on an existing sequence. */
5177 if (insn_sequence->instr)
5178 {
5179 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
5180 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
5181 closed a previous one that we should have. */
5182 if (!encoding && pc == 0)
5183 {
5184 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5185 mismatch_detail->error = _("previous `movprfx' sequence not closed");
5186 mismatch_detail->index = -1;
5187 mismatch_detail->non_fatal = true;
5188 res = ERR_VFI;
5189 /* Reset the sequence. */
5190 init_insn_sequence (NULL, insn_sequence);
5191 return res;
5192 }
5193
5194 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
5195 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
5196 {
5197 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5198 instruction for better error messages. */
5199 if (!opcode->avariant
5200 || !(*opcode->avariant &
5201 (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
5202 {
5203 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5204 mismatch_detail->error = _("SVE instruction expected after "
5205 "`movprfx'");
5206 mismatch_detail->index = -1;
5207 mismatch_detail->non_fatal = true;
5208 res = ERR_VFI;
5209 goto done;
5210 }
5211
5212 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5213 instruction that is allowed to be used with a MOVPRFX. */
5214 if (!(opcode->constraints & C_SCAN_MOVPRFX))
5215 {
5216 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5217 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
5218 "expected");
5219 mismatch_detail->index = -1;
5220 mismatch_detail->non_fatal = true;
5221 res = ERR_VFI;
5222 goto done;
5223 }
5224
5225 /* Next check for usage of the predicate register. */
5226 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
5227 aarch64_opnd_info blk_pred, inst_pred;
5228 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
5229 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
5230 bool predicated = false;
5231 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
5232
5233 /* Determine if the movprfx instruction used is predicated or not. */
5234 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
5235 {
5236 predicated = true;
5237 blk_pred = insn_sequence->instr->operands[1];
5238 }
5239
5240 unsigned char max_elem_size = 0;
5241 unsigned char current_elem_size;
5242 int num_op_used = 0, last_op_usage = 0;
5243 int i, inst_pred_idx = -1;
5244 int num_ops = aarch64_num_of_operands (opcode);
5245 for (i = 0; i < num_ops; i++)
5246 {
5247 aarch64_opnd_info inst_op = inst->operands[i];
5248 switch (inst_op.type)
5249 {
5250 case AARCH64_OPND_SVE_Zd:
5251 case AARCH64_OPND_SVE_Zm_5:
5252 case AARCH64_OPND_SVE_Zm_16:
5253 case AARCH64_OPND_SVE_Zn:
5254 case AARCH64_OPND_SVE_Zt:
5255 case AARCH64_OPND_SVE_Vm:
5256 case AARCH64_OPND_SVE_Vn:
5257 case AARCH64_OPND_Va:
5258 case AARCH64_OPND_Vn:
5259 case AARCH64_OPND_Vm:
5260 case AARCH64_OPND_Sn:
5261 case AARCH64_OPND_Sm:
5262 if (inst_op.reg.regno == blk_dest.reg.regno)
5263 {
5264 num_op_used++;
5265 last_op_usage = i;
5266 }
5267 current_elem_size
5268 = aarch64_get_qualifier_esize (inst_op.qualifier);
5269 if (current_elem_size > max_elem_size)
5270 max_elem_size = current_elem_size;
5271 break;
5272 case AARCH64_OPND_SVE_Pd:
5273 case AARCH64_OPND_SVE_Pg3:
5274 case AARCH64_OPND_SVE_Pg4_5:
5275 case AARCH64_OPND_SVE_Pg4_10:
5276 case AARCH64_OPND_SVE_Pg4_16:
5277 case AARCH64_OPND_SVE_Pm:
5278 case AARCH64_OPND_SVE_Pn:
5279 case AARCH64_OPND_SVE_Pt:
5280 inst_pred = inst_op;
5281 inst_pred_idx = i;
5282 break;
5283 default:
5284 break;
5285 }
5286 }
5287
5288 assert (max_elem_size != 0);
5289 aarch64_opnd_info inst_dest = inst->operands[0];
5290 /* Determine the size that should be used to compare against the
5291 movprfx size. */
5292 current_elem_size
5293 = opcode->constraints & C_MAX_ELEM
5294 ? max_elem_size
5295 : aarch64_get_qualifier_esize (inst_dest.qualifier);
5296
5297 /* If movprfx is predicated do some extra checks. */
5298 if (predicated)
5299 {
5300 /* The instruction must be predicated. */
5301 if (inst_pred_idx < 0)
5302 {
5303 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5304 mismatch_detail->error = _("predicated instruction expected "
5305 "after `movprfx'");
5306 mismatch_detail->index = -1;
5307 mismatch_detail->non_fatal = true;
5308 res = ERR_VFI;
5309 goto done;
5310 }
5311
5312 /* The instruction must have a merging predicate. */
5313 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
5314 {
5315 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5316 mismatch_detail->error = _("merging predicate expected due "
5317 "to preceding `movprfx'");
5318 mismatch_detail->index = inst_pred_idx;
5319 mismatch_detail->non_fatal = true;
5320 res = ERR_VFI;
5321 goto done;
5322 }
5323
5324 /* The same register must be used in instruction. */
5325 if (blk_pred.reg.regno != inst_pred.reg.regno)
5326 {
5327 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5328 mismatch_detail->error = _("predicate register differs "
5329 "from that in preceding "
5330 "`movprfx'");
5331 mismatch_detail->index = inst_pred_idx;
5332 mismatch_detail->non_fatal = true;
5333 res = ERR_VFI;
5334 goto done;
5335 }
5336 }
5337
5338 /* Destructive operations by definition must allow one usage of the
5339 same register. */
5340 int allowed_usage
5341 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
5342
5343 /* Operand is not used at all. */
5344 if (num_op_used == 0)
5345 {
5346 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5347 mismatch_detail->error = _("output register of preceding "
5348 "`movprfx' not used in current "
5349 "instruction");
5350 mismatch_detail->index = 0;
5351 mismatch_detail->non_fatal = true;
5352 res = ERR_VFI;
5353 goto done;
5354 }
5355
5356 /* We now know it's used, now determine exactly where it's used. */
5357 if (blk_dest.reg.regno != inst_dest.reg.regno)
5358 {
5359 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5360 mismatch_detail->error = _("output register of preceding "
5361 "`movprfx' expected as output");
5362 mismatch_detail->index = 0;
5363 mismatch_detail->non_fatal = true;
5364 res = ERR_VFI;
5365 goto done;
5366 }
5367
5368 /* Operand used more than allowed for the specific opcode type. */
5369 if (num_op_used > allowed_usage)
5370 {
5371 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5372 mismatch_detail->error = _("output register of preceding "
5373 "`movprfx' used as input");
5374 mismatch_detail->index = last_op_usage;
5375 mismatch_detail->non_fatal = true;
5376 res = ERR_VFI;
5377 goto done;
5378 }
5379
5380 /* Now the only thing left is the qualifiers checks. The register
5381 must have the same maximum element size. */
5382 if (inst_dest.qualifier
5383 && blk_dest.qualifier
5384 && current_elem_size
5385 != aarch64_get_qualifier_esize (blk_dest.qualifier))
5386 {
5387 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5388 mismatch_detail->error = _("register size not compatible with "
5389 "previous `movprfx'");
5390 mismatch_detail->index = 0;
5391 mismatch_detail->non_fatal = true;
5392 res = ERR_VFI;
5393 goto done;
5394 }
5395 }
5396
5397 done:
5398 /* Add the new instruction to the sequence. */
5399 memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
5400 inst, sizeof (aarch64_inst));
5401
5402 /* Check if sequence is now full. */
5403 if (insn_sequence->next_insn >= insn_sequence->num_insns)
5404 {
5405 /* Sequence is full, but we don't have anything special to do for now,
5406 so clear and reset it. */
5407 init_insn_sequence (NULL, insn_sequence);
5408 }
5409 }
5410
5411 return res;
5412 }
5413
5414
5415 /* Return true if VALUE cannot be moved into an SVE register using DUP
5416 (with any element size, not just ESIZE) and if using DUPM would
5417 therefore be OK. ESIZE is the number of bytes in the immediate. */
5418
bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  /* Mask covering every bit above the ESIZE-byte element.  Zero when
     ESIZE is 8; the double shift avoids an out-of-range shift count.  */
  uint64_t top_mask = ((uint64_t) -1 << (esize * 4)) << (esize * 4);
  int64_t folded = (int64_t) uvalue;

  /* The bits above the element must be a pure zero- or sign-extension
     of it, otherwise the value is not encodable either way.  */
  if ((uvalue & ~top_mask) != uvalue && (uvalue | top_mask) != uvalue)
    return false;

  /* Find the narrowest element size the value could be replicated
     from: each test requires the wider one to have passed first.  */
  bool fits32 = (esize <= 4
		 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32));
  bool fits16 = (fits32
		 && (esize <= 2
		     || (uint16_t) uvalue == (uint16_t) (uvalue >> 16)));
  bool fits8 = (fits16
		&& (esize == 1
		    || (uint8_t) uvalue == (uint8_t) (uvalue >> 8)));

  /* A byte-replicated value can always be handled by DUP, so DUPM
     is not needed.  */
  if (fits8)
    return false;

  /* Fold the value down to the narrowest replicable element.  */
  if (fits16)
    folded = (int16_t) uvalue;
  else if (fits32)
    folded = (int32_t) uvalue;

  /* DUP takes an 8-bit signed immediate, optionally shifted left by 8;
     undo the shift before the range test.  */
  if ((folded & 0xff) == 0)
    folded /= 256;
  return folded < -128 || folded >= 128;
}
5441
5442 /* Include the opcode description table as well as the operand description
5443 table. */
5444 #define VERIFIER(x) verify_##x
5445 #include "aarch64-tbl.h"
This page took 0.180235 seconds and 4 git commands to generate.