Update year range in copyright notice of binutils files
[deliverable/binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Non-zero enables the verbose debug tracing in this file (see the
   dump_* helpers below); only built when DEBUG_AARCH64 is defined.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  Indexed by the encoded 5-bit pattern value.
   A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  Indexed by the encoded 4-bit prfop value.
   A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Classification of an instruction's qualifier sequence; used below to
   decide which operand carries the size:Q information.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,		/* e.g. v.4s, v.4s, v.4s.  */
  DP_VECTOR_LONG,		/* e.g. v.8h, v.8b, v.8b.  */
  DP_VECTOR_WIDE,		/* e.g. v.8h, v.8h, v.8b.  */
  DP_VECTOR_ACROSS_LANES,	/* e.g. SADDLV <V><d>, <Vn>.<T>.  */
};
129
/* For each data pattern, the index of the operand whose qualifier
   determines the size:Q encoding.  Indexed by enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
201 \f
/* Instruction bit-field descriptions, indexed by the FLD_* field-kind
   enumerators.  Judging by the per-entry comments, each initializer is
   { least-significant bit, width in bits } -- e.g. imm19 is the 19-bit
   field starting at bit 5.  NOTE(review): member names are declared in
   aarch64-opc.h; confirm the {lsb, width} reading there.  */
const aarch64_field fields[] =
{
    { 0, 0 },	/* NIL.  */
    { 0, 4 },	/* cond2: condition in truly conditional-executed inst.  */
    { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 5, 19 },	/* imm19: e.g. in CBZ.  */
    { 5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29, 2 },	/* immlo: e.g. in ADRP.  */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
    { 0, 5 },	/* Rt: in load/store instructions.  */
    { 0, 5 },	/* Rd: in many integer instructions.  */
    { 5, 5 },	/* Rn: in many integer instructions.  */
    { 10, 5 },	/* Rt2: in load/store pair instructions.  */
    { 10, 5 },	/* Ra: in fp instructions.  */
    { 5, 3 },	/* op2: in the system instructions.  */
    { 8, 4 },	/* CRm: in the system instructions.  */
    { 12, 4 },	/* CRn: in the system instructions.  */
    { 16, 3 },	/* op1: in the system instructions.  */
    { 19, 2 },	/* op0: in the system instructions.  */
    { 10, 3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12, 4 },	/* cond: condition flags as a source operand.  */
    { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12, 1 },	/* S: in load/store reg offset instructions.  */
    { 21, 2 },	/* hw: in move wide constant instructions.  */
    { 22, 2 },	/* opc: in load/store reg offset instructions.  */
    { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22, 2 },	/* type: floating point type field in fp data inst.  */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10, 6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 15, 6 },	/* imm6_2: in rmif instructions.  */
    { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 0, 4 },	/* imm4_2: in rmif instructions.  */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    { 5, 14 },	/* imm14: in test bit and branch instructions.  */
    { 5, 16 },	/* imm16: in exception instructions.  */
    { 0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22, 1 },	/* S: in LDRAA and LDRAB instructions.  */
    { 22, 1 },	/* N: in logical (immediate) instructions.  */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31, 1 },	/* sf: in integer data processing instructions.  */
    { 30, 1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31, 1 },	/* b5: in the test bit and branch instructions.  */
    { 19, 5 },	/* b40: in the test bit and branch instructions.  */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 4, 1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
    { 14, 1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
    { 16, 1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
    { 17, 1 },	/* SVE_N: SVE equivalent of N.  */
    { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    { 5, 5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16, 5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    { 0, 5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    { 5, 5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    { 5, 1 },	/* SVE_i1: single-bit immediate.  */
    { 22, 1 },	/* SVE_i3h: high bit of 3-bit immediate.  */
    { 16, 3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16, 4 },	/* SVE_imm4: 4-bit immediate field.  */
    { 5, 5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16, 5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16, 6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14, 7 },	/* SVE_imm7: 7-bit immediate field.  */
    { 5, 8 },	/* SVE_imm8: 8-bit immediate field.  */
    { 5, 9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11, 6 },	/* SVE_immr: SVE equivalent of immr.  */
    { 5, 6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    { 5, 5 },	/* SVE_pattern: vector pattern enumeration.  */
    { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16, 1 },	/* SVE_rot1: 1-bit rotation amount.  */
    { 10, 2 },	/* SVE_rot2: 2-bit rotation amount.  */
    { 22, 1 },	/* SVE_sz: 1-bit element size select.  */
    { 16, 4 },	/* SVE_tsz: triangular size select.  */
    { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    { 8, 2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19, 2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22, 1 },	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 11, 2 },	/* rotate1: FCMLA immediate rotate.  */
    { 13, 2 },	/* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12, 1 },	/* rotate3: FCADD immediate rotate.  */
    { 12, 2 },	/* SM3: Indexed element SM3 2 bits index immediate.  */
};
323
324 enum aarch64_operand_class
325 aarch64_get_operand_class (enum aarch64_opnd type)
326 {
327 return aarch64_operands[type].op_class;
328 }
329
330 const char *
331 aarch64_get_operand_name (enum aarch64_opnd type)
332 {
333 return aarch64_operands[type].name;
334 }
335
336 /* Get operand description string.
337 This is usually for the diagnosis purpose. */
338 const char *
339 aarch64_get_operand_desc (enum aarch64_opnd type)
340 {
341 return aarch64_operands[type].desc;
342 }
343
/* Table of all conditional affixes, indexed by the 4-bit condition code.
   The first name in each entry is the canonical mnemonic; any following
   names are accepted alternative spellings for the same encoding.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
364
365 const aarch64_cond *
366 get_cond_from_value (aarch64_insn value)
367 {
368 assert (value < 16);
369 return &aarch64_conds[(unsigned int) value];
370 }
371
372 const aarch64_cond *
373 get_inverted_cond (const aarch64_cond *cond)
374 {
375 return &aarch64_conds[cond->value ^ 0x1];
376 }
377
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   The list is terminated by a NULL name.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
403
404 enum aarch64_modifier_kind
405 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
406 {
407 return desc - aarch64_operand_modifiers;
408 }
409
410 aarch64_insn
411 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
412 {
413 return aarch64_operand_modifiers[kind].value;
414 }
415
416 enum aarch64_modifier_kind
417 aarch64_get_operand_modifier_from_value (aarch64_insn value,
418 bfd_boolean extend_p)
419 {
420 if (extend_p == TRUE)
421 return AARCH64_MOD_UXTB + value;
422 else
423 return AARCH64_MOD_LSL - value;
424 }
425
426 bfd_boolean
427 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
428 {
429 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
430 ? TRUE : FALSE;
431 }
432
433 static inline bfd_boolean
434 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
435 {
436 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
437 ? TRUE : FALSE;
438 }
439
/* Memory barrier (DMB/DSB) option names, indexed by (and with value equal
   to) the encoded 4-bit option field.  Values with no named option are
   spelled as their raw immediate, e.g. "#0x00".  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
459
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },    /* PSB CSYNC.  */
  { NULL, 0x0 },
};
472
/* PRFM prefetch operation names, indexed by the 5-bit prfop encoding.
   A NULL name marks an unallocated encoding (printed as the raw value).

   op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
513 \f
514 /* Utilities on value constraint. */
515
/* Return non-zero iff VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  return value <= high;
}
521
/* Return true if VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return value % align == 0;
}
528
/* Return non-zero iff the signed VALUE is representable in a two's
   complement field of WIDTH bits; WIDTH must be below 32.  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    /* Representable range is [-2^(width-1), 2^(width-1)).  */
    int64_t limit = (int64_t) 1 << (width - 1);
    return value >= -limit && value < limit;
  }
}
542
/* Return non-zero iff VALUE is non-negative and representable in an
   unsigned field of WIDTH bits; WIDTH must be below 32.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    /* Representable range is [0, 2^width).  */
    int64_t limit = (int64_t) 1 << width;
    return value >= 0 && value < limit;
  }
}
556
557 /* Return 1 if OPERAND is SP or WSP. */
558 int
559 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
560 {
561 return ((aarch64_get_operand_class (operand->type)
562 == AARCH64_OPND_CLASS_INT_REG)
563 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
564 && operand->reg.regno == 31);
565 }
566
567 /* Return 1 if OPERAND is XZR or WZP. */
568 int
569 aarch64_zero_register_p (const aarch64_opnd_info *operand)
570 {
571 return ((aarch64_get_operand_class (operand->type)
572 == AARCH64_OPND_CLASS_INT_REG)
573 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
574 && operand->reg.regno == 31);
575 }
576
577 /* Return true if the operand *OPERAND that has the operand code
578 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
579 qualified by the qualifier TARGET. */
580
581 static inline int
582 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
583 aarch64_opnd_qualifier_t target)
584 {
585 switch (operand->qualifier)
586 {
587 case AARCH64_OPND_QLF_W:
588 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
589 return 1;
590 break;
591 case AARCH64_OPND_QLF_X:
592 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
593 return 1;
594 break;
595 case AARCH64_OPND_QLF_WSP:
596 if (target == AARCH64_OPND_QLF_W
597 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
598 return 1;
599 break;
600 case AARCH64_OPND_QLF_SP:
601 if (target == AARCH64_OPND_QLF_X
602 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
603 return 1;
604 break;
605 default:
606 break;
607 }
608
609 return 0;
610 }
611
/* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
   for operand KNOWN_IDX, return the expected qualifier for operand IDX.

   Return NIL if more than one expected qualifiers are found.  */

aarch64_opnd_qualifier_t
aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
				int idx,
				const aarch64_opnd_qualifier_t known_qlf,
				int known_idx)
{
  int i, saved_i;

  /* Special case.

     When the known qualifier is NIL, we have to assume that there is only
     one qualifier sequence in the *QSEQ_LIST and return the corresponding
     qualifier directly.  One scenario is that for instruction
	PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
     which has only one possible valid qualifier sequence
	NIL, S_D
     the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
     determine the correct relocation type (i.e. LDST64_LO12) for PRFM.

     Because the qualifier NIL has dual roles in the qualifier sequence:
     it can mean no qualifier for the operand, or the qualifer sequence is
     not in use (when all qualifiers in the sequence are NILs), we have to
     handle this special case here.  */
  if (known_qlf == AARCH64_OPND_NIL)
    {
      assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
      return qseq_list[0][idx];
    }

  for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
    {
      if (qseq_list[i][known_idx] == known_qlf)
	{
	  if (saved_i != -1)
	    /* More than one sequences are found to have KNOWN_QLF at
	       KNOWN_IDX.  */
	    return AARCH64_OPND_NIL;
	  saved_i = i;
	}
    }

  /* NOTE(review): if no sequence contains KNOWN_QLF at KNOWN_IDX,
     SAVED_I remains -1 and the index below is out of bounds; callers
     are presumably required to pass a qualifier that occurs in the
     list -- confirm.  */
  return qseq_list[saved_i][idx];
}
660
/* Broad category of an operand qualifier; selects how the data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,		/* No qualifier.  */
  OQK_OPD_VARIANT,	/* Register/vector arrangement variant.  */
  OQK_VALUE_IN_RANGE,	/* Constrains an immediate to [data0, data1].  */
  OQK_MISC,		/* Miscellaneous (e.g. shift operator spellings).  */
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size, element count and the
     common encoding value; for OQK_VALUE_IN_RANGE they are the lower
     and upper bounds (data2 unused).  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
681
682 /* Indexed by the operand qualifier enumerators. */
683 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
684 {
685 {0, 0, 0, "NIL", OQK_NIL},
686
687 /* Operand variant qualifiers.
688 First 3 fields:
689 element size, number of elements and common value for encoding. */
690
691 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
692 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
693 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
694 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
695
696 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
697 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
698 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
699 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
700 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
701 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
702
703 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
704 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
705 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
706 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
707 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
708 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
709 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
710 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
711 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
712 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
713 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
714
715 {0, 0, 0, "z", OQK_OPD_VARIANT},
716 {0, 0, 0, "m", OQK_OPD_VARIANT},
717
718 /* Qualifiers constraining the value range.
719 First 3 fields:
720 Lower bound, higher bound, unused. */
721
722 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
723 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
724 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
725 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
726 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
727 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
728 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
729
730 /* Qualifiers for miscellaneous purpose.
731 First 3 fields:
732 unused, unused and unused. */
733
734 {0, 0, 0, "lsl", 0},
735 {0, 0, 0, "msl", 0},
736
737 {0, 0, 0, "retrieving", 0},
738 };
739
740 static inline bfd_boolean
741 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
742 {
743 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
744 ? TRUE : FALSE;
745 }
746
747 static inline bfd_boolean
748 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
749 {
750 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
751 ? TRUE : FALSE;
752 }
753
754 const char*
755 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
756 {
757 return aarch64_opnd_qualifiers[qualifier].desc;
758 }
759
760 /* Given an operand qualifier, return the expected data element size
761 of a qualified operand. */
762 unsigned char
763 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
764 {
765 assert (operand_variant_qualifier_p (qualifier) == TRUE);
766 return aarch64_opnd_qualifiers[qualifier].data0;
767 }
768
769 unsigned char
770 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
771 {
772 assert (operand_variant_qualifier_p (qualifier) == TRUE);
773 return aarch64_opnd_qualifiers[qualifier].data1;
774 }
775
776 aarch64_insn
777 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
778 {
779 assert (operand_variant_qualifier_p (qualifier) == TRUE);
780 return aarch64_opnd_qualifiers[qualifier].data2;
781 }
782
783 static int
784 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
785 {
786 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
787 return aarch64_opnd_qualifiers[qualifier].data0;
788 }
789
790 static int
791 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
792 {
793 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
794 return aarch64_opnd_qualifiers[qualifier].data1;
795 }
796
#ifdef DEBUG_AARCH64
/* printf-style debug helper: prints STR prefixed with "#### " and
   followed by a newline.  Only built when DEBUG_AARCH64 is defined.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}

/* Print the AARCH64_MAX_OPND_NUM qualifier names starting at QUALIFIER
   on a single comma-separated debug line.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}

/* Debug aid for qualifier matching: dump the qualifiers currently
   recorded in the operand array OPND, followed by the candidate
   sequence QUALIFIER they are being matched against.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
834
835 /* TODO improve this, we can have an extra field at the runtime to
836 store the number of operands rather than calculating it every time. */
837
838 int
839 aarch64_num_of_operands (const aarch64_opcode *opcode)
840 {
841 int i = 0;
842 const enum aarch64_opnd *opnds = opcode->operands;
843 while (opnds[i++] != AARCH64_OPND_NIL)
844 ;
845 --i;
846 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
847 return i;
848 }
849
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers been established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   A couple examples of the matching algorithm:

   X,W,NIL should match
   X,W,NIL

   NIL,NIL should match
   X  ,NIL

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.  */

int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  N.B. QUALIFIERS_LIST is advanced in step with I,
     so that after the loop it points at the matched sequence (relied on
     below when filling *RET).  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched qualifiers up to STOP_AT; pad the remainder of
	 *RET with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
974
/* Operand qualifier matching and resolving.

   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.

   if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
   succeeds.  */

static int
match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
{
  int i, nops;
  aarch64_opnd_qualifier_seq_t qualifiers;

  /* Find the sequence that best matches the (possibly only partially
     established) qualifiers of the operands in *INST.  */
  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
			       qualifiers))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  if (inst->opcode->flags & F_STRICT)
    {
      /* Require an exact qualifier match, even for NIL qualifiers.  */
      nops = aarch64_num_of_operands (inst->opcode);
      for (i = 0; i < nops; ++i)
	if (inst->operands[i].qualifier != qualifiers[i])
	  return FALSE;
    }

  /* Update the qualifiers.  */
  if (update_p == TRUE)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
1021
1022 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1023 register by MOVZ.
1024
1025 IS32 indicates whether value is a 32-bit immediate or not.
1026 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1027 amount will be returned in *SHIFT_AMOUNT. */
1028
1029 bfd_boolean
1030 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1031 {
1032 int amount;
1033
1034 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1035
1036 if (is32)
1037 {
1038 /* Allow all zeros or all ones in top 32-bits, so that
1039 32-bit constant expressions like ~0x80000000 are
1040 permitted. */
1041 uint64_t ext = value;
1042 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1043 /* Immediate out of range. */
1044 return FALSE;
1045 value &= (int64_t) 0xffffffff;
1046 }
1047
1048 /* first, try movz then movn */
1049 amount = -1;
1050 if ((value & ((int64_t) 0xffff << 0)) == value)
1051 amount = 0;
1052 else if ((value & ((int64_t) 0xffff << 16)) == value)
1053 amount = 16;
1054 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1055 amount = 32;
1056 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1057 amount = 48;
1058
1059 if (amount == -1)
1060 {
1061 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1062 return FALSE;
1063 }
1064
1065 if (shift_amount != NULL)
1066 *shift_amount = amount;
1067
1068 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1069
1070 return TRUE;
1071 }
1072
1073 /* Build the accepted values for immediate logical SIMD instructions.
1074
1075 The standard encodings of the immediate value are:
1076 N imms immr SIMD size R S
1077 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1078 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1079 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1080 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1081 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1082 0 11110s 00000r 2 UInt(r) UInt(s)
1083 where all-ones value of S is reserved.
1084
1085 Let's call E the SIMD size.
1086
1087 The immediate value is: S+1 bits '1' rotated to the right by R.
1088
1089 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1090 (remember S != E - 1). */
1091
/* Total number of valid logical-immediate encodings; see the derivation in
   the comment above (64*63 + 32*31 + ... + 2*1).  */
#define TOTAL_IMM_NB 5334

/* One lookup-table entry: the fully-replicated 64-bit immediate value and
   its standard 13-bit (N:immr:imms) encoding.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all valid logical immediates; filled lazily by
   build_immediate_table and kept sorted by IMM so that
   aarch64_logical_immediate_p can bsearch it.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1101
1102 static int
1103 simd_imm_encoding_cmp(const void *i1, const void *i2)
1104 {
1105 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1106 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1107
1108 if (imm1->imm < imm2->imm)
1109 return -1;
1110 if (imm1->imm > imm2->imm)
1111 return +1;
1112 return 0;
1113 }
1114
1115 /* immediate bitfield standard encoding
1116 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1117 1 ssssss rrrrrr 64 rrrrrr ssssss
1118 0 0sssss 0rrrrr 32 rrrrr sssss
1119 0 10ssss 00rrrr 16 rrrr ssss
1120 0 110sss 000rrr 8 rrr sss
1121 0 1110ss 0000rr 4 rr ss
1122 0 11110s 00000r 2 r s */
/* Pack the standard imm13 encoding from its fields: bit <12> is N (IS64),
   bits <11:6> are immr (R) and bits <5:0> are imms (S).  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoded = s;

  encoded |= r << 6;
  encoded |= is64 << 12;
  return encoded;
}
1128
1129 static void
1130 build_immediate_table (void)
1131 {
1132 uint32_t log_e, e, s, r, s_mask;
1133 uint64_t mask, imm;
1134 int nb_imms;
1135 int is64;
1136
1137 nb_imms = 0;
1138 for (log_e = 1; log_e <= 6; log_e++)
1139 {
1140 /* Get element size. */
1141 e = 1u << log_e;
1142 if (log_e == 6)
1143 {
1144 is64 = 1;
1145 mask = 0xffffffffffffffffull;
1146 s_mask = 0;
1147 }
1148 else
1149 {
1150 is64 = 0;
1151 mask = (1ull << e) - 1;
1152 /* log_e s_mask
1153 1 ((1 << 4) - 1) << 2 = 111100
1154 2 ((1 << 3) - 1) << 3 = 111000
1155 3 ((1 << 2) - 1) << 4 = 110000
1156 4 ((1 << 1) - 1) << 5 = 100000
1157 5 ((1 << 0) - 1) << 6 = 000000 */
1158 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1159 }
1160 for (s = 0; s < e - 1; s++)
1161 for (r = 0; r < e; r++)
1162 {
1163 /* s+1 consecutive bits to 1 (s < 63) */
1164 imm = (1ull << (s + 1)) - 1;
1165 /* rotate right by r */
1166 if (r != 0)
1167 imm = (imm >> r) | ((imm << (e - r)) & mask);
1168 /* replicate the constant depending on SIMD size */
1169 switch (log_e)
1170 {
1171 case 1: imm = (imm << 2) | imm;
1172 /* Fall through. */
1173 case 2: imm = (imm << 4) | imm;
1174 /* Fall through. */
1175 case 3: imm = (imm << 8) | imm;
1176 /* Fall through. */
1177 case 4: imm = (imm << 16) | imm;
1178 /* Fall through. */
1179 case 5: imm = (imm << 32) | imm;
1180 /* Fall through. */
1181 case 6: break;
1182 default: abort ();
1183 }
1184 simd_immediates[nb_imms].imm = imm;
1185 simd_immediates[nb_imms].encoding =
1186 encode_immediate_bitfield(is64, s | s_mask, r);
1187 nb_imms++;
1188 }
1189 }
1190 assert (nb_imms == TOTAL_IMM_NB);
1191 qsort(simd_immediates, nb_imms,
1192 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1193 }
1194
1195 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1196 be accepted by logical (immediate) instructions
1197 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1198
1199 ESIZE is the number of bytes in the decoded immediate value.
1200 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1201 VALUE will be returned in *ENCODING. */
1202
bfd_boolean
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  /* Lazily-built lookup table flag.  NOTE(review): this one-time init is
     not guarded against concurrent first calls — confirm callers are
     single-threaded here.  */
  static bfd_boolean initialized = FALSE;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  if (!initialized)
    {
      build_immediate_table ();
      initialized = TRUE;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.
     UPPER masks the bits above the ESIZE-byte element.  The shift is split
     into two shifts of ESIZE*4 because a single shift of ESIZE*8 would be
     a shift by 64 when ESIZE == 8, which is undefined behaviour; the
     double shift instead yields UPPER == 0 in that case.  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return FALSE;

  /* Replicate to a full 64-bit value, since the table stores every
     immediate in its fully-replicated form.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* Binary-search the sorted table for the replicated value.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with FALSE");
      return FALSE;
    }
  /* Found: optionally hand back the standard 13-bit encoding.  */
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with TRUE");
  return TRUE;
}
1246
1247 /* If 64-bit immediate IMM is in the format of
1248 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1249 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1250 of value "abcdefgh". Otherwise return -1. */
/* If 64-bit immediate IMM consists of eight bytes each of which is
   either 0x00 or 0xff, return the 8-bit value whose bit I is set iff
   byte I of IMM is 0xff.  Return -1 for any other input.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int bit;

  for (bit = 0; bit < 8; bit++, imm >>= 8)
    {
      uint32_t b = imm & 0xff;

      if (b == 0xff)
	result |= 1 << bit;
      else if (b != 0x00)
	/* A byte that is neither all-zeros nor all-ones.  */
	return -1;
    }
  return result;
}
1268
1269 /* Utility inline functions for operand_general_constraint_met_p. */
1270
1271 static inline void
1272 set_error (aarch64_operand_error *mismatch_detail,
1273 enum aarch64_operand_error_kind kind, int idx,
1274 const char* error)
1275 {
1276 if (mismatch_detail == NULL)
1277 return;
1278 mismatch_detail->kind = kind;
1279 mismatch_detail->index = idx;
1280 mismatch_detail->error = error;
1281 }
1282
1283 static inline void
1284 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1285 const char* error)
1286 {
1287 if (mismatch_detail == NULL)
1288 return;
1289 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1290 }
1291
1292 static inline void
1293 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1294 int idx, int lower_bound, int upper_bound,
1295 const char* error)
1296 {
1297 if (mismatch_detail == NULL)
1298 return;
1299 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1300 mismatch_detail->data[0] = lower_bound;
1301 mismatch_detail->data[1] = upper_bound;
1302 }
1303
1304 static inline void
1305 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1306 int idx, int lower_bound, int upper_bound)
1307 {
1308 if (mismatch_detail == NULL)
1309 return;
1310 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1311 _("immediate value"));
1312 }
1313
1314 static inline void
1315 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1316 int idx, int lower_bound, int upper_bound)
1317 {
1318 if (mismatch_detail == NULL)
1319 return;
1320 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1321 _("immediate offset"));
1322 }
1323
1324 static inline void
1325 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1326 int idx, int lower_bound, int upper_bound)
1327 {
1328 if (mismatch_detail == NULL)
1329 return;
1330 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1331 _("register number"));
1332 }
1333
1334 static inline void
1335 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1336 int idx, int lower_bound, int upper_bound)
1337 {
1338 if (mismatch_detail == NULL)
1339 return;
1340 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1341 _("register element index"));
1342 }
1343
1344 static inline void
1345 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1346 int idx, int lower_bound, int upper_bound)
1347 {
1348 if (mismatch_detail == NULL)
1349 return;
1350 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1351 _("shift amount"));
1352 }
1353
1354 /* Report that the MUL modifier in operand IDX should be in the range
1355 [LOWER_BOUND, UPPER_BOUND]. */
1356 static inline void
1357 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1358 int idx, int lower_bound, int upper_bound)
1359 {
1360 if (mismatch_detail == NULL)
1361 return;
1362 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1363 _("multiplier"));
1364 }
1365
1366 static inline void
1367 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1368 int alignment)
1369 {
1370 if (mismatch_detail == NULL)
1371 return;
1372 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1373 mismatch_detail->data[0] = alignment;
1374 }
1375
1376 static inline void
1377 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1378 int expected_num)
1379 {
1380 if (mismatch_detail == NULL)
1381 return;
1382 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1383 mismatch_detail->data[0] = expected_num;
1384 }
1385
1386 static inline void
1387 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1388 const char* error)
1389 {
1390 if (mismatch_detail == NULL)
1391 return;
1392 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1393 }
1394
1395 /* General constraint checking based on operand code.
1396
1397 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1398 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1399
1400 This function has to be called after the qualifiers for all operands
1401 have been resolved.
1402
1403 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1404 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1405 of error message during the disassembling where error message is not
1406 wanted. We avoid the dynamic construction of strings of error messages
1407 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1408 use a combination of error code, static string and some integer data to
1409 represent an error. */
1410
1411 static int
1412 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1413 enum aarch64_opnd type,
1414 const aarch64_opcode *opcode,
1415 aarch64_operand_error *mismatch_detail)
1416 {
1417 unsigned num, modifiers, shift;
1418 unsigned char size;
1419 int64_t imm, min_value, max_value;
1420 uint64_t uvalue, mask;
1421 const aarch64_opnd_info *opnd = opnds + idx;
1422 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1423
1424 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1425
1426 switch (aarch64_operands[type].op_class)
1427 {
1428 case AARCH64_OPND_CLASS_INT_REG:
1429 /* Check pair reg constraints for cas* instructions. */
1430 if (type == AARCH64_OPND_PAIRREG)
1431 {
1432 assert (idx == 1 || idx == 3);
1433 if (opnds[idx - 1].reg.regno % 2 != 0)
1434 {
1435 set_syntax_error (mismatch_detail, idx - 1,
1436 _("reg pair must start from even reg"));
1437 return 0;
1438 }
1439 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1440 {
1441 set_syntax_error (mismatch_detail, idx,
1442 _("reg pair must be contiguous"));
1443 return 0;
1444 }
1445 break;
1446 }
1447
1448 /* <Xt> may be optional in some IC and TLBI instructions. */
1449 if (type == AARCH64_OPND_Rt_SYS)
1450 {
1451 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1452 == AARCH64_OPND_CLASS_SYSTEM));
1453 if (opnds[1].present
1454 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1455 {
1456 set_other_error (mismatch_detail, idx, _("extraneous register"));
1457 return 0;
1458 }
1459 if (!opnds[1].present
1460 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1461 {
1462 set_other_error (mismatch_detail, idx, _("missing register"));
1463 return 0;
1464 }
1465 }
1466 switch (qualifier)
1467 {
1468 case AARCH64_OPND_QLF_WSP:
1469 case AARCH64_OPND_QLF_SP:
1470 if (!aarch64_stack_pointer_p (opnd))
1471 {
1472 set_other_error (mismatch_detail, idx,
1473 _("stack pointer register expected"));
1474 return 0;
1475 }
1476 break;
1477 default:
1478 break;
1479 }
1480 break;
1481
1482 case AARCH64_OPND_CLASS_SVE_REG:
1483 switch (type)
1484 {
1485 case AARCH64_OPND_SVE_Zm3_INDEX:
1486 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1487 case AARCH64_OPND_SVE_Zm4_INDEX:
1488 size = get_operand_fields_width (get_operand_from_code (type));
1489 shift = get_operand_specific_data (&aarch64_operands[type]);
1490 mask = (1 << shift) - 1;
1491 if (opnd->reg.regno > mask)
1492 {
1493 assert (mask == 7 || mask == 15);
1494 set_other_error (mismatch_detail, idx,
1495 mask == 15
1496 ? _("z0-z15 expected")
1497 : _("z0-z7 expected"));
1498 return 0;
1499 }
1500 mask = (1 << (size - shift)) - 1;
1501 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1502 {
1503 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1504 return 0;
1505 }
1506 break;
1507
1508 case AARCH64_OPND_SVE_Zn_INDEX:
1509 size = aarch64_get_qualifier_esize (opnd->qualifier);
1510 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1511 {
1512 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1513 0, 64 / size - 1);
1514 return 0;
1515 }
1516 break;
1517
1518 case AARCH64_OPND_SVE_ZnxN:
1519 case AARCH64_OPND_SVE_ZtxN:
1520 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1521 {
1522 set_other_error (mismatch_detail, idx,
1523 _("invalid register list"));
1524 return 0;
1525 }
1526 break;
1527
1528 default:
1529 break;
1530 }
1531 break;
1532
1533 case AARCH64_OPND_CLASS_PRED_REG:
1534 if (opnd->reg.regno >= 8
1535 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1536 {
1537 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1538 return 0;
1539 }
1540 break;
1541
1542 case AARCH64_OPND_CLASS_COND:
1543 if (type == AARCH64_OPND_COND1
1544 && (opnds[idx].cond->value & 0xe) == 0xe)
1545 {
1546 /* Not allow AL or NV. */
1547 set_syntax_error (mismatch_detail, idx, NULL);
1548 }
1549 break;
1550
1551 case AARCH64_OPND_CLASS_ADDRESS:
1552 /* Check writeback. */
1553 switch (opcode->iclass)
1554 {
1555 case ldst_pos:
1556 case ldst_unscaled:
1557 case ldstnapair_offs:
1558 case ldstpair_off:
1559 case ldst_unpriv:
1560 if (opnd->addr.writeback == 1)
1561 {
1562 set_syntax_error (mismatch_detail, idx,
1563 _("unexpected address writeback"));
1564 return 0;
1565 }
1566 break;
1567 case ldst_imm10:
1568 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1569 {
1570 set_syntax_error (mismatch_detail, idx,
1571 _("unexpected address writeback"));
1572 return 0;
1573 }
1574 break;
1575 case ldst_imm9:
1576 case ldstpair_indexed:
1577 case asisdlsep:
1578 case asisdlsop:
1579 if (opnd->addr.writeback == 0)
1580 {
1581 set_syntax_error (mismatch_detail, idx,
1582 _("address writeback expected"));
1583 return 0;
1584 }
1585 break;
1586 default:
1587 assert (opnd->addr.writeback == 0);
1588 break;
1589 }
1590 switch (type)
1591 {
1592 case AARCH64_OPND_ADDR_SIMM7:
1593 /* Scaled signed 7 bits immediate offset. */
1594 /* Get the size of the data element that is accessed, which may be
1595 different from that of the source register size,
1596 e.g. in strb/ldrb. */
1597 size = aarch64_get_qualifier_esize (opnd->qualifier);
1598 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1599 {
1600 set_offset_out_of_range_error (mismatch_detail, idx,
1601 -64 * size, 63 * size);
1602 return 0;
1603 }
1604 if (!value_aligned_p (opnd->addr.offset.imm, size))
1605 {
1606 set_unaligned_error (mismatch_detail, idx, size);
1607 return 0;
1608 }
1609 break;
1610 case AARCH64_OPND_ADDR_OFFSET:
1611 case AARCH64_OPND_ADDR_SIMM9:
1612 /* Unscaled signed 9 bits immediate offset. */
1613 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1614 {
1615 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1616 return 0;
1617 }
1618 break;
1619
1620 case AARCH64_OPND_ADDR_SIMM9_2:
1621 /* Unscaled signed 9 bits immediate offset, which has to be negative
1622 or unaligned. */
1623 size = aarch64_get_qualifier_esize (qualifier);
1624 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1625 && !value_aligned_p (opnd->addr.offset.imm, size))
1626 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1627 return 1;
1628 set_other_error (mismatch_detail, idx,
1629 _("negative or unaligned offset expected"));
1630 return 0;
1631
1632 case AARCH64_OPND_ADDR_SIMM10:
1633 /* Scaled signed 10 bits immediate offset. */
1634 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1635 {
1636 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1637 return 0;
1638 }
1639 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1640 {
1641 set_unaligned_error (mismatch_detail, idx, 8);
1642 return 0;
1643 }
1644 break;
1645
1646 case AARCH64_OPND_SIMD_ADDR_POST:
1647 /* AdvSIMD load/store multiple structures, post-index. */
1648 assert (idx == 1);
1649 if (opnd->addr.offset.is_reg)
1650 {
1651 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1652 return 1;
1653 else
1654 {
1655 set_other_error (mismatch_detail, idx,
1656 _("invalid register offset"));
1657 return 0;
1658 }
1659 }
1660 else
1661 {
1662 const aarch64_opnd_info *prev = &opnds[idx-1];
1663 unsigned num_bytes; /* total number of bytes transferred. */
1664 /* The opcode dependent area stores the number of elements in
1665 each structure to be loaded/stored. */
1666 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1667 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1668 /* Special handling of loading single structure to all lane. */
1669 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1670 * aarch64_get_qualifier_esize (prev->qualifier);
1671 else
1672 num_bytes = prev->reglist.num_regs
1673 * aarch64_get_qualifier_esize (prev->qualifier)
1674 * aarch64_get_qualifier_nelem (prev->qualifier);
1675 if ((int) num_bytes != opnd->addr.offset.imm)
1676 {
1677 set_other_error (mismatch_detail, idx,
1678 _("invalid post-increment amount"));
1679 return 0;
1680 }
1681 }
1682 break;
1683
1684 case AARCH64_OPND_ADDR_REGOFF:
1685 /* Get the size of the data element that is accessed, which may be
1686 different from that of the source register size,
1687 e.g. in strb/ldrb. */
1688 size = aarch64_get_qualifier_esize (opnd->qualifier);
1689 /* It is either no shift or shift by the binary logarithm of SIZE. */
1690 if (opnd->shifter.amount != 0
1691 && opnd->shifter.amount != (int)get_logsz (size))
1692 {
1693 set_other_error (mismatch_detail, idx,
1694 _("invalid shift amount"));
1695 return 0;
1696 }
1697 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1698 operators. */
1699 switch (opnd->shifter.kind)
1700 {
1701 case AARCH64_MOD_UXTW:
1702 case AARCH64_MOD_LSL:
1703 case AARCH64_MOD_SXTW:
1704 case AARCH64_MOD_SXTX: break;
1705 default:
1706 set_other_error (mismatch_detail, idx,
1707 _("invalid extend/shift operator"));
1708 return 0;
1709 }
1710 break;
1711
1712 case AARCH64_OPND_ADDR_UIMM12:
1713 imm = opnd->addr.offset.imm;
1714 /* Get the size of the data element that is accessed, which may be
1715 different from that of the source register size,
1716 e.g. in strb/ldrb. */
1717 size = aarch64_get_qualifier_esize (qualifier);
1718 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1719 {
1720 set_offset_out_of_range_error (mismatch_detail, idx,
1721 0, 4095 * size);
1722 return 0;
1723 }
1724 if (!value_aligned_p (opnd->addr.offset.imm, size))
1725 {
1726 set_unaligned_error (mismatch_detail, idx, size);
1727 return 0;
1728 }
1729 break;
1730
1731 case AARCH64_OPND_ADDR_PCREL14:
1732 case AARCH64_OPND_ADDR_PCREL19:
1733 case AARCH64_OPND_ADDR_PCREL21:
1734 case AARCH64_OPND_ADDR_PCREL26:
1735 imm = opnd->imm.value;
1736 if (operand_need_shift_by_two (get_operand_from_code (type)))
1737 {
1738 /* The offset value in a PC-relative branch instruction is alway
1739 4-byte aligned and is encoded without the lowest 2 bits. */
1740 if (!value_aligned_p (imm, 4))
1741 {
1742 set_unaligned_error (mismatch_detail, idx, 4);
1743 return 0;
1744 }
1745 /* Right shift by 2 so that we can carry out the following check
1746 canonically. */
1747 imm >>= 2;
1748 }
1749 size = get_operand_fields_width (get_operand_from_code (type));
1750 if (!value_fit_signed_field_p (imm, size))
1751 {
1752 set_other_error (mismatch_detail, idx,
1753 _("immediate out of range"));
1754 return 0;
1755 }
1756 break;
1757
1758 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1759 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1760 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1761 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1762 min_value = -8;
1763 max_value = 7;
1764 sve_imm_offset_vl:
1765 assert (!opnd->addr.offset.is_reg);
1766 assert (opnd->addr.preind);
1767 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1768 min_value *= num;
1769 max_value *= num;
1770 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1771 || (opnd->shifter.operator_present
1772 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1773 {
1774 set_other_error (mismatch_detail, idx,
1775 _("invalid addressing mode"));
1776 return 0;
1777 }
1778 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1779 {
1780 set_offset_out_of_range_error (mismatch_detail, idx,
1781 min_value, max_value);
1782 return 0;
1783 }
1784 if (!value_aligned_p (opnd->addr.offset.imm, num))
1785 {
1786 set_unaligned_error (mismatch_detail, idx, num);
1787 return 0;
1788 }
1789 break;
1790
1791 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1792 min_value = -32;
1793 max_value = 31;
1794 goto sve_imm_offset_vl;
1795
1796 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1797 min_value = -256;
1798 max_value = 255;
1799 goto sve_imm_offset_vl;
1800
1801 case AARCH64_OPND_SVE_ADDR_RI_U6:
1802 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1803 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1804 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1805 min_value = 0;
1806 max_value = 63;
1807 sve_imm_offset:
1808 assert (!opnd->addr.offset.is_reg);
1809 assert (opnd->addr.preind);
1810 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1811 min_value *= num;
1812 max_value *= num;
1813 if (opnd->shifter.operator_present
1814 || opnd->shifter.amount_present)
1815 {
1816 set_other_error (mismatch_detail, idx,
1817 _("invalid addressing mode"));
1818 return 0;
1819 }
1820 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1821 {
1822 set_offset_out_of_range_error (mismatch_detail, idx,
1823 min_value, max_value);
1824 return 0;
1825 }
1826 if (!value_aligned_p (opnd->addr.offset.imm, num))
1827 {
1828 set_unaligned_error (mismatch_detail, idx, num);
1829 return 0;
1830 }
1831 break;
1832
1833 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1834 min_value = -8;
1835 max_value = 7;
1836 goto sve_imm_offset;
1837
1838 case AARCH64_OPND_SVE_ADDR_RR:
1839 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1840 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1841 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1842 case AARCH64_OPND_SVE_ADDR_RX:
1843 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1844 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1845 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1846 case AARCH64_OPND_SVE_ADDR_RZ:
1847 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1848 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1849 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1850 modifiers = 1 << AARCH64_MOD_LSL;
1851 sve_rr_operand:
1852 assert (opnd->addr.offset.is_reg);
1853 assert (opnd->addr.preind);
1854 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1855 && opnd->addr.offset.regno == 31)
1856 {
1857 set_other_error (mismatch_detail, idx,
1858 _("index register xzr is not allowed"));
1859 return 0;
1860 }
1861 if (((1 << opnd->shifter.kind) & modifiers) == 0
1862 || (opnd->shifter.amount
1863 != get_operand_specific_data (&aarch64_operands[type])))
1864 {
1865 set_other_error (mismatch_detail, idx,
1866 _("invalid addressing mode"));
1867 return 0;
1868 }
1869 break;
1870
1871 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1872 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1873 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1874 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1875 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1876 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1877 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1878 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1879 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1880 goto sve_rr_operand;
1881
1882 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1883 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1884 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1885 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1886 min_value = 0;
1887 max_value = 31;
1888 goto sve_imm_offset;
1889
1890 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1891 modifiers = 1 << AARCH64_MOD_LSL;
1892 sve_zz_operand:
1893 assert (opnd->addr.offset.is_reg);
1894 assert (opnd->addr.preind);
1895 if (((1 << opnd->shifter.kind) & modifiers) == 0
1896 || opnd->shifter.amount < 0
1897 || opnd->shifter.amount > 3)
1898 {
1899 set_other_error (mismatch_detail, idx,
1900 _("invalid addressing mode"));
1901 return 0;
1902 }
1903 break;
1904
1905 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1906 modifiers = (1 << AARCH64_MOD_SXTW);
1907 goto sve_zz_operand;
1908
1909 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1910 modifiers = 1 << AARCH64_MOD_UXTW;
1911 goto sve_zz_operand;
1912
1913 default:
1914 break;
1915 }
1916 break;
1917
1918 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1919 if (type == AARCH64_OPND_LEt)
1920 {
1921 /* Get the upper bound for the element index. */
1922 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1923 if (!value_in_range_p (opnd->reglist.index, 0, num))
1924 {
1925 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1926 return 0;
1927 }
1928 }
1929 /* The opcode dependent area stores the number of elements in
1930 each structure to be loaded/stored. */
1931 num = get_opcode_dependent_value (opcode);
1932 switch (type)
1933 {
1934 case AARCH64_OPND_LVt:
1935 assert (num >= 1 && num <= 4);
1936 /* Unless LD1/ST1, the number of registers should be equal to that
1937 of the structure elements. */
1938 if (num != 1 && opnd->reglist.num_regs != num)
1939 {
1940 set_reg_list_error (mismatch_detail, idx, num);
1941 return 0;
1942 }
1943 break;
1944 case AARCH64_OPND_LVt_AL:
1945 case AARCH64_OPND_LEt:
1946 assert (num >= 1 && num <= 4);
1947 /* The number of registers should be equal to that of the structure
1948 elements. */
1949 if (opnd->reglist.num_regs != num)
1950 {
1951 set_reg_list_error (mismatch_detail, idx, num);
1952 return 0;
1953 }
1954 break;
1955 default:
1956 break;
1957 }
1958 break;
1959
1960 case AARCH64_OPND_CLASS_IMMEDIATE:
1961 /* Constraint check on immediate operand. */
1962 imm = opnd->imm.value;
1963 /* E.g. imm_0_31 constrains value to be 0..31. */
1964 if (qualifier_value_in_range_constraint_p (qualifier)
1965 && !value_in_range_p (imm, get_lower_bound (qualifier),
1966 get_upper_bound (qualifier)))
1967 {
1968 set_imm_out_of_range_error (mismatch_detail, idx,
1969 get_lower_bound (qualifier),
1970 get_upper_bound (qualifier));
1971 return 0;
1972 }
1973
1974 switch (type)
1975 {
1976 case AARCH64_OPND_AIMM:
1977 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1978 {
1979 set_other_error (mismatch_detail, idx,
1980 _("invalid shift operator"));
1981 return 0;
1982 }
1983 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1984 {
1985 set_other_error (mismatch_detail, idx,
1986 _("shift amount must be 0 or 12"));
1987 return 0;
1988 }
1989 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1990 {
1991 set_other_error (mismatch_detail, idx,
1992 _("immediate out of range"));
1993 return 0;
1994 }
1995 break;
1996
1997 case AARCH64_OPND_HALF:
1998 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1999 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2000 {
2001 set_other_error (mismatch_detail, idx,
2002 _("invalid shift operator"));
2003 return 0;
2004 }
2005 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2006 if (!value_aligned_p (opnd->shifter.amount, 16))
2007 {
2008 set_other_error (mismatch_detail, idx,
2009 _("shift amount must be a multiple of 16"));
2010 return 0;
2011 }
2012 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2013 {
2014 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2015 0, size * 8 - 16);
2016 return 0;
2017 }
2018 if (opnd->imm.value < 0)
2019 {
2020 set_other_error (mismatch_detail, idx,
2021 _("negative immediate value not allowed"));
2022 return 0;
2023 }
2024 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2025 {
2026 set_other_error (mismatch_detail, idx,
2027 _("immediate out of range"));
2028 return 0;
2029 }
2030 break;
2031
2032 case AARCH64_OPND_IMM_MOV:
2033 {
2034 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2035 imm = opnd->imm.value;
2036 assert (idx == 1);
2037 switch (opcode->op)
2038 {
2039 case OP_MOV_IMM_WIDEN:
2040 imm = ~imm;
2041 /* Fall through. */
2042 case OP_MOV_IMM_WIDE:
2043 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2044 {
2045 set_other_error (mismatch_detail, idx,
2046 _("immediate out of range"));
2047 return 0;
2048 }
2049 break;
2050 case OP_MOV_IMM_LOG:
2051 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2052 {
2053 set_other_error (mismatch_detail, idx,
2054 _("immediate out of range"));
2055 return 0;
2056 }
2057 break;
2058 default:
2059 assert (0);
2060 return 0;
2061 }
2062 }
2063 break;
2064
2065 case AARCH64_OPND_NZCV:
2066 case AARCH64_OPND_CCMP_IMM:
2067 case AARCH64_OPND_EXCEPTION:
2068 case AARCH64_OPND_UIMM4:
2069 case AARCH64_OPND_UIMM7:
2070 case AARCH64_OPND_UIMM3_OP1:
2071 case AARCH64_OPND_UIMM3_OP2:
2072 case AARCH64_OPND_SVE_UIMM3:
2073 case AARCH64_OPND_SVE_UIMM7:
2074 case AARCH64_OPND_SVE_UIMM8:
2075 case AARCH64_OPND_SVE_UIMM8_53:
2076 size = get_operand_fields_width (get_operand_from_code (type));
2077 assert (size < 32);
2078 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2079 {
2080 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2081 (1 << size) - 1);
2082 return 0;
2083 }
2084 break;
2085
2086 case AARCH64_OPND_SIMM5:
2087 case AARCH64_OPND_SVE_SIMM5:
2088 case AARCH64_OPND_SVE_SIMM5B:
2089 case AARCH64_OPND_SVE_SIMM6:
2090 case AARCH64_OPND_SVE_SIMM8:
2091 size = get_operand_fields_width (get_operand_from_code (type));
2092 assert (size < 32);
2093 if (!value_fit_signed_field_p (opnd->imm.value, size))
2094 {
2095 set_imm_out_of_range_error (mismatch_detail, idx,
2096 -(1 << (size - 1)),
2097 (1 << (size - 1)) - 1);
2098 return 0;
2099 }
2100 break;
2101
2102 case AARCH64_OPND_WIDTH:
2103 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2104 && opnds[0].type == AARCH64_OPND_Rd);
2105 size = get_upper_bound (qualifier);
2106 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2107 /* lsb+width <= reg.size */
2108 {
2109 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2110 size - opnds[idx-1].imm.value);
2111 return 0;
2112 }
2113 break;
2114
2115 case AARCH64_OPND_LIMM:
2116 case AARCH64_OPND_SVE_LIMM:
2117 {
2118 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2119 uint64_t uimm = opnd->imm.value;
2120 if (opcode->op == OP_BIC)
2121 uimm = ~uimm;
2122 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2123 {
2124 set_other_error (mismatch_detail, idx,
2125 _("immediate out of range"));
2126 return 0;
2127 }
2128 }
2129 break;
2130
2131 case AARCH64_OPND_IMM0:
2132 case AARCH64_OPND_FPIMM0:
2133 if (opnd->imm.value != 0)
2134 {
2135 set_other_error (mismatch_detail, idx,
2136 _("immediate zero expected"));
2137 return 0;
2138 }
2139 break;
2140
2141 case AARCH64_OPND_IMM_ROT1:
2142 case AARCH64_OPND_IMM_ROT2:
2143 case AARCH64_OPND_SVE_IMM_ROT2:
2144 if (opnd->imm.value != 0
2145 && opnd->imm.value != 90
2146 && opnd->imm.value != 180
2147 && opnd->imm.value != 270)
2148 {
2149 set_other_error (mismatch_detail, idx,
2150 _("rotate expected to be 0, 90, 180 or 270"));
2151 return 0;
2152 }
2153 break;
2154
2155 case AARCH64_OPND_IMM_ROT3:
2156 case AARCH64_OPND_SVE_IMM_ROT1:
2157 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2158 {
2159 set_other_error (mismatch_detail, idx,
2160 _("rotate expected to be 90 or 270"));
2161 return 0;
2162 }
2163 break;
2164
2165 case AARCH64_OPND_SHLL_IMM:
2166 assert (idx == 2);
2167 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2168 if (opnd->imm.value != size)
2169 {
2170 set_other_error (mismatch_detail, idx,
2171 _("invalid shift amount"));
2172 return 0;
2173 }
2174 break;
2175
2176 case AARCH64_OPND_IMM_VLSL:
2177 size = aarch64_get_qualifier_esize (qualifier);
2178 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2179 {
2180 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2181 size * 8 - 1);
2182 return 0;
2183 }
2184 break;
2185
2186 case AARCH64_OPND_IMM_VLSR:
2187 size = aarch64_get_qualifier_esize (qualifier);
2188 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2189 {
2190 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2191 return 0;
2192 }
2193 break;
2194
2195 case AARCH64_OPND_SIMD_IMM:
2196 case AARCH64_OPND_SIMD_IMM_SFT:
2197 /* Qualifier check. */
2198 switch (qualifier)
2199 {
2200 case AARCH64_OPND_QLF_LSL:
2201 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2202 {
2203 set_other_error (mismatch_detail, idx,
2204 _("invalid shift operator"));
2205 return 0;
2206 }
2207 break;
2208 case AARCH64_OPND_QLF_MSL:
2209 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2210 {
2211 set_other_error (mismatch_detail, idx,
2212 _("invalid shift operator"));
2213 return 0;
2214 }
2215 break;
2216 case AARCH64_OPND_QLF_NIL:
2217 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2218 {
2219 set_other_error (mismatch_detail, idx,
2220 _("shift is not permitted"));
2221 return 0;
2222 }
2223 break;
2224 default:
2225 assert (0);
2226 return 0;
2227 }
2228 /* Is the immediate valid? */
2229 assert (idx == 1);
2230 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2231 {
2232 /* uimm8 or simm8 */
2233 if (!value_in_range_p (opnd->imm.value, -128, 255))
2234 {
2235 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2236 return 0;
2237 }
2238 }
2239 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2240 {
2241 /* uimm64 is not
2242 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2243 ffffffffgggggggghhhhhhhh'. */
2244 set_other_error (mismatch_detail, idx,
2245 _("invalid value for immediate"));
2246 return 0;
2247 }
2248 /* Is the shift amount valid? */
2249 switch (opnd->shifter.kind)
2250 {
2251 case AARCH64_MOD_LSL:
2252 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2253 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2254 {
2255 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2256 (size - 1) * 8);
2257 return 0;
2258 }
2259 if (!value_aligned_p (opnd->shifter.amount, 8))
2260 {
2261 set_unaligned_error (mismatch_detail, idx, 8);
2262 return 0;
2263 }
2264 break;
2265 case AARCH64_MOD_MSL:
2266 /* Only 8 and 16 are valid shift amount. */
2267 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2268 {
2269 set_other_error (mismatch_detail, idx,
2270 _("shift amount must be 0 or 16"));
2271 return 0;
2272 }
2273 break;
2274 default:
2275 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2276 {
2277 set_other_error (mismatch_detail, idx,
2278 _("invalid shift operator"));
2279 return 0;
2280 }
2281 break;
2282 }
2283 break;
2284
2285 case AARCH64_OPND_FPIMM:
2286 case AARCH64_OPND_SIMD_FPIMM:
2287 case AARCH64_OPND_SVE_FPIMM8:
2288 if (opnd->imm.is_fp == 0)
2289 {
2290 set_other_error (mismatch_detail, idx,
2291 _("floating-point immediate expected"));
2292 return 0;
2293 }
2294 /* The value is expected to be an 8-bit floating-point constant with
2295 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2296 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2297 instruction). */
2298 if (!value_in_range_p (opnd->imm.value, 0, 255))
2299 {
2300 set_other_error (mismatch_detail, idx,
2301 _("immediate out of range"));
2302 return 0;
2303 }
2304 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2305 {
2306 set_other_error (mismatch_detail, idx,
2307 _("invalid shift operator"));
2308 return 0;
2309 }
2310 break;
2311
2312 case AARCH64_OPND_SVE_AIMM:
2313 min_value = 0;
2314 sve_aimm:
2315 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2316 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2317 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2318 uvalue = opnd->imm.value;
2319 shift = opnd->shifter.amount;
2320 if (size == 1)
2321 {
2322 if (shift != 0)
2323 {
2324 set_other_error (mismatch_detail, idx,
2325 _("no shift amount allowed for"
2326 " 8-bit constants"));
2327 return 0;
2328 }
2329 }
2330 else
2331 {
2332 if (shift != 0 && shift != 8)
2333 {
2334 set_other_error (mismatch_detail, idx,
2335 _("shift amount must be 0 or 8"));
2336 return 0;
2337 }
2338 if (shift == 0 && (uvalue & 0xff) == 0)
2339 {
2340 shift = 8;
2341 uvalue = (int64_t) uvalue / 256;
2342 }
2343 }
2344 mask >>= shift;
2345 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2346 {
2347 set_other_error (mismatch_detail, idx,
2348 _("immediate too big for element size"));
2349 return 0;
2350 }
2351 uvalue = (uvalue - min_value) & mask;
2352 if (uvalue > 0xff)
2353 {
2354 set_other_error (mismatch_detail, idx,
2355 _("invalid arithmetic immediate"));
2356 return 0;
2357 }
2358 break;
2359
2360 case AARCH64_OPND_SVE_ASIMM:
2361 min_value = -128;
2362 goto sve_aimm;
2363
2364 case AARCH64_OPND_SVE_I1_HALF_ONE:
2365 assert (opnd->imm.is_fp);
2366 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2367 {
2368 set_other_error (mismatch_detail, idx,
2369 _("floating-point value must be 0.5 or 1.0"));
2370 return 0;
2371 }
2372 break;
2373
2374 case AARCH64_OPND_SVE_I1_HALF_TWO:
2375 assert (opnd->imm.is_fp);
2376 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2377 {
2378 set_other_error (mismatch_detail, idx,
2379 _("floating-point value must be 0.5 or 2.0"));
2380 return 0;
2381 }
2382 break;
2383
2384 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2385 assert (opnd->imm.is_fp);
2386 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2387 {
2388 set_other_error (mismatch_detail, idx,
2389 _("floating-point value must be 0.0 or 1.0"));
2390 return 0;
2391 }
2392 break;
2393
2394 case AARCH64_OPND_SVE_INV_LIMM:
2395 {
2396 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2397 uint64_t uimm = ~opnd->imm.value;
2398 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2399 {
2400 set_other_error (mismatch_detail, idx,
2401 _("immediate out of range"));
2402 return 0;
2403 }
2404 }
2405 break;
2406
2407 case AARCH64_OPND_SVE_LIMM_MOV:
2408 {
2409 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2410 uint64_t uimm = opnd->imm.value;
2411 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2412 {
2413 set_other_error (mismatch_detail, idx,
2414 _("immediate out of range"));
2415 return 0;
2416 }
2417 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2418 {
2419 set_other_error (mismatch_detail, idx,
2420 _("invalid replicated MOV immediate"));
2421 return 0;
2422 }
2423 }
2424 break;
2425
2426 case AARCH64_OPND_SVE_PATTERN_SCALED:
2427 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2428 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2429 {
2430 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2431 return 0;
2432 }
2433 break;
2434
2435 case AARCH64_OPND_SVE_SHLIMM_PRED:
2436 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2437 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2438 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2439 {
2440 set_imm_out_of_range_error (mismatch_detail, idx,
2441 0, 8 * size - 1);
2442 return 0;
2443 }
2444 break;
2445
2446 case AARCH64_OPND_SVE_SHRIMM_PRED:
2447 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2448 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2449 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2450 {
2451 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2452 return 0;
2453 }
2454 break;
2455
2456 default:
2457 break;
2458 }
2459 break;
2460
2461 case AARCH64_OPND_CLASS_SYSTEM:
2462 switch (type)
2463 {
2464 case AARCH64_OPND_PSTATEFIELD:
2465 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2466 /* MSR UAO, #uimm4
2467 MSR PAN, #uimm4
2468 The immediate must be #0 or #1. */
2469 if ((opnd->pstatefield == 0x03 /* UAO. */
2470 || opnd->pstatefield == 0x04 /* PAN. */
2471 || opnd->pstatefield == 0x1a) /* DIT. */
2472 && opnds[1].imm.value > 1)
2473 {
2474 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2475 return 0;
2476 }
2477 /* MSR SPSel, #uimm4
2478 Uses uimm4 as a control value to select the stack pointer: if
2479 bit 0 is set it selects the current exception level's stack
2480 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2481 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2482 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2483 {
2484 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2485 return 0;
2486 }
2487 break;
2488 default:
2489 break;
2490 }
2491 break;
2492
2493 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2494 /* Get the upper bound for the element index. */
2495 if (opcode->op == OP_FCMLA_ELEM)
2496 /* FCMLA index range depends on the vector size of other operands
2497 and is halfed because complex numbers take two elements. */
2498 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2499 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2500 else
2501 num = 16;
2502 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2503
2504 /* Index out-of-range. */
2505 if (!value_in_range_p (opnd->reglane.index, 0, num))
2506 {
2507 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2508 return 0;
2509 }
2510 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2511 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2512 number is encoded in "size:M:Rm":
2513 size <Vm>
2514 00 RESERVED
2515 01 0:Rm
2516 10 M:Rm
2517 11 RESERVED */
2518 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2519 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2520 {
2521 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2522 return 0;
2523 }
2524 break;
2525
2526 case AARCH64_OPND_CLASS_MODIFIED_REG:
2527 assert (idx == 1 || idx == 2);
2528 switch (type)
2529 {
2530 case AARCH64_OPND_Rm_EXT:
2531 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2532 && opnd->shifter.kind != AARCH64_MOD_LSL)
2533 {
2534 set_other_error (mismatch_detail, idx,
2535 _("extend operator expected"));
2536 return 0;
2537 }
2538 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2539 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2540 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2541 case. */
2542 if (!aarch64_stack_pointer_p (opnds + 0)
2543 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2544 {
2545 if (!opnd->shifter.operator_present)
2546 {
2547 set_other_error (mismatch_detail, idx,
2548 _("missing extend operator"));
2549 return 0;
2550 }
2551 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2552 {
2553 set_other_error (mismatch_detail, idx,
2554 _("'LSL' operator not allowed"));
2555 return 0;
2556 }
2557 }
2558 assert (opnd->shifter.operator_present /* Default to LSL. */
2559 || opnd->shifter.kind == AARCH64_MOD_LSL);
2560 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2561 {
2562 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2563 return 0;
2564 }
2565 /* In the 64-bit form, the final register operand is written as Wm
2566 for all but the (possibly omitted) UXTX/LSL and SXTX
2567 operators.
2568 N.B. GAS allows X register to be used with any operator as a
2569 programming convenience. */
2570 if (qualifier == AARCH64_OPND_QLF_X
2571 && opnd->shifter.kind != AARCH64_MOD_LSL
2572 && opnd->shifter.kind != AARCH64_MOD_UXTX
2573 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2574 {
2575 set_other_error (mismatch_detail, idx, _("W register expected"));
2576 return 0;
2577 }
2578 break;
2579
2580 case AARCH64_OPND_Rm_SFT:
2581 /* ROR is not available to the shifted register operand in
2582 arithmetic instructions. */
2583 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2584 {
2585 set_other_error (mismatch_detail, idx,
2586 _("shift operator expected"));
2587 return 0;
2588 }
2589 if (opnd->shifter.kind == AARCH64_MOD_ROR
2590 && opcode->iclass != log_shift)
2591 {
2592 set_other_error (mismatch_detail, idx,
2593 _("'ROR' operator not allowed"));
2594 return 0;
2595 }
2596 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2597 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2598 {
2599 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2600 return 0;
2601 }
2602 break;
2603
2604 default:
2605 break;
2606 }
2607 break;
2608
2609 default:
2610 break;
2611 }
2612
2613 return 1;
2614 }
2615
2616 /* Main entrypoint for the operand constraint checking.
2617
2618 Return 1 if operands of *INST meet the constraint applied by the operand
2619 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2620 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2621 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2622 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2623 error kind when it is notified that an instruction does not pass the check).
2624
2625 Un-determined operand qualifiers may get established during the process. */
2626
2627 int
2628 aarch64_match_operands_constraint (aarch64_inst *inst,
2629 aarch64_operand_error *mismatch_detail)
2630 {
2631 int i;
2632
2633 DEBUG_TRACE ("enter");
2634
2635 /* Check for cases where a source register needs to be the same as the
2636 destination register. Do this before matching qualifiers since if
2637 an instruction has both invalid tying and invalid qualifiers,
2638 the error about qualifiers would suggest several alternative
2639 instructions that also have invalid tying. */
2640 i = inst->opcode->tied_operand;
2641 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2642 {
2643 if (mismatch_detail)
2644 {
2645 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2646 mismatch_detail->index = i;
2647 mismatch_detail->error = NULL;
2648 }
2649 return 0;
2650 }
2651
2652 /* Match operands' qualifier.
2653 *INST has already had qualifier establish for some, if not all, of
2654 its operands; we need to find out whether these established
2655 qualifiers match one of the qualifier sequence in
2656 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2657 with the corresponding qualifier in such a sequence.
2658 Only basic operand constraint checking is done here; the more thorough
2659 constraint checking will carried out by operand_general_constraint_met_p,
2660 which has be to called after this in order to get all of the operands'
2661 qualifiers established. */
2662 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2663 {
2664 DEBUG_TRACE ("FAIL on operand qualifier matching");
2665 if (mismatch_detail)
2666 {
2667 /* Return an error type to indicate that it is the qualifier
2668 matching failure; we don't care about which operand as there
2669 are enough information in the opcode table to reproduce it. */
2670 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2671 mismatch_detail->index = -1;
2672 mismatch_detail->error = NULL;
2673 }
2674 return 0;
2675 }
2676
2677 /* Match operands' constraint. */
2678 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2679 {
2680 enum aarch64_opnd type = inst->opcode->operands[i];
2681 if (type == AARCH64_OPND_NIL)
2682 break;
2683 if (inst->operands[i].skip)
2684 {
2685 DEBUG_TRACE ("skip the incomplete operand %d", i);
2686 continue;
2687 }
2688 if (operand_general_constraint_met_p (inst->operands, i, type,
2689 inst->opcode, mismatch_detail) == 0)
2690 {
2691 DEBUG_TRACE ("FAIL on operand %d", i);
2692 return 0;
2693 }
2694 }
2695
2696 DEBUG_TRACE ("PASS");
2697
2698 return 1;
2699 }
2700
2701 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2702 Also updates the TYPE of each INST->OPERANDS with the corresponding
2703 value of OPCODE->OPERANDS.
2704
2705 Note that some operand qualifiers may need to be manually cleared by
2706 the caller before it further calls the aarch64_opcode_encode; by
2707 doing this, it helps the qualifier matching facilities work
2708 properly. */
2709
2710 const aarch64_opcode*
2711 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2712 {
2713 int i;
2714 const aarch64_opcode *old = inst->opcode;
2715
2716 inst->opcode = opcode;
2717
2718 /* Update the operand types. */
2719 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2720 {
2721 inst->operands[i].type = opcode->operands[i];
2722 if (opcode->operands[i] == AARCH64_OPND_NIL)
2723 break;
2724 }
2725
2726 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2727
2728 return old;
2729 }
2730
2731 int
2732 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2733 {
2734 int i;
2735 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2736 if (operands[i] == operand)
2737 return i;
2738 else if (operands[i] == AARCH64_OPND_NIL)
2739 break;
2740 return -1;
2741 }
2742 \f
/* R0...R30, followed by FOR31.  Expands the register-name macro R for
   numbers 0-30 and substitutes FOR31 for the architecturally special
   register 31 (SP or the zero register, depending on the bank).  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* Integer register name table, indexed [has_zr][is_64][regno]:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  Indexed [is_d][regno].  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2773
2774 /* Return the integer register name.
2775 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2776
2777 static inline const char *
2778 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2779 {
2780 const int has_zr = sp_reg_p ? 0 : 1;
2781 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2782 return int_reg[has_zr][is_64][regno];
2783 }
2784
2785 /* Like get_int_reg_name, but IS_64 is always 1. */
2786
2787 static inline const char *
2788 get_64bit_int_reg_name (int regno, int sp_reg_p)
2789 {
2790 const int has_zr = sp_reg_p ? 0 : 1;
2791 return int_reg[has_zr][1][regno];
2792 }
2793
2794 /* Get the name of the integer offset register in OPND, using the shift type
2795 to decide whether it's a word or doubleword. */
2796
2797 static inline const char *
2798 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2799 {
2800 switch (opnd->shifter.kind)
2801 {
2802 case AARCH64_MOD_UXTW:
2803 case AARCH64_MOD_SXTW:
2804 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2805
2806 case AARCH64_MOD_LSL:
2807 case AARCH64_MOD_SXTX:
2808 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2809
2810 default:
2811 abort ();
2812 }
2813 }
2814
2815 /* Get the name of the SVE vector offset register in OPND, using the operand
2816 qualifier to decide whether the suffix should be .S or .D. */
2817
2818 static inline const char *
2819 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2820 {
2821 assert (qualifier == AARCH64_OPND_QLF_S_S
2822 || qualifier == AARCH64_OPND_QLF_S_D);
2823 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2824 }
2825
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union lets the bit pattern produced by expand_fp_imm be
   reinterpreted as a host floating-point value without violating
   strict-aliasing rules.  */

/* 64-bit pattern <-> double.  */
typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

/* 32-bit pattern <-> float.  */
typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to a single-precision value
   first (see expand_fp_imm), hence this deliberately has the same
   layout as single_conv_t.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2845
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  const uint64_t sign = (imm8 >> 7) & 1;	/* imm8<7>  */
  const uint64_t bit6 = (imm8 >> 6) & 1;	/* imm8<6>  */
  const uint64_t frac = imm8 & 0x3f;		/* imm8<5:0>  */
  uint64_t imm;

  if (size == 8)
    /* Double precision: sign in bit 63, NOT(imm8<6>) in bit 62,
       imm8<6> replicated through bits 61-54, imm8<5:0> in bits 53-48,
       zeroes below.  */
    imm = (sign << 63)
	  | ((bit6 ^ 1) << 62)
	  | ((bit6 ? (uint64_t) 0xff : 0) << 54)
	  | (frac << 48);
  else if (size == 4 || size == 2)
    /* Single precision (half is widened to single): sign in bit 31,
       NOT(imm8<6>) in bit 30, imm8<6> replicated through bits 29-25,
       imm8<5:0> in bits 24-19, zeroes below.  */
    imm = (sign << 31)
	  | ((bit6 ^ 1) << 30)
	  | ((bit6 ? (uint64_t) 0x1f : 0) << 25)
	  | (frac << 19);
  else
    {
      /* An unsupported size.  */
      assert (0);
      imm = 0;
    }

  return imm;
}
2889
2890 /* Produce the string representation of the register list operand *OPND
2891 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2892 the register name that comes before the register number, such as "v". */
2893 static void
2894 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2895 const char *prefix)
2896 {
2897 const int num_regs = opnd->reglist.num_regs;
2898 const int first_reg = opnd->reglist.first_regno;
2899 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2900 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2901 char tb[8]; /* Temporary buffer. */
2902
2903 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2904 assert (num_regs >= 1 && num_regs <= 4);
2905
2906 /* Prepare the index if any. */
2907 if (opnd->reglist.has_index)
2908 /* PR 21096: The %100 is to silence a warning about possible truncation. */
2909 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
2910 else
2911 tb[0] = '\0';
2912
2913 /* The hyphenated form is preferred for disassembly if there are
2914 more than two registers in the list, and the register numbers
2915 are monotonically increasing in increments of one. */
2916 if (num_regs > 2 && last_reg > first_reg)
2917 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2918 prefix, last_reg, qlf_name, tb);
2919 else
2920 {
2921 const int reg0 = first_reg;
2922 const int reg1 = (first_reg + 1) & 0x1f;
2923 const int reg2 = (first_reg + 2) & 0x1f;
2924 const int reg3 = (first_reg + 3) & 0x1f;
2925
2926 switch (num_regs)
2927 {
2928 case 1:
2929 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2930 break;
2931 case 2:
2932 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2933 prefix, reg1, qlf_name, tb);
2934 break;
2935 case 3:
2936 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2937 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2938 prefix, reg2, qlf_name, tb);
2939 break;
2940 case 4:
2941 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2942 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2943 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2944 break;
2945 }
2946 }
2947 }
2948
2949 /* Print the register+immediate address in OPND to BUF, which has SIZE
2950 characters. BASE is the name of the base register. */
2951
2952 static void
2953 print_immediate_offset_address (char *buf, size_t size,
2954 const aarch64_opnd_info *opnd,
2955 const char *base)
2956 {
2957 if (opnd->addr.writeback)
2958 {
2959 if (opnd->addr.preind)
2960 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
2961 else
2962 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
2963 }
2964 else
2965 {
2966 if (opnd->shifter.operator_present)
2967 {
2968 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2969 snprintf (buf, size, "[%s, #%d, mul vl]",
2970 base, opnd->addr.offset.imm);
2971 }
2972 else if (opnd->addr.offset.imm)
2973 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
2974 else
2975 snprintf (buf, size, "[%s]", base);
2976 }
2977 }
2978
2979 /* Produce the string representation of the register offset address operand
2980 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2981 the names of the base and offset registers. */
2982 static void
2983 print_register_offset_address (char *buf, size_t size,
2984 const aarch64_opnd_info *opnd,
2985 const char *base, const char *offset)
2986 {
2987 char tb[16]; /* Temporary buffer. */
2988 bfd_boolean print_extend_p = TRUE;
2989 bfd_boolean print_amount_p = TRUE;
2990 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2991
2992 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2993 || !opnd->shifter.amount_present))
2994 {
2995 /* Not print the shift/extend amount when the amount is zero and
2996 when it is not the special case of 8-bit load/store instruction. */
2997 print_amount_p = FALSE;
2998 /* Likewise, no need to print the shift operator LSL in such a
2999 situation. */
3000 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3001 print_extend_p = FALSE;
3002 }
3003
3004 /* Prepare for the extend/shift. */
3005 if (print_extend_p)
3006 {
3007 if (print_amount_p)
3008 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3009 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3010 (opnd->shifter.amount % 100));
3011 else
3012 snprintf (tb, sizeof (tb), ", %s", shift_name);
3013 }
3014 else
3015 tb[0] = '\0';
3016
3017 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3018 }
3019
3020 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3021 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3022 PC, PCREL_P and ADDRESS are used to pass in and return information about
3023 the PC-relative address calculation, where the PC value is passed in
3024 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3025 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3026 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3027
3028 The function serves both the disassembler and the assembler diagnostics
3029 issuer, which is the reason why it lives in this file. */
3030
3031 void
3032 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3033 const aarch64_opcode *opcode,
3034 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3035 bfd_vma *address)
3036 {
3037 unsigned int i, num_conds;
3038 const char *name = NULL;
3039 const aarch64_opnd_info *opnd = opnds + idx;
3040 enum aarch64_modifier_kind kind;
3041 uint64_t addr, enum_value;
3042
3043 buf[0] = '\0';
3044 if (pcrel_p)
3045 *pcrel_p = 0;
3046
3047 switch (opnd->type)
3048 {
3049 case AARCH64_OPND_Rd:
3050 case AARCH64_OPND_Rn:
3051 case AARCH64_OPND_Rm:
3052 case AARCH64_OPND_Rt:
3053 case AARCH64_OPND_Rt2:
3054 case AARCH64_OPND_Rs:
3055 case AARCH64_OPND_Ra:
3056 case AARCH64_OPND_Rt_SYS:
3057 case AARCH64_OPND_PAIRREG:
3058 case AARCH64_OPND_SVE_Rm:
3059 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3060 the <ic_op>, therefore we use opnd->present to override the
3061 generic optional-ness information. */
3062 if (opnd->type == AARCH64_OPND_Rt_SYS)
3063 {
3064 if (!opnd->present)
3065 break;
3066 }
3067 /* Omit the operand, e.g. RET. */
3068 else if (optional_operand_p (opcode, idx)
3069 && (opnd->reg.regno
3070 == get_optional_operand_default_value (opcode)))
3071 break;
3072 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3073 || opnd->qualifier == AARCH64_OPND_QLF_X);
3074 snprintf (buf, size, "%s",
3075 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3076 break;
3077
3078 case AARCH64_OPND_Rd_SP:
3079 case AARCH64_OPND_Rn_SP:
3080 case AARCH64_OPND_SVE_Rn_SP:
3081 case AARCH64_OPND_Rm_SP:
3082 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3083 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3084 || opnd->qualifier == AARCH64_OPND_QLF_X
3085 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3086 snprintf (buf, size, "%s",
3087 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3088 break;
3089
3090 case AARCH64_OPND_Rm_EXT:
3091 kind = opnd->shifter.kind;
3092 assert (idx == 1 || idx == 2);
3093 if ((aarch64_stack_pointer_p (opnds)
3094 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3095 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3096 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3097 && kind == AARCH64_MOD_UXTW)
3098 || (opnd->qualifier == AARCH64_OPND_QLF_X
3099 && kind == AARCH64_MOD_UXTX)))
3100 {
3101 /* 'LSL' is the preferred form in this case. */
3102 kind = AARCH64_MOD_LSL;
3103 if (opnd->shifter.amount == 0)
3104 {
3105 /* Shifter omitted. */
3106 snprintf (buf, size, "%s",
3107 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3108 break;
3109 }
3110 }
3111 if (opnd->shifter.amount)
3112 snprintf (buf, size, "%s, %s #%" PRIi64,
3113 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3114 aarch64_operand_modifiers[kind].name,
3115 opnd->shifter.amount);
3116 else
3117 snprintf (buf, size, "%s, %s",
3118 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3119 aarch64_operand_modifiers[kind].name);
3120 break;
3121
3122 case AARCH64_OPND_Rm_SFT:
3123 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3124 || opnd->qualifier == AARCH64_OPND_QLF_X);
3125 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3126 snprintf (buf, size, "%s",
3127 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3128 else
3129 snprintf (buf, size, "%s, %s #%" PRIi64,
3130 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3131 aarch64_operand_modifiers[opnd->shifter.kind].name,
3132 opnd->shifter.amount);
3133 break;
3134
3135 case AARCH64_OPND_Fd:
3136 case AARCH64_OPND_Fn:
3137 case AARCH64_OPND_Fm:
3138 case AARCH64_OPND_Fa:
3139 case AARCH64_OPND_Ft:
3140 case AARCH64_OPND_Ft2:
3141 case AARCH64_OPND_Sd:
3142 case AARCH64_OPND_Sn:
3143 case AARCH64_OPND_Sm:
3144 case AARCH64_OPND_SVE_VZn:
3145 case AARCH64_OPND_SVE_Vd:
3146 case AARCH64_OPND_SVE_Vm:
3147 case AARCH64_OPND_SVE_Vn:
3148 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3149 opnd->reg.regno);
3150 break;
3151
3152 case AARCH64_OPND_Va:
3153 case AARCH64_OPND_Vd:
3154 case AARCH64_OPND_Vn:
3155 case AARCH64_OPND_Vm:
3156 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3157 aarch64_get_qualifier_name (opnd->qualifier));
3158 break;
3159
3160 case AARCH64_OPND_Ed:
3161 case AARCH64_OPND_En:
3162 case AARCH64_OPND_Em:
3163 case AARCH64_OPND_SM3_IMM2:
3164 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3165 aarch64_get_qualifier_name (opnd->qualifier),
3166 opnd->reglane.index);
3167 break;
3168
3169 case AARCH64_OPND_VdD1:
3170 case AARCH64_OPND_VnD1:
3171 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3172 break;
3173
3174 case AARCH64_OPND_LVn:
3175 case AARCH64_OPND_LVt:
3176 case AARCH64_OPND_LVt_AL:
3177 case AARCH64_OPND_LEt:
3178 print_register_list (buf, size, opnd, "v");
3179 break;
3180
3181 case AARCH64_OPND_SVE_Pd:
3182 case AARCH64_OPND_SVE_Pg3:
3183 case AARCH64_OPND_SVE_Pg4_5:
3184 case AARCH64_OPND_SVE_Pg4_10:
3185 case AARCH64_OPND_SVE_Pg4_16:
3186 case AARCH64_OPND_SVE_Pm:
3187 case AARCH64_OPND_SVE_Pn:
3188 case AARCH64_OPND_SVE_Pt:
3189 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3190 snprintf (buf, size, "p%d", opnd->reg.regno);
3191 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3192 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3193 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3194 aarch64_get_qualifier_name (opnd->qualifier));
3195 else
3196 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3197 aarch64_get_qualifier_name (opnd->qualifier));
3198 break;
3199
3200 case AARCH64_OPND_SVE_Za_5:
3201 case AARCH64_OPND_SVE_Za_16:
3202 case AARCH64_OPND_SVE_Zd:
3203 case AARCH64_OPND_SVE_Zm_5:
3204 case AARCH64_OPND_SVE_Zm_16:
3205 case AARCH64_OPND_SVE_Zn:
3206 case AARCH64_OPND_SVE_Zt:
3207 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3208 snprintf (buf, size, "z%d", opnd->reg.regno);
3209 else
3210 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3211 aarch64_get_qualifier_name (opnd->qualifier));
3212 break;
3213
3214 case AARCH64_OPND_SVE_ZnxN:
3215 case AARCH64_OPND_SVE_ZtxN:
3216 print_register_list (buf, size, opnd, "z");
3217 break;
3218
3219 case AARCH64_OPND_SVE_Zm3_INDEX:
3220 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3221 case AARCH64_OPND_SVE_Zm4_INDEX:
3222 case AARCH64_OPND_SVE_Zn_INDEX:
3223 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3224 aarch64_get_qualifier_name (opnd->qualifier),
3225 opnd->reglane.index);
3226 break;
3227
3228 case AARCH64_OPND_CRn:
3229 case AARCH64_OPND_CRm:
3230 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3231 break;
3232
3233 case AARCH64_OPND_IDX:
3234 case AARCH64_OPND_MASK:
3235 case AARCH64_OPND_IMM:
3236 case AARCH64_OPND_IMM_2:
3237 case AARCH64_OPND_WIDTH:
3238 case AARCH64_OPND_UIMM3_OP1:
3239 case AARCH64_OPND_UIMM3_OP2:
3240 case AARCH64_OPND_BIT_NUM:
3241 case AARCH64_OPND_IMM_VLSL:
3242 case AARCH64_OPND_IMM_VLSR:
3243 case AARCH64_OPND_SHLL_IMM:
3244 case AARCH64_OPND_IMM0:
3245 case AARCH64_OPND_IMMR:
3246 case AARCH64_OPND_IMMS:
3247 case AARCH64_OPND_FBITS:
3248 case AARCH64_OPND_SIMM5:
3249 case AARCH64_OPND_SVE_SHLIMM_PRED:
3250 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3251 case AARCH64_OPND_SVE_SHRIMM_PRED:
3252 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3253 case AARCH64_OPND_SVE_SIMM5:
3254 case AARCH64_OPND_SVE_SIMM5B:
3255 case AARCH64_OPND_SVE_SIMM6:
3256 case AARCH64_OPND_SVE_SIMM8:
3257 case AARCH64_OPND_SVE_UIMM3:
3258 case AARCH64_OPND_SVE_UIMM7:
3259 case AARCH64_OPND_SVE_UIMM8:
3260 case AARCH64_OPND_SVE_UIMM8_53:
3261 case AARCH64_OPND_IMM_ROT1:
3262 case AARCH64_OPND_IMM_ROT2:
3263 case AARCH64_OPND_IMM_ROT3:
3264 case AARCH64_OPND_SVE_IMM_ROT1:
3265 case AARCH64_OPND_SVE_IMM_ROT2:
3266 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3267 break;
3268
3269 case AARCH64_OPND_SVE_I1_HALF_ONE:
3270 case AARCH64_OPND_SVE_I1_HALF_TWO:
3271 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3272 {
3273 single_conv_t c;
3274 c.i = opnd->imm.value;
3275 snprintf (buf, size, "#%.1f", c.f);
3276 break;
3277 }
3278
3279 case AARCH64_OPND_SVE_PATTERN:
3280 if (optional_operand_p (opcode, idx)
3281 && opnd->imm.value == get_optional_operand_default_value (opcode))
3282 break;
3283 enum_value = opnd->imm.value;
3284 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3285 if (aarch64_sve_pattern_array[enum_value])
3286 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3287 else
3288 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3289 break;
3290
3291 case AARCH64_OPND_SVE_PATTERN_SCALED:
3292 if (optional_operand_p (opcode, idx)
3293 && !opnd->shifter.operator_present
3294 && opnd->imm.value == get_optional_operand_default_value (opcode))
3295 break;
3296 enum_value = opnd->imm.value;
3297 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3298 if (aarch64_sve_pattern_array[opnd->imm.value])
3299 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3300 else
3301 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3302 if (opnd->shifter.operator_present)
3303 {
3304 size_t len = strlen (buf);
3305 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3306 aarch64_operand_modifiers[opnd->shifter.kind].name,
3307 opnd->shifter.amount);
3308 }
3309 break;
3310
3311 case AARCH64_OPND_SVE_PRFOP:
3312 enum_value = opnd->imm.value;
3313 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3314 if (aarch64_sve_prfop_array[enum_value])
3315 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3316 else
3317 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3318 break;
3319
3320 case AARCH64_OPND_IMM_MOV:
3321 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3322 {
3323 case 4: /* e.g. MOV Wd, #<imm32>. */
3324 {
3325 int imm32 = opnd->imm.value;
3326 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3327 }
3328 break;
3329 case 8: /* e.g. MOV Xd, #<imm64>. */
3330 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3331 opnd->imm.value, opnd->imm.value);
3332 break;
3333 default: assert (0);
3334 }
3335 break;
3336
3337 case AARCH64_OPND_FPIMM0:
3338 snprintf (buf, size, "#0.0");
3339 break;
3340
3341 case AARCH64_OPND_LIMM:
3342 case AARCH64_OPND_AIMM:
3343 case AARCH64_OPND_HALF:
3344 case AARCH64_OPND_SVE_INV_LIMM:
3345 case AARCH64_OPND_SVE_LIMM:
3346 case AARCH64_OPND_SVE_LIMM_MOV:
3347 if (opnd->shifter.amount)
3348 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3349 opnd->shifter.amount);
3350 else
3351 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3352 break;
3353
3354 case AARCH64_OPND_SIMD_IMM:
3355 case AARCH64_OPND_SIMD_IMM_SFT:
3356 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3357 || opnd->shifter.kind == AARCH64_MOD_NONE)
3358 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3359 else
3360 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3361 aarch64_operand_modifiers[opnd->shifter.kind].name,
3362 opnd->shifter.amount);
3363 break;
3364
3365 case AARCH64_OPND_SVE_AIMM:
3366 case AARCH64_OPND_SVE_ASIMM:
3367 if (opnd->shifter.amount)
3368 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3369 opnd->shifter.amount);
3370 else
3371 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3372 break;
3373
3374 case AARCH64_OPND_FPIMM:
3375 case AARCH64_OPND_SIMD_FPIMM:
3376 case AARCH64_OPND_SVE_FPIMM8:
3377 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3378 {
3379 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3380 {
3381 half_conv_t c;
3382 c.i = expand_fp_imm (2, opnd->imm.value);
3383 snprintf (buf, size, "#%.18e", c.f);
3384 }
3385 break;
3386 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3387 {
3388 single_conv_t c;
3389 c.i = expand_fp_imm (4, opnd->imm.value);
3390 snprintf (buf, size, "#%.18e", c.f);
3391 }
3392 break;
3393 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3394 {
3395 double_conv_t c;
3396 c.i = expand_fp_imm (8, opnd->imm.value);
3397 snprintf (buf, size, "#%.18e", c.d);
3398 }
3399 break;
3400 default: assert (0);
3401 }
3402 break;
3403
3404 case AARCH64_OPND_CCMP_IMM:
3405 case AARCH64_OPND_NZCV:
3406 case AARCH64_OPND_EXCEPTION:
3407 case AARCH64_OPND_UIMM4:
3408 case AARCH64_OPND_UIMM7:
3409 if (optional_operand_p (opcode, idx) == TRUE
3410 && (opnd->imm.value ==
3411 (int64_t) get_optional_operand_default_value (opcode)))
3412 /* Omit the operand, e.g. DCPS1. */
3413 break;
3414 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3415 break;
3416
3417 case AARCH64_OPND_COND:
3418 case AARCH64_OPND_COND1:
3419 snprintf (buf, size, "%s", opnd->cond->names[0]);
3420 num_conds = ARRAY_SIZE (opnd->cond->names);
3421 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3422 {
3423 size_t len = strlen (buf);
3424 if (i == 1)
3425 snprintf (buf + len, size - len, " // %s = %s",
3426 opnd->cond->names[0], opnd->cond->names[i]);
3427 else
3428 snprintf (buf + len, size - len, ", %s",
3429 opnd->cond->names[i]);
3430 }
3431 break;
3432
3433 case AARCH64_OPND_ADDR_ADRP:
3434 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3435 + opnd->imm.value;
3436 if (pcrel_p)
3437 *pcrel_p = 1;
3438 if (address)
3439 *address = addr;
3440 /* This is not necessary during the disassembling, as print_address_func
3441 in the disassemble_info will take care of the printing. But some
3442 other callers may be still interested in getting the string in *STR,
3443 so here we do snprintf regardless. */
3444 snprintf (buf, size, "#0x%" PRIx64, addr);
3445 break;
3446
3447 case AARCH64_OPND_ADDR_PCREL14:
3448 case AARCH64_OPND_ADDR_PCREL19:
3449 case AARCH64_OPND_ADDR_PCREL21:
3450 case AARCH64_OPND_ADDR_PCREL26:
3451 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3452 if (pcrel_p)
3453 *pcrel_p = 1;
3454 if (address)
3455 *address = addr;
3456 /* This is not necessary during the disassembling, as print_address_func
3457 in the disassemble_info will take care of the printing. But some
3458 other callers may be still interested in getting the string in *STR,
3459 so here we do snprintf regardless. */
3460 snprintf (buf, size, "#0x%" PRIx64, addr);
3461 break;
3462
3463 case AARCH64_OPND_ADDR_SIMPLE:
3464 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3465 case AARCH64_OPND_SIMD_ADDR_POST:
3466 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3467 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3468 {
3469 if (opnd->addr.offset.is_reg)
3470 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3471 else
3472 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3473 }
3474 else
3475 snprintf (buf, size, "[%s]", name);
3476 break;
3477
3478 case AARCH64_OPND_ADDR_REGOFF:
3479 case AARCH64_OPND_SVE_ADDR_RR:
3480 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3481 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3482 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3483 case AARCH64_OPND_SVE_ADDR_RX:
3484 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3485 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3486 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3487 print_register_offset_address
3488 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3489 get_offset_int_reg_name (opnd));
3490 break;
3491
3492 case AARCH64_OPND_SVE_ADDR_RZ:
3493 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3494 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3495 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3496 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3497 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3498 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3499 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3500 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3501 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3502 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3503 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3504 print_register_offset_address
3505 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3506 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3507 break;
3508
3509 case AARCH64_OPND_ADDR_SIMM7:
3510 case AARCH64_OPND_ADDR_SIMM9:
3511 case AARCH64_OPND_ADDR_SIMM9_2:
3512 case AARCH64_OPND_ADDR_SIMM10:
3513 case AARCH64_OPND_ADDR_OFFSET:
3514 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3515 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3516 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3517 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3518 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3519 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3520 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3521 case AARCH64_OPND_SVE_ADDR_RI_U6:
3522 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3523 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3524 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3525 print_immediate_offset_address
3526 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3527 break;
3528
3529 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3530 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3531 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3532 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3533 print_immediate_offset_address
3534 (buf, size, opnd,
3535 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3536 break;
3537
3538 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3539 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3540 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3541 print_register_offset_address
3542 (buf, size, opnd,
3543 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3544 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3545 break;
3546
3547 case AARCH64_OPND_ADDR_UIMM12:
3548 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3549 if (opnd->addr.offset.imm)
3550 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3551 else
3552 snprintf (buf, size, "[%s]", name);
3553 break;
3554
3555 case AARCH64_OPND_SYSREG:
3556 for (i = 0; aarch64_sys_regs[i].name; ++i)
3557 if (aarch64_sys_regs[i].value == opnd->sysreg
3558 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
3559 break;
3560 if (aarch64_sys_regs[i].name)
3561 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
3562 else
3563 {
3564 /* Implementation defined system register. */
3565 unsigned int value = opnd->sysreg;
3566 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3567 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3568 value & 0x7);
3569 }
3570 break;
3571
3572 case AARCH64_OPND_PSTATEFIELD:
3573 for (i = 0; aarch64_pstatefields[i].name; ++i)
3574 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3575 break;
3576 assert (aarch64_pstatefields[i].name);
3577 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3578 break;
3579
3580 case AARCH64_OPND_SYSREG_AT:
3581 case AARCH64_OPND_SYSREG_DC:
3582 case AARCH64_OPND_SYSREG_IC:
3583 case AARCH64_OPND_SYSREG_TLBI:
3584 snprintf (buf, size, "%s", opnd->sysins_op->name);
3585 break;
3586
3587 case AARCH64_OPND_BARRIER:
3588 snprintf (buf, size, "%s", opnd->barrier->name);
3589 break;
3590
3591 case AARCH64_OPND_BARRIER_ISB:
3592 /* Operand can be omitted, e.g. in DCPS1. */
3593 if (! optional_operand_p (opcode, idx)
3594 || (opnd->barrier->value
3595 != get_optional_operand_default_value (opcode)))
3596 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3597 break;
3598
3599 case AARCH64_OPND_PRFOP:
3600 if (opnd->prfop->name != NULL)
3601 snprintf (buf, size, "%s", opnd->prfop->name);
3602 else
3603 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3604 break;
3605
3606 case AARCH64_OPND_BARRIER_PSB:
3607 snprintf (buf, size, "%s", opnd->hint_option->name);
3608 break;
3609
3610 default:
3611 assert (0);
3612 }
3613 }
3614 \f
/* Pack a system-register / system-instruction encoding into a single
   integer from its five instruction fields.  The fields are first
   placed at bits [19:5] (op0 at the top, then op1, CRn, CRm, op2) and
   the whole value is shifted right by 5, so the result occupies the
   low 15 bits with op2 in bits [2:0].  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* For section 3.9.3 "Instructions for Accessing Special Purpose
   Registers": op0 is always 3 and CRn is always 4, so only op1, CRm
   and op2 vary.  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* For section 3.9.10 "System Instructions": op0 is always 1.  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Readable aliases for the CRn/CRm register-number field values 0-15,
   used in the system register tables below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

/* Flags for the entries of the system register tables.  Any prior
   definitions (e.g. from system headers) are discarded first so the
   local meanings below are authoritative.  */
#ifdef F_DEPRECATED
#undef F_DEPRECATED
#endif
#define F_DEPRECATED 0x1 /* Deprecated system register.  */

#ifdef F_ARCHEXT
#undef F_ARCHEXT
#endif
#define F_ARCHEXT 0x2 /* Architecture dependent system register.  */

#ifdef F_HASXT
#undef F_HASXT
#endif
#define F_HASXT 0x4 /* System instruction register <Xt>
			operand.  */
3654
3655
/* TODO: two more issues still need to be resolved:
   1. handle read-only and write-only system registers;
   2. handle CPU-implementation-defined system registers.  */
3659 const aarch64_sys_reg aarch64_sys_regs [] =
3660 {
3661 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3662 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3663 { "elr_el1", CPEN_(0,C0,1), 0 },
3664 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3665 { "sp_el0", CPEN_(0,C1,0), 0 },
3666 { "spsel", CPEN_(0,C2,0), 0 },
3667 { "daif", CPEN_(3,C2,1), 0 },
3668 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
3669 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3670 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3671 { "nzcv", CPEN_(3,C2,0), 0 },
3672 { "fpcr", CPEN_(3,C4,0), 0 },
3673 { "fpsr", CPEN_(3,C4,1), 0 },
3674 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3675 { "dlr_el0", CPEN_(3,C5,1), 0 },
3676 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3677 { "elr_el2", CPEN_(4,C0,1), 0 },
3678 { "sp_el1", CPEN_(4,C1,0), 0 },
3679 { "spsr_irq", CPEN_(4,C3,0), 0 },
3680 { "spsr_abt", CPEN_(4,C3,1), 0 },
3681 { "spsr_und", CPEN_(4,C3,2), 0 },
3682 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3683 { "spsr_el3", CPEN_(6,C0,0), 0 },
3684 { "elr_el3", CPEN_(6,C0,1), 0 },
3685 { "sp_el2", CPEN_(6,C1,0), 0 },
3686 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3687 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3688 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
3689 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
3690 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
3691 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
3692 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
3693 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
3694 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
3695 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
3696 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
3697 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
3698 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
3699 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
3700 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
3701 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
3702 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
3703 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
3704 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
3705 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
3706 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
3707 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
3708 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
3709 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
3710 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
3711 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
3712 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
3713 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
3714 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
3715 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
3716 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
3717 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
3718 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
3719 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
3720 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
3721 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
3722 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
3723 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
3724 { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT }, /* RO */
3725 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
3726 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
3727 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3728 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3729 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3730 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3731 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3732 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3733 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3734 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3735 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3736 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3737 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3738 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3739 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3740 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3741 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3742 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3743 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3744 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3745 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3746 { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3747 { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3748 { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3749 { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3750 { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3751 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3752 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3753 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3754 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3755 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3756 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3757 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3758 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3759 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3760 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3761 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3762 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3763 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3764 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3765 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3766 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3767 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3768 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3769 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3770 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3771 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3772 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3773 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3774 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3775 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3776 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3777 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3778 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3779 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3780 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3781 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3782 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3783 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3784 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3785 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3786 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
3787 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3788 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
3789 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3790 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
3791 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3792 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3793 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3794 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3795 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3796 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3797 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3798 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3799 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3800 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3801 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3802 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3803 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3804 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3805 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3806 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3807 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3808 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3809 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3810 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3811 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3812 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3813 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3814 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
3815 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
3816 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
3817 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3818 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3819 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3820 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
3821 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3822 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3823 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3824 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3825 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3826 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3827 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
3828 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3829 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3830 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3831 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3832 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
3833 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
3834 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
3835 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3836 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3837 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3838 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3839 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3840 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3841 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3842 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3843 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3844 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3845 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3846 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3847 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3848 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3849 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3850 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3851 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3852 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3853 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3854 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3855 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3856 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3857 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3858 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3859 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3860 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3861 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3862 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3863 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3864 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3865 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3866 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3867 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3868 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3869 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3870 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3871 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3872 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3873 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3874 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3875 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3876 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3877 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3878 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3879 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3880 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3881 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3882 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3883 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3884 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3885 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3886 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3887 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3888 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3889 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3890 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3891 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3892 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3893 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3894 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3895 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3896 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3897 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3898 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3899 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3900 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3901 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3902 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3903 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3904 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3905 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3906 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3907 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3908 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3909 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3910 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3911 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3912 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3913 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3914 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3915 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3916 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3917 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3918 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3919 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3920 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3921 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3922 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3923 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3924 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3925 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3926 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3927 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3928 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3929 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3930 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3931 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3932 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3933 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3934 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3935 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3936 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3937 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3938 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3939 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3940 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3941 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3942 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3943 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3944 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3945 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3946 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3947 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3948 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3949 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3950 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3951 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3952 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3953 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3954 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3955 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3956 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3957 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3958 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3959 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3960 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3961 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3962 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3963 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3964 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3965 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3966 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3967 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3968 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3969 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3970 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3971 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3972 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3973 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3974 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3975 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3976 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3977 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3978 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3979 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3980 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3981 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3982 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3983 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3984 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3985 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3986 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3987 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3988 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3989 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3990 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3991 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3992 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3993 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3994 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3995 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3996 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3997 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3998 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3999 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
4000 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
4001 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
4002 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
4003 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
4004 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
4005 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
4006 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
4007 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
4008 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
4009 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
4010 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
4011 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
4012 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
4013 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
4014 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
4015 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
4016 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
4017 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
4018 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
4019 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
4020 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
4021 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
4022 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
4023 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
4024 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
4025 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
4026 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
4027 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
4028 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
4029 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
4030 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
4031 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
4032 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
4033 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
4034 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
4035 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
4036 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
4037
4038 { "dit", CPEN_ (3, C2, 5), F_ARCHEXT },
4039 { "vstcr_el2", CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
4040 { "vsttbr_el2", CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
4041 { "cnthvs_tval_el2", CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
4042 { "cnthvs_cval_el2", CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
4043 { "cnthvs_ctl_el2", CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
4044 { "cnthps_tval_el2", CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
4045 { "cnthps_cval_el2", CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
4046 { "cnthps_ctl_el2", CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
4047 { "sder32_el2", CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
4048 { "vncr_el2", CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
4049 { 0, CPENC(0,0,0,0,0), 0 },
4050 };
4051
4052 bfd_boolean
4053 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4054 {
4055 return (reg->flags & F_DEPRECATED) != 0;
4056 }
4057
/* Return TRUE if the system register REG is available under the enabled
   feature set FEATURES.  Registers without the F_ARCHEXT flag belong to
   the base architecture and are always available; every other register
   is matched by its exact encoding against the architecture extension
   that introduced it.  The CPENC/CPEN_/CPENS values below mirror the
   entries in the aarch64_sys_regs table above — keep the two in sync.  */
bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  /* Base-architecture register: always supported.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
     ERXMISC0_EL1 AND ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  /* ARMv8.3 Pointer authentication keys.  */
  if ((reg->value == CPENC (3, 0, C2, C1, 0)
       || reg->value == CPENC (3, 0, C2, C1, 1)
       || reg->value == CPENC (3, 0, C2, C1, 2)
       || reg->value == CPENC (3, 0, C2, C1, 3)
       || reg->value == CPENC (3, 0, C2, C2, 0)
       || reg->value == CPENC (3, 0, C2, C2, 1)
       || reg->value == CPENC (3, 0, C2, C2, 2)
       || reg->value == CPENC (3, 0, C2, C2, 3)
       || reg->value == CPENC (3, 0, C2, C3, 0)
       || reg->value == CPENC (3, 0, C2, C3, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
    return FALSE;

  /* SVE.  */
  if ((reg->value == CPENC (3, 0, C0, C4, 4)
       || reg->value == CPENC (3, 0, C1, C2, 0)
       || reg->value == CPENC (3, 4, C1, C2, 0)
       || reg->value == CPENC (3, 6, C1, C2, 0)
       || reg->value == CPENC (3, 5, C1, C2, 0)
       || reg->value == CPENC (3, 0, C0, C0, 7))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
    return FALSE;

  /* ARMv8.4 features.  */

  /* PSTATE.DIT.  */
  if (reg->value == CPEN_ (3, C2, 5)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* Virtualization extensions.  */
  if ((reg->value == CPENC(3, 4, C2, C6, 2)
       || reg->value == CPENC(3, 4, C2, C6, 0)
       || reg->value == CPENC(3, 4, C14, C4, 0)
       || reg->value == CPENC(3, 4, C14, C4, 2)
       || reg->value == CPENC(3, 4, C14, C4, 1)
       || reg->value == CPENC(3, 4, C14, C5, 0)
       || reg->value == CPENC(3, 4, C14, C5, 2)
       || reg->value == CPENC(3, 4, C14, C5, 1)
       || reg->value == CPENC(3, 4, C1, C3, 1)
       || reg->value == CPENC(3, 4, C2, C2, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* ARMv8.4 TLB instructions.  */
  if ((reg->value == CPENS (0, C8, C1, 0)
       || reg->value == CPENS (0, C8, C1, 1)
       || reg->value == CPENS (0, C8, C1, 2)
       || reg->value == CPENS (0, C8, C1, 3)
       || reg->value == CPENS (0, C8, C1, 5)
       || reg->value == CPENS (0, C8, C1, 7)
       || reg->value == CPENS (4, C8, C4, 0)
       || reg->value == CPENS (4, C8, C4, 4)
       || reg->value == CPENS (4, C8, C1, 1)
       || reg->value == CPENS (4, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 6)
       || reg->value == CPENS (6, C8, C1, 1)
       || reg->value == CPENS (6, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 0)
       || reg->value == CPENS (4, C8, C1, 4)
       || reg->value == CPENS (6, C8, C1, 0)
       || reg->value == CPENS (0, C8, C6, 1)
       || reg->value == CPENS (0, C8, C6, 3)
       || reg->value == CPENS (0, C8, C6, 5)
       || reg->value == CPENS (0, C8, C6, 7)
       || reg->value == CPENS (0, C8, C2, 1)
       || reg->value == CPENS (0, C8, C2, 3)
       || reg->value == CPENS (0, C8, C2, 5)
       || reg->value == CPENS (0, C8, C2, 7)
       || reg->value == CPENS (0, C8, C5, 1)
       || reg->value == CPENS (0, C8, C5, 3)
       || reg->value == CPENS (0, C8, C5, 5)
       || reg->value == CPENS (0, C8, C5, 7)
       || reg->value == CPENS (4, C8, C0, 2)
       || reg->value == CPENS (4, C8, C0, 6)
       || reg->value == CPENS (4, C8, C4, 2)
       || reg->value == CPENS (4, C8, C4, 6)
       || reg->value == CPENS (4, C8, C4, 3)
       || reg->value == CPENS (4, C8, C4, 7)
       || reg->value == CPENS (4, C8, C6, 1)
       || reg->value == CPENS (4, C8, C6, 5)
       || reg->value == CPENS (4, C8, C2, 1)
       || reg->value == CPENS (4, C8, C2, 5)
       || reg->value == CPENS (4, C8, C5, 1)
       || reg->value == CPENS (4, C8, C5, 5)
       || reg->value == CPENS (6, C8, C6, 1)
       || reg->value == CPENS (6, C8, C6, 5)
       || reg->value == CPENS (6, C8, C2, 1)
       || reg->value == CPENS (6, C8, C2, 5)
       || reg->value == CPENS (6, C8, C5, 1)
       || reg->value == CPENS (6, C8, C5, 5))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* No gating extension matched: the register is available.  */
  return TRUE;
}
4259
/* The CPENC below is fairly misleading, the fields
   here are not in CPENC form.  They are in op2op1 form.  The fields are encoded
   by ins_pstatefield, which just shifts the value by the width of the fields
   in a loop.  So if you CPENC them only the first value will be set, the rest
   are masked out to 0.  As an example.  op2 = 3, op1=2. CPENC would produce a
   value of 0b110000000001000000 (0x30040) while what you want is
   0b011010 (0x1a).  */

/* PSTATE fields accepted by the MSR (immediate) instruction.  Entries
   flagged F_ARCHEXT are gated by aarch64_pstatefield_supported_p.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",            0x05,	0 },
  { "daifset",          0x1e,	0 },
  { "daifclr",          0x1f,	0 },
  { "pan",              0x04,	F_ARCHEXT },	/* ARMv8.1 PAN.  */
  { "uao",              0x03,	F_ARCHEXT },	/* ARMv8.2 UAO.  */
  { "dit",              0x1a,	F_ARCHEXT },	/* ARMv8.4 DIT.  */
  { 0,          CPENC(0,0,0,0,0), 0 },		/* Sentinel.  */
};
4277
4278 bfd_boolean
4279 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4280 const aarch64_sys_reg *reg)
4281 {
4282 if (!(reg->flags & F_ARCHEXT))
4283 return TRUE;
4284
4285 /* PAN. Values are from aarch64_pstatefields. */
4286 if (reg->value == 0x04
4287 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4288 return FALSE;
4289
4290 /* UAO. Values are from aarch64_pstatefields. */
4291 if (reg->value == 0x03
4292 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4293 return FALSE;
4294
4295 /* DIT. Values are from aarch64_pstatefields. */
4296 if (reg->value == 0x1a
4297 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4298 return FALSE;
4299
4300 return TRUE;
4301 }
4302
/* Operands of the IC (instruction-cache maintenance) instruction.
   F_HASXT marks operations that take an Xt register operand.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
4310
/* Operands of the DC (data-cache maintenance) instruction.  All take an
   Xt operand (F_HASXT); entries with F_ARCHEXT are additionally gated by
   aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },	/* ARMv8.2 DC CVAP.  */
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
4324
/* Operands of the AT (address translation) instruction.  All take an Xt
   operand; the F_ARCHEXT entries are gated by
   aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },	/* ARMv8.2.  */
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },	/* ARMv8.2.  */
    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
4343
/* Operands of the TLBI (TLB invalidate) instruction.  F_HASXT marks
   operations that take an Xt operand; F_ARCHEXT entries (the ARMv8.4
   outer-shareable "*os" and range "r*" forms — see the ARMv8.4 TLB list
   in aarch64_sys_reg_supported_p) are feature-gated.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },

    /* ARMv8.4 outer-shareable forms.  */
    { "vmalle1os",    CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os",     CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os",      CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os",      CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os",      CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* ARMv8.4 range forms.  */
    { "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
4429
4430 bfd_boolean
4431 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4432 {
4433 return (sys_ins_reg->flags & F_HASXT) != 0;
4434 }
4435
4436 extern bfd_boolean
4437 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4438 const aarch64_sys_ins_reg *reg)
4439 {
4440 if (!(reg->flags & F_ARCHEXT))
4441 return TRUE;
4442
4443 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4444 if (reg->value == CPENS (3, C7, C12, 1)
4445 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4446 return FALSE;
4447
4448 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4449 if ((reg->value == CPENS (0, C7, C9, 0)
4450 || reg->value == CPENS (0, C7, C9, 1))
4451 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4452 return FALSE;
4453
4454 return TRUE;
4455 }
4456
/* The C0..C15 helpers were only needed to build the encoding tables
   above; undefine them so they do not leak into the rest of the file.  */
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* Extract bit BT of instruction word INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bitfield [HI:LO] of instruction word INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4476
4477 static bfd_boolean
4478 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4479 const aarch64_insn insn)
4480 {
4481 int t = BITS (insn, 4, 0);
4482 int n = BITS (insn, 9, 5);
4483 int t2 = BITS (insn, 14, 10);
4484
4485 if (BIT (insn, 23))
4486 {
4487 /* Write back enabled. */
4488 if ((t == n || t2 == n) && n != 31)
4489 return FALSE;
4490 }
4491
4492 if (BIT (insn, 22))
4493 {
4494 /* Load */
4495 if (t == t2)
4496 return FALSE;
4497 }
4498
4499 return TRUE;
4500 }
4501
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bfd_boolean
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  /* Mask of the bits above the ESIZE-byte element.  The shift is done
     in two halves because a single shift by 64 (esize == 8) would be
     undefined behavior.  */
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* UVALUE must be either zero-extended or sign-extended from ESIZE
     bytes; otherwise it is not a valid replicated immediate at all.  */
  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return FALSE;
  /* Narrow SVALUE to the smallest element size whose replication still
     reproduces UVALUE.  If the value replicates all the way down to a
     single byte element, DUP can always encode it, so DUPM is not the
     (sole) option.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return FALSE;
	}
    }
  /* A multiple of 256 — presumably reachable via DUP's shifted
     (LSL #8) immediate form, so test the unshifted byte instead.  */
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  /* DUP encodes signed 8-bit immediates; anything wider needs DUPM.  */
  return svalue < -128 || svalue >= 128;
}
4528
4529 /* Include the opcode description table as well as the operand description
4530 table. */
4531 #define VERIFIER(x) verify_##x
4532 #include "aarch64-tbl.h"