Add support for V_4B so we can properly reject it.
[deliverable/binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Non-zero enables the verbose debug output produced via aarch64_verbose
   and the DEBUG_TRACE machinery; only compiled in for DEBUG_AARCH64 builds.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning; the array
   index is the encoded 5-bit pattern value.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning; the array
   index is the encoded 4-bit prefetch-operation value.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Classification of an instruction's operand-qualifier sequence; used to
   choose which operand carries the size:Q information (see
   significant_operand_index and
   aarch64_select_operand_for_sizeq_field_coding).  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};
129
/* For each data_pattern value, the index of the operand whose qualifier
   is used to encode/decode the size:Q fields.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
138
/* Given a sequence of qualifiers in QUALIFIERS, determine and return
   the data pattern.
   N.B. QUALIFIERS is a possible sequence of qualifiers each of which
   corresponds to one of a sequence of operands.

   The three vector tests below are ordered: 3SAME must be checked before
   LONG/WIDE since its element-size equalities are stricter.  */

static enum data_pattern
get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
{
  if (vector_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* All vector operands share one element size,
	 e.g. v.4s, v.4s, v.4s
	 or   v.4h, v.4h, v.h[3].  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]))
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2])))
	return DP_VECTOR_3SAME;
      /* Destination elements are twice the width of the sources,
	 e.g. v.8h, v.8b, v.8b.
	 or   v.4s, v.4h, v.h[2].
	 or   v.8h, v.16b.  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
	return DP_VECTOR_LONG;
      /* First source matches the destination width, second source is
	 narrow, e.g. v.8h, v.8h, v.8b.  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1])))
	return DP_VECTOR_WIDE;
    }
  else if (fp_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* Scalar destination, vector source, e.g. SADDLV <V><d>, <Vn>.<T>.  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
	return DP_VECTOR_ACROSS_LANES;
    }

  return DP_UNKNOWN;
}
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
201 \f
/* Bitfield descriptions used by the encoder/decoder.  Each entry appears
   to be { lsb, width } of the field within the 32-bit instruction word
   (NOTE(review): confirm member order against aarch64_field in
   aarch64-opc.h).  */
const aarch64_field fields[] =
{
    {  0,  0 },	/* NIL.  */
    {  0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    {  5, 19 },	/* imm19: e.g. in CBZ.  */
    {  5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29,  2 },	/* immlo: e.g. in ADRP.  */
    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
    {  0,  5 },	/* Rt: in load/store instructions.  */
    {  0,  5 },	/* Rd: in many integer instructions.  */
    {  5,  5 },	/* Rn: in many integer instructions.  */
    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
    { 10,  5 },	/* Ra: in fp instructions.  */
    {  5,  3 },	/* op2: in the system instructions.  */
    {  8,  4 },	/* CRm: in the system instructions.  */
    { 12,  4 },	/* CRn: in the system instructions.  */
    { 16,  3 },	/* op1: in the system instructions.  */
    { 19,  2 },	/* op0: in the system instructions.  */
    { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12,  4 },	/* cond: condition flags as a source operand.  */
    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12,  1 },	/* S: in load/store reg offset instructions.  */
    { 21,  2 },	/* hw: in move wide constant instructions.  */
    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22,  2 },	/* type: floating point type field in fp data inst.  */
    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 15,  6 },	/* imm6_2: in rmif instructions.  */
    { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    {  0,  4 },	/* imm4_2: in rmif instructions.  */
    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
    {  5, 16 },	/* imm16: in exception instructions.  */
    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22,  1 },	/* S: in LDRAA and LDRAB instructions.  */
    { 22,  1 },	/* N: in logical (immediate) instructions.  */
    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31,  1 },	/* sf: in integer data processing instructions.  */
    { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    {  4,  1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
    { 14,  1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
    { 16,  1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
    { 17,  1 },	/* SVE_N: SVE equivalent of N.  */
    {  0,  4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10,  3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    {  5,  4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10,  4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16,  4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16,  4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    {  5,  4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    {  0,  4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    {  5,  5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16,  5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    {  0,  5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    {  5,  5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16,  5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    {  0,  5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    {  5,  5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16,  5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    {  5,  5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    {  0,  5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    {  5,  1 },	/* SVE_i1: single-bit immediate.  */
    { 22,  1 },	/* SVE_i3h: high bit of 3-bit immediate.  */
    { 16,  3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16,  4 },	/* SVE_imm4: 4-bit immediate field.  */
    {  5,  5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16,  5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16,  6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14,  7 },	/* SVE_imm7: 7-bit immediate field.  */
    {  5,  8 },	/* SVE_imm8: 8-bit immediate field.  */
    {  5,  9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11,  6 },	/* SVE_immr: SVE equivalent of immr.  */
    {  5,  6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10,  2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    {  5,  5 },	/* SVE_pattern: vector pattern enumeration.  */
    {  0,  4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16,  1 },	/* SVE_rot1: 1-bit rotation amount.  */
    { 10,  2 },	/* SVE_rot2: 2-bit rotation amount.  */
    { 22,  1 },	/* SVE_sz: 1-bit element size select.  */
    { 16,  4 },	/* SVE_tsz: triangular size select.  */
    { 22,  2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    {  8,  2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19,  2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14,  1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22,  1 },	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 11,  2 },	/* rotate1: FCMLA immediate rotate.  */
    { 13,  2 },	/* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12,  1 },	/* rotate3: FCADD immediate rotate.  */
    { 12,  2 },	/* SM3: Indexed element SM3 2 bits index immediate.  */
};
323
324 enum aarch64_operand_class
325 aarch64_get_operand_class (enum aarch64_opnd type)
326 {
327 return aarch64_operands[type].op_class;
328 }
329
330 const char *
331 aarch64_get_operand_name (enum aarch64_opnd type)
332 {
333 return aarch64_operands[type].name;
334 }
335
336 /* Get operand description string.
337 This is usually for the diagnosis purpose. */
338 const char *
339 aarch64_get_operand_desc (enum aarch64_opnd type)
340 {
341 return aarch64_operands[type].desc;
342 }
343
/* Table of all conditional affixes, indexed by the 4-bit condition code.
   Each entry lists the primary mnemonic first, followed by any alternative
   spellings accepted for the same encoding.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
364
365 const aarch64_cond *
366 get_cond_from_value (aarch64_insn value)
367 {
368 assert (value < 16);
369 return &aarch64_conds[(unsigned int) value];
370 }
371
372 const aarch64_cond *
373 get_inverted_cond (const aarch64_cond *cond)
374 {
375 return &aarch64_conds[cond->value ^ 0x1];
376 }
377
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   The list is terminated by a NULL name.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
403
404 enum aarch64_modifier_kind
405 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
406 {
407 return desc - aarch64_operand_modifiers;
408 }
409
410 aarch64_insn
411 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
412 {
413 return aarch64_operand_modifiers[kind].value;
414 }
415
416 enum aarch64_modifier_kind
417 aarch64_get_operand_modifier_from_value (aarch64_insn value,
418 bfd_boolean extend_p)
419 {
420 if (extend_p == TRUE)
421 return AARCH64_MOD_UXTB + value;
422 else
423 return AARCH64_MOD_LSL - value;
424 }
425
426 bfd_boolean
427 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
428 {
429 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
430 ? TRUE : FALSE;
431 }
432
433 static inline bfd_boolean
434 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
435 {
436 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
437 ? TRUE : FALSE;
438 }
439
/* Memory-barrier option names, indexed by the encoded 4-bit CRm value.
   Reserved encodings are spelled as their raw "#0xNN" immediate.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
459
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },    /* PSB CSYNC.  */
  { NULL, 0x0 },
};
472
/* PRFM prefetch operation names, indexed by the 5-bit prfop encoding.
   A NULL name marks an encoding with no mnemonic (printed numerically).

   op -> op: load = 0 instruction = 1 store = 2
   l  -> level: 1-3
   t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
513 \f
514 /* Utilities on value constraint. */
515
/* Return non-zero iff VALUE lies within the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  if (value > high)
    return 0;
  return 1;
}
521
/* Return true if VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  int64_t remainder = value % align;
  return remainder == 0;
}
528
/* Return non-zero if the signed VALUE fits in a two's-complement field
   WIDTH bits wide, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t bound = (int64_t) 1 << (width - 1);
    return value >= -bound && value < bound;
  }
}
542
/* Return non-zero if the non-negative VALUE fits in an unsigned field
   WIDTH bits wide, i.e. 0 <= VALUE < 2^WIDTH.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t bound = (int64_t) 1 << width;
    return value >= 0 && value < bound;
  }
}
556
557 /* Return 1 if OPERAND is SP or WSP. */
558 int
559 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
560 {
561 return ((aarch64_get_operand_class (operand->type)
562 == AARCH64_OPND_CLASS_INT_REG)
563 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
564 && operand->reg.regno == 31);
565 }
566
/* Return 1 if OPERAND is XZR or WZR: an integer register operand that
   cannot be the stack pointer and encodes register number 31.
   (Fixed comment typo: "WZP" -> "WZR".)  */
int
aarch64_zero_register_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
576
577 /* Return true if the operand *OPERAND that has the operand code
578 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
579 qualified by the qualifier TARGET. */
580
581 static inline int
582 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
583 aarch64_opnd_qualifier_t target)
584 {
585 switch (operand->qualifier)
586 {
587 case AARCH64_OPND_QLF_W:
588 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
589 return 1;
590 break;
591 case AARCH64_OPND_QLF_X:
592 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
593 return 1;
594 break;
595 case AARCH64_OPND_QLF_WSP:
596 if (target == AARCH64_OPND_QLF_W
597 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
598 return 1;
599 break;
600 case AARCH64_OPND_QLF_SP:
601 if (target == AARCH64_OPND_QLF_X
602 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
603 return 1;
604 break;
605 default:
606 break;
607 }
608
609 return 0;
610 }
611
612 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
613 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
614
615 Return NIL if more than one expected qualifiers are found. */
616
617 aarch64_opnd_qualifier_t
618 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
619 int idx,
620 const aarch64_opnd_qualifier_t known_qlf,
621 int known_idx)
622 {
623 int i, saved_i;
624
625 /* Special case.
626
627 When the known qualifier is NIL, we have to assume that there is only
628 one qualifier sequence in the *QSEQ_LIST and return the corresponding
629 qualifier directly. One scenario is that for instruction
630 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
631 which has only one possible valid qualifier sequence
632 NIL, S_D
633 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
634 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
635
636 Because the qualifier NIL has dual roles in the qualifier sequence:
637 it can mean no qualifier for the operand, or the qualifer sequence is
638 not in use (when all qualifiers in the sequence are NILs), we have to
639 handle this special case here. */
640 if (known_qlf == AARCH64_OPND_NIL)
641 {
642 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
643 return qseq_list[0][idx];
644 }
645
646 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
647 {
648 if (qseq_list[i][known_idx] == known_qlf)
649 {
650 if (saved_i != -1)
651 /* More than one sequences are found to have KNOWN_QLF at
652 KNOWN_IDX. */
653 return AARCH64_OPND_NIL;
654 saved_i = i;
655 }
656 }
657
658 return qseq_list[saved_i][idx];
659 }
660
/* Broad category of an operand qualifier; determines how the three data
   fields of operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size, number of elements and
     common encoding value; for OQK_VALUE_IN_RANGE they are lower bound,
     upper bound and unused (see aarch64_opnd_qualifiers below).  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
681
/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.
     NOTE(review): these entries use kind 0 (OQK_NIL) rather than OQK_MISC;
     harmless for the *_p predicates below, which only test OQK_OPD_VARIANT
     and OQK_VALUE_IN_RANGE, but worth confirming the intent.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
738
739 static inline bfd_boolean
740 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
741 {
742 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
743 ? TRUE : FALSE;
744 }
745
746 static inline bfd_boolean
747 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
748 {
749 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
750 ? TRUE : FALSE;
751 }
752
753 const char*
754 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
755 {
756 return aarch64_opnd_qualifiers[qualifier].desc;
757 }
758
759 /* Given an operand qualifier, return the expected data element size
760 of a qualified operand. */
761 unsigned char
762 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
763 {
764 assert (operand_variant_qualifier_p (qualifier) == TRUE);
765 return aarch64_opnd_qualifiers[qualifier].data0;
766 }
767
768 unsigned char
769 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
770 {
771 assert (operand_variant_qualifier_p (qualifier) == TRUE);
772 return aarch64_opnd_qualifiers[qualifier].data1;
773 }
774
775 aarch64_insn
776 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
777 {
778 assert (operand_variant_qualifier_p (qualifier) == TRUE);
779 return aarch64_opnd_qualifiers[qualifier].data2;
780 }
781
782 static int
783 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
784 {
785 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
786 return aarch64_opnd_qualifiers[qualifier].data0;
787 }
788
789 static int
790 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
791 {
792 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
793 return aarch64_opnd_qualifiers[qualifier].data1;
794 }
795
796 #ifdef DEBUG_AARCH64
/* printf-style debug output helper: prefix the message with "#### " and
   terminate it with a newline.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list args;
  va_start (args, str);
  fputs ("#### ", stdout);
  vprintf (str, args);
  va_end (args);
  putchar ('\n');
}
807
808 static inline void
809 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
810 {
811 int i;
812 printf ("#### \t");
813 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
814 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
815 printf ("\n");
816 }
817
818 static void
819 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
820 const aarch64_opnd_qualifier_t *qualifier)
821 {
822 int i;
823 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
824
825 aarch64_verbose ("dump_match_qualifiers:");
826 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
827 curr[i] = opnd[i].qualifier;
828 dump_qualifier_sequence (curr);
829 aarch64_verbose ("against");
830 dump_qualifier_sequence (qualifier);
831 }
832 #endif /* DEBUG_AARCH64 */
833
834 /* TODO improve this, we can have an extra field at the runtime to
835 store the number of operands rather than calculating it every time. */
836
837 int
838 aarch64_num_of_operands (const aarch64_opcode *opcode)
839 {
840 int i = 0;
841 const enum aarch64_opnd *opnds = opcode->operands;
842 while (opnds[i++] != AARCH64_OPND_NIL)
843 ;
844 --i;
845 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
846 return i;
847 }
848
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers been established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   A couple examples of the matching algorithm:

   X,W,NIL should match
   X,W,NIL

   NIL,NIL should match
   X  ,NIL

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.  */

int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  Note QUALIFIERS_LIST is advanced together with I,
     so after the loop it points at the matched sequence (if any).  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  QUALIFIERS_LIST now points at the
	 sequence that matched (or at the all-NIL sequence for the
	 empty-list success case).  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      /* Pad the remainder of *RET with NIL.  */
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
973
/* Operand qualifier matching and resolving.

   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.

   if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
   succeeds.  */

static int
match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
{
  int i, nops;
  aarch64_opnd_qualifier_seq_t qualifiers;

  /* Deduce the full qualifier sequence for *INST; QUALIFIERS receives the
     best-matching sequence on success.  */
  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
			       qualifiers))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  if (inst->opcode->flags & F_STRICT)
    {
      /* Require an exact qualifier match, even for NIL qualifiers.  */
      nops = aarch64_num_of_operands (inst->opcode);
      for (i = 0; i < nops; ++i)
	if (inst->operands[i].qualifier != qualifiers[i])
	  return FALSE;
    }

  /* Update the qualifiers.  */
  if (update_p == TRUE)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
1020
1021 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1022 register by MOVZ.
1023
1024 IS32 indicates whether value is a 32-bit immediate or not.
1025 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1026 amount will be returned in *SHIFT_AMOUNT. */
1027
1028 bfd_boolean
1029 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1030 {
1031 int amount;
1032
1033 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1034
1035 if (is32)
1036 {
1037 /* Allow all zeros or all ones in top 32-bits, so that
1038 32-bit constant expressions like ~0x80000000 are
1039 permitted. */
1040 uint64_t ext = value;
1041 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1042 /* Immediate out of range. */
1043 return FALSE;
1044 value &= (int64_t) 0xffffffff;
1045 }
1046
1047 /* first, try movz then movn */
1048 amount = -1;
1049 if ((value & ((int64_t) 0xffff << 0)) == value)
1050 amount = 0;
1051 else if ((value & ((int64_t) 0xffff << 16)) == value)
1052 amount = 16;
1053 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1054 amount = 32;
1055 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1056 amount = 48;
1057
1058 if (amount == -1)
1059 {
1060 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1061 return FALSE;
1062 }
1063
1064 if (shift_amount != NULL)
1065 *shift_amount = amount;
1066
1067 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1068
1069 return TRUE;
1070 }
1071
1072 /* Build the accepted values for immediate logical SIMD instructions.
1073
1074 The standard encodings of the immediate value are:
1075 N imms immr SIMD size R S
1076 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1077 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1078 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1079 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1080 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1081 0 11110s 00000r 2 UInt(r) UInt(s)
1082 where all-ones value of S is reserved.
1083
1084 Let's call E the SIMD size.
1085
1086 The immediate value is: S+1 bits '1' rotated to the right by R.
1087
1088 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1089 (remember S != E - 1). */
1090
1091 #define TOTAL_IMM_NB 5334
1092
1093 typedef struct
1094 {
1095 uint64_t imm;
1096 aarch64_insn encoding;
1097 } simd_imm_encoding;
1098
1099 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1100
1101 static int
1102 simd_imm_encoding_cmp(const void *i1, const void *i2)
1103 {
1104 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1105 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1106
1107 if (imm1->imm < imm2->imm)
1108 return -1;
1109 if (imm1->imm > imm2->imm)
1110 return +1;
1111 return 0;
1112 }
1113
1114 /* immediate bitfield standard encoding
1115 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1116 1 ssssss rrrrrr 64 rrrrrr ssssss
1117 0 0sssss 0rrrrr 32 rrrrr sssss
1118 0 10ssss 00rrrr 16 rrrr ssss
1119 0 110sss 000rrr 8 rrr sss
1120 0 1110ss 0000rr 4 rr ss
1121 0 11110s 00000r 2 r s */
/* Pack the N:immr:imms fields of a logical-immediate encoding into a
   single 13-bit value: bit 12 is IS64 (the N bit), bits 11:6 are R
   (rotation) and bits 5:0 are S (set-bit count field).  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;
  encoding |= r << 6;
  encoding |= is64 << 12;
  return encoding;
}
1127
/* Populate simd_immediates[] with every valid logical (bitmask) immediate
   and its encoding, then sort the table by immediate value so that
   aarch64_logical_immediate_p can look values up with bsearch.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* Iterate over the element sizes 2, 4, 8, 16, 32 and 64 bits.  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* All-ones (s == e - 1) is reserved, hence s < e - 1.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    /* Each case doubles the pattern width; deliberate fall-through
	       replicates an E-bit element up to the full 64 bits.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  /* Sort by immediate value so that bsearch can be used at lookup time.  */
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1193
1194 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1195 be accepted by logical (immediate) instructions
1196 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1197
1198 ESIZE is the number of bytes in the decoded immediate value.
1199 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1200 VALUE will be returned in *ENCODING. */
1201
bfd_boolean
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  static bfd_boolean initialized = FALSE;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  /* Lazily build the sorted immediate table on first use.
     NOTE(review): this initialization is not guarded against concurrent
     callers — confirm single-threaded use is assumed here.  */
  if (!initialized)
    {
      build_immediate_table ();
      initialized = TRUE;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.  */
  /* Shift twice by ESIZE * 4: a single shift by ESIZE * 8 would be
     undefined behaviour when ESIZE == 8 (shift by 64).  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return FALSE;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* Binary-search the sorted table built by build_immediate_table.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with FALSE");
      return FALSE;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with TRUE");
  return TRUE;
}
1245
1246 /* If 64-bit immediate IMM is in the format of
1247 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1248 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1249 of value "abcdefgh". Otherwise return -1. */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int pos;

  /* Walk the eight bytes from least to most significant; each byte must
     be either 0x00 or 0xff, the latter contributing one result bit.  */
  for (pos = 0; pos < 8; pos++)
    {
      uint32_t byte_val = (uint32_t) (imm >> (8 * pos)) & 0xff;
      if (byte_val == 0xff)
	result |= 1 << pos;
      else if (byte_val != 0x00)
	/* A mixed byte means IMM is not an expanded imm8.  */
	return -1;
    }
  return result;
}
1267
1268 /* Utility inline functions for operand_general_constraint_met_p. */
1269
1270 static inline void
1271 set_error (aarch64_operand_error *mismatch_detail,
1272 enum aarch64_operand_error_kind kind, int idx,
1273 const char* error)
1274 {
1275 if (mismatch_detail == NULL)
1276 return;
1277 mismatch_detail->kind = kind;
1278 mismatch_detail->index = idx;
1279 mismatch_detail->error = error;
1280 }
1281
1282 static inline void
1283 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1284 const char* error)
1285 {
1286 if (mismatch_detail == NULL)
1287 return;
1288 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1289 }
1290
1291 static inline void
1292 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1293 int idx, int lower_bound, int upper_bound,
1294 const char* error)
1295 {
1296 if (mismatch_detail == NULL)
1297 return;
1298 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1299 mismatch_detail->data[0] = lower_bound;
1300 mismatch_detail->data[1] = upper_bound;
1301 }
1302
1303 static inline void
1304 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1305 int idx, int lower_bound, int upper_bound)
1306 {
1307 if (mismatch_detail == NULL)
1308 return;
1309 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1310 _("immediate value"));
1311 }
1312
1313 static inline void
1314 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1315 int idx, int lower_bound, int upper_bound)
1316 {
1317 if (mismatch_detail == NULL)
1318 return;
1319 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1320 _("immediate offset"));
1321 }
1322
1323 static inline void
1324 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1325 int idx, int lower_bound, int upper_bound)
1326 {
1327 if (mismatch_detail == NULL)
1328 return;
1329 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1330 _("register number"));
1331 }
1332
1333 static inline void
1334 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1335 int idx, int lower_bound, int upper_bound)
1336 {
1337 if (mismatch_detail == NULL)
1338 return;
1339 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1340 _("register element index"));
1341 }
1342
1343 static inline void
1344 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1345 int idx, int lower_bound, int upper_bound)
1346 {
1347 if (mismatch_detail == NULL)
1348 return;
1349 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1350 _("shift amount"));
1351 }
1352
1353 /* Report that the MUL modifier in operand IDX should be in the range
1354 [LOWER_BOUND, UPPER_BOUND]. */
1355 static inline void
1356 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1357 int idx, int lower_bound, int upper_bound)
1358 {
1359 if (mismatch_detail == NULL)
1360 return;
1361 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1362 _("multiplier"));
1363 }
1364
1365 static inline void
1366 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1367 int alignment)
1368 {
1369 if (mismatch_detail == NULL)
1370 return;
1371 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1372 mismatch_detail->data[0] = alignment;
1373 }
1374
1375 static inline void
1376 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1377 int expected_num)
1378 {
1379 if (mismatch_detail == NULL)
1380 return;
1381 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1382 mismatch_detail->data[0] = expected_num;
1383 }
1384
1385 static inline void
1386 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1387 const char* error)
1388 {
1389 if (mismatch_detail == NULL)
1390 return;
1391 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1392 }
1393
1394 /* General constraint checking based on operand code.
1395
1396 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1397 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1398
1399 This function has to be called after the qualifiers for all operands
1400 have been resolved.
1401
1402 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1403 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1404 of error message during the disassembling where error message is not
1405 wanted. We avoid the dynamic construction of strings of error messages
1406 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1407 use a combination of error code, static string and some integer data to
1408 represent an error. */
1409
1410 static int
1411 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1412 enum aarch64_opnd type,
1413 const aarch64_opcode *opcode,
1414 aarch64_operand_error *mismatch_detail)
1415 {
1416 unsigned num, modifiers, shift;
1417 unsigned char size;
1418 int64_t imm, min_value, max_value;
1419 uint64_t uvalue, mask;
1420 const aarch64_opnd_info *opnd = opnds + idx;
1421 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1422
1423 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1424
1425 switch (aarch64_operands[type].op_class)
1426 {
1427 case AARCH64_OPND_CLASS_INT_REG:
1428 /* Check pair reg constraints for cas* instructions. */
1429 if (type == AARCH64_OPND_PAIRREG)
1430 {
1431 assert (idx == 1 || idx == 3);
1432 if (opnds[idx - 1].reg.regno % 2 != 0)
1433 {
1434 set_syntax_error (mismatch_detail, idx - 1,
1435 _("reg pair must start from even reg"));
1436 return 0;
1437 }
1438 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1439 {
1440 set_syntax_error (mismatch_detail, idx,
1441 _("reg pair must be contiguous"));
1442 return 0;
1443 }
1444 break;
1445 }
1446
1447 /* <Xt> may be optional in some IC and TLBI instructions. */
1448 if (type == AARCH64_OPND_Rt_SYS)
1449 {
1450 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1451 == AARCH64_OPND_CLASS_SYSTEM));
1452 if (opnds[1].present
1453 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1454 {
1455 set_other_error (mismatch_detail, idx, _("extraneous register"));
1456 return 0;
1457 }
1458 if (!opnds[1].present
1459 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1460 {
1461 set_other_error (mismatch_detail, idx, _("missing register"));
1462 return 0;
1463 }
1464 }
1465 switch (qualifier)
1466 {
1467 case AARCH64_OPND_QLF_WSP:
1468 case AARCH64_OPND_QLF_SP:
1469 if (!aarch64_stack_pointer_p (opnd))
1470 {
1471 set_other_error (mismatch_detail, idx,
1472 _("stack pointer register expected"));
1473 return 0;
1474 }
1475 break;
1476 default:
1477 break;
1478 }
1479 break;
1480
1481 case AARCH64_OPND_CLASS_SVE_REG:
1482 switch (type)
1483 {
1484 case AARCH64_OPND_SVE_Zm3_INDEX:
1485 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1486 case AARCH64_OPND_SVE_Zm4_INDEX:
1487 size = get_operand_fields_width (get_operand_from_code (type));
1488 shift = get_operand_specific_data (&aarch64_operands[type]);
1489 mask = (1 << shift) - 1;
1490 if (opnd->reg.regno > mask)
1491 {
1492 assert (mask == 7 || mask == 15);
1493 set_other_error (mismatch_detail, idx,
1494 mask == 15
1495 ? _("z0-z15 expected")
1496 : _("z0-z7 expected"));
1497 return 0;
1498 }
1499 mask = (1 << (size - shift)) - 1;
1500 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1501 {
1502 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1503 return 0;
1504 }
1505 break;
1506
1507 case AARCH64_OPND_SVE_Zn_INDEX:
1508 size = aarch64_get_qualifier_esize (opnd->qualifier);
1509 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1510 {
1511 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1512 0, 64 / size - 1);
1513 return 0;
1514 }
1515 break;
1516
1517 case AARCH64_OPND_SVE_ZnxN:
1518 case AARCH64_OPND_SVE_ZtxN:
1519 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1520 {
1521 set_other_error (mismatch_detail, idx,
1522 _("invalid register list"));
1523 return 0;
1524 }
1525 break;
1526
1527 default:
1528 break;
1529 }
1530 break;
1531
1532 case AARCH64_OPND_CLASS_PRED_REG:
1533 if (opnd->reg.regno >= 8
1534 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1535 {
1536 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1537 return 0;
1538 }
1539 break;
1540
1541 case AARCH64_OPND_CLASS_COND:
1542 if (type == AARCH64_OPND_COND1
1543 && (opnds[idx].cond->value & 0xe) == 0xe)
1544 {
1545 /* Not allow AL or NV. */
1546 set_syntax_error (mismatch_detail, idx, NULL);
1547 }
1548 break;
1549
1550 case AARCH64_OPND_CLASS_ADDRESS:
1551 /* Check writeback. */
1552 switch (opcode->iclass)
1553 {
1554 case ldst_pos:
1555 case ldst_unscaled:
1556 case ldstnapair_offs:
1557 case ldstpair_off:
1558 case ldst_unpriv:
1559 if (opnd->addr.writeback == 1)
1560 {
1561 set_syntax_error (mismatch_detail, idx,
1562 _("unexpected address writeback"));
1563 return 0;
1564 }
1565 break;
1566 case ldst_imm10:
1567 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1568 {
1569 set_syntax_error (mismatch_detail, idx,
1570 _("unexpected address writeback"));
1571 return 0;
1572 }
1573 break;
1574 case ldst_imm9:
1575 case ldstpair_indexed:
1576 case asisdlsep:
1577 case asisdlsop:
1578 if (opnd->addr.writeback == 0)
1579 {
1580 set_syntax_error (mismatch_detail, idx,
1581 _("address writeback expected"));
1582 return 0;
1583 }
1584 break;
1585 default:
1586 assert (opnd->addr.writeback == 0);
1587 break;
1588 }
1589 switch (type)
1590 {
1591 case AARCH64_OPND_ADDR_SIMM7:
1592 /* Scaled signed 7 bits immediate offset. */
1593 /* Get the size of the data element that is accessed, which may be
1594 different from that of the source register size,
1595 e.g. in strb/ldrb. */
1596 size = aarch64_get_qualifier_esize (opnd->qualifier);
1597 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1598 {
1599 set_offset_out_of_range_error (mismatch_detail, idx,
1600 -64 * size, 63 * size);
1601 return 0;
1602 }
1603 if (!value_aligned_p (opnd->addr.offset.imm, size))
1604 {
1605 set_unaligned_error (mismatch_detail, idx, size);
1606 return 0;
1607 }
1608 break;
1609 case AARCH64_OPND_ADDR_OFFSET:
1610 case AARCH64_OPND_ADDR_SIMM9:
1611 /* Unscaled signed 9 bits immediate offset. */
1612 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1613 {
1614 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1615 return 0;
1616 }
1617 break;
1618
1619 case AARCH64_OPND_ADDR_SIMM9_2:
1620 /* Unscaled signed 9 bits immediate offset, which has to be negative
1621 or unaligned. */
1622 size = aarch64_get_qualifier_esize (qualifier);
1623 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1624 && !value_aligned_p (opnd->addr.offset.imm, size))
1625 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1626 return 1;
1627 set_other_error (mismatch_detail, idx,
1628 _("negative or unaligned offset expected"));
1629 return 0;
1630
1631 case AARCH64_OPND_ADDR_SIMM10:
1632 /* Scaled signed 10 bits immediate offset. */
1633 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1634 {
1635 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1636 return 0;
1637 }
1638 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1639 {
1640 set_unaligned_error (mismatch_detail, idx, 8);
1641 return 0;
1642 }
1643 break;
1644
1645 case AARCH64_OPND_SIMD_ADDR_POST:
1646 /* AdvSIMD load/store multiple structures, post-index. */
1647 assert (idx == 1);
1648 if (opnd->addr.offset.is_reg)
1649 {
1650 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1651 return 1;
1652 else
1653 {
1654 set_other_error (mismatch_detail, idx,
1655 _("invalid register offset"));
1656 return 0;
1657 }
1658 }
1659 else
1660 {
1661 const aarch64_opnd_info *prev = &opnds[idx-1];
1662 unsigned num_bytes; /* total number of bytes transferred. */
1663 /* The opcode dependent area stores the number of elements in
1664 each structure to be loaded/stored. */
1665 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1666 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1667 /* Special handling of loading single structure to all lane. */
1668 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1669 * aarch64_get_qualifier_esize (prev->qualifier);
1670 else
1671 num_bytes = prev->reglist.num_regs
1672 * aarch64_get_qualifier_esize (prev->qualifier)
1673 * aarch64_get_qualifier_nelem (prev->qualifier);
1674 if ((int) num_bytes != opnd->addr.offset.imm)
1675 {
1676 set_other_error (mismatch_detail, idx,
1677 _("invalid post-increment amount"));
1678 return 0;
1679 }
1680 }
1681 break;
1682
1683 case AARCH64_OPND_ADDR_REGOFF:
1684 /* Get the size of the data element that is accessed, which may be
1685 different from that of the source register size,
1686 e.g. in strb/ldrb. */
1687 size = aarch64_get_qualifier_esize (opnd->qualifier);
1688 /* It is either no shift or shift by the binary logarithm of SIZE. */
1689 if (opnd->shifter.amount != 0
1690 && opnd->shifter.amount != (int)get_logsz (size))
1691 {
1692 set_other_error (mismatch_detail, idx,
1693 _("invalid shift amount"));
1694 return 0;
1695 }
1696 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1697 operators. */
1698 switch (opnd->shifter.kind)
1699 {
1700 case AARCH64_MOD_UXTW:
1701 case AARCH64_MOD_LSL:
1702 case AARCH64_MOD_SXTW:
1703 case AARCH64_MOD_SXTX: break;
1704 default:
1705 set_other_error (mismatch_detail, idx,
1706 _("invalid extend/shift operator"));
1707 return 0;
1708 }
1709 break;
1710
1711 case AARCH64_OPND_ADDR_UIMM12:
1712 imm = opnd->addr.offset.imm;
1713 /* Get the size of the data element that is accessed, which may be
1714 different from that of the source register size,
1715 e.g. in strb/ldrb. */
1716 size = aarch64_get_qualifier_esize (qualifier);
1717 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1718 {
1719 set_offset_out_of_range_error (mismatch_detail, idx,
1720 0, 4095 * size);
1721 return 0;
1722 }
1723 if (!value_aligned_p (opnd->addr.offset.imm, size))
1724 {
1725 set_unaligned_error (mismatch_detail, idx, size);
1726 return 0;
1727 }
1728 break;
1729
1730 case AARCH64_OPND_ADDR_PCREL14:
1731 case AARCH64_OPND_ADDR_PCREL19:
1732 case AARCH64_OPND_ADDR_PCREL21:
1733 case AARCH64_OPND_ADDR_PCREL26:
1734 imm = opnd->imm.value;
1735 if (operand_need_shift_by_two (get_operand_from_code (type)))
1736 {
	      /* The offset value in a PC-relative branch instruction is always
		 4-byte aligned and is encoded without the lowest 2 bits.  */
1739 if (!value_aligned_p (imm, 4))
1740 {
1741 set_unaligned_error (mismatch_detail, idx, 4);
1742 return 0;
1743 }
1744 /* Right shift by 2 so that we can carry out the following check
1745 canonically. */
1746 imm >>= 2;
1747 }
1748 size = get_operand_fields_width (get_operand_from_code (type));
1749 if (!value_fit_signed_field_p (imm, size))
1750 {
1751 set_other_error (mismatch_detail, idx,
1752 _("immediate out of range"));
1753 return 0;
1754 }
1755 break;
1756
1757 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1758 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1759 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1760 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1761 min_value = -8;
1762 max_value = 7;
1763 sve_imm_offset_vl:
1764 assert (!opnd->addr.offset.is_reg);
1765 assert (opnd->addr.preind);
1766 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1767 min_value *= num;
1768 max_value *= num;
1769 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1770 || (opnd->shifter.operator_present
1771 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1772 {
1773 set_other_error (mismatch_detail, idx,
1774 _("invalid addressing mode"));
1775 return 0;
1776 }
1777 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1778 {
1779 set_offset_out_of_range_error (mismatch_detail, idx,
1780 min_value, max_value);
1781 return 0;
1782 }
1783 if (!value_aligned_p (opnd->addr.offset.imm, num))
1784 {
1785 set_unaligned_error (mismatch_detail, idx, num);
1786 return 0;
1787 }
1788 break;
1789
1790 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1791 min_value = -32;
1792 max_value = 31;
1793 goto sve_imm_offset_vl;
1794
1795 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1796 min_value = -256;
1797 max_value = 255;
1798 goto sve_imm_offset_vl;
1799
1800 case AARCH64_OPND_SVE_ADDR_RI_U6:
1801 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1802 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1803 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1804 min_value = 0;
1805 max_value = 63;
1806 sve_imm_offset:
1807 assert (!opnd->addr.offset.is_reg);
1808 assert (opnd->addr.preind);
1809 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1810 min_value *= num;
1811 max_value *= num;
1812 if (opnd->shifter.operator_present
1813 || opnd->shifter.amount_present)
1814 {
1815 set_other_error (mismatch_detail, idx,
1816 _("invalid addressing mode"));
1817 return 0;
1818 }
1819 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1820 {
1821 set_offset_out_of_range_error (mismatch_detail, idx,
1822 min_value, max_value);
1823 return 0;
1824 }
1825 if (!value_aligned_p (opnd->addr.offset.imm, num))
1826 {
1827 set_unaligned_error (mismatch_detail, idx, num);
1828 return 0;
1829 }
1830 break;
1831
1832 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1833 min_value = -8;
1834 max_value = 7;
1835 goto sve_imm_offset;
1836
1837 case AARCH64_OPND_SVE_ADDR_RR:
1838 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1839 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1840 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1841 case AARCH64_OPND_SVE_ADDR_RX:
1842 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1843 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1844 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1845 case AARCH64_OPND_SVE_ADDR_RZ:
1846 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1847 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1848 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1849 modifiers = 1 << AARCH64_MOD_LSL;
1850 sve_rr_operand:
1851 assert (opnd->addr.offset.is_reg);
1852 assert (opnd->addr.preind);
1853 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1854 && opnd->addr.offset.regno == 31)
1855 {
1856 set_other_error (mismatch_detail, idx,
1857 _("index register xzr is not allowed"));
1858 return 0;
1859 }
1860 if (((1 << opnd->shifter.kind) & modifiers) == 0
1861 || (opnd->shifter.amount
1862 != get_operand_specific_data (&aarch64_operands[type])))
1863 {
1864 set_other_error (mismatch_detail, idx,
1865 _("invalid addressing mode"));
1866 return 0;
1867 }
1868 break;
1869
1870 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1871 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1872 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1873 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1874 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1875 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1876 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1877 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1878 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1879 goto sve_rr_operand;
1880
1881 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1882 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1883 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1884 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1885 min_value = 0;
1886 max_value = 31;
1887 goto sve_imm_offset;
1888
1889 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1890 modifiers = 1 << AARCH64_MOD_LSL;
1891 sve_zz_operand:
1892 assert (opnd->addr.offset.is_reg);
1893 assert (opnd->addr.preind);
1894 if (((1 << opnd->shifter.kind) & modifiers) == 0
1895 || opnd->shifter.amount < 0
1896 || opnd->shifter.amount > 3)
1897 {
1898 set_other_error (mismatch_detail, idx,
1899 _("invalid addressing mode"));
1900 return 0;
1901 }
1902 break;
1903
1904 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1905 modifiers = (1 << AARCH64_MOD_SXTW);
1906 goto sve_zz_operand;
1907
1908 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1909 modifiers = 1 << AARCH64_MOD_UXTW;
1910 goto sve_zz_operand;
1911
1912 default:
1913 break;
1914 }
1915 break;
1916
1917 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1918 if (type == AARCH64_OPND_LEt)
1919 {
1920 /* Get the upper bound for the element index. */
1921 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1922 if (!value_in_range_p (opnd->reglist.index, 0, num))
1923 {
1924 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1925 return 0;
1926 }
1927 }
1928 /* The opcode dependent area stores the number of elements in
1929 each structure to be loaded/stored. */
1930 num = get_opcode_dependent_value (opcode);
1931 switch (type)
1932 {
1933 case AARCH64_OPND_LVt:
1934 assert (num >= 1 && num <= 4);
1935 /* Unless LD1/ST1, the number of registers should be equal to that
1936 of the structure elements. */
1937 if (num != 1 && opnd->reglist.num_regs != num)
1938 {
1939 set_reg_list_error (mismatch_detail, idx, num);
1940 return 0;
1941 }
1942 break;
1943 case AARCH64_OPND_LVt_AL:
1944 case AARCH64_OPND_LEt:
1945 assert (num >= 1 && num <= 4);
1946 /* The number of registers should be equal to that of the structure
1947 elements. */
1948 if (opnd->reglist.num_regs != num)
1949 {
1950 set_reg_list_error (mismatch_detail, idx, num);
1951 return 0;
1952 }
1953 break;
1954 default:
1955 break;
1956 }
1957 break;
1958
1959 case AARCH64_OPND_CLASS_IMMEDIATE:
1960 /* Constraint check on immediate operand. */
1961 imm = opnd->imm.value;
1962 /* E.g. imm_0_31 constrains value to be 0..31. */
1963 if (qualifier_value_in_range_constraint_p (qualifier)
1964 && !value_in_range_p (imm, get_lower_bound (qualifier),
1965 get_upper_bound (qualifier)))
1966 {
1967 set_imm_out_of_range_error (mismatch_detail, idx,
1968 get_lower_bound (qualifier),
1969 get_upper_bound (qualifier));
1970 return 0;
1971 }
1972
1973 switch (type)
1974 {
1975 case AARCH64_OPND_AIMM:
1976 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1977 {
1978 set_other_error (mismatch_detail, idx,
1979 _("invalid shift operator"));
1980 return 0;
1981 }
1982 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1983 {
1984 set_other_error (mismatch_detail, idx,
1985 _("shift amount must be 0 or 12"));
1986 return 0;
1987 }
1988 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1989 {
1990 set_other_error (mismatch_detail, idx,
1991 _("immediate out of range"));
1992 return 0;
1993 }
1994 break;
1995
1996 case AARCH64_OPND_HALF:
1997 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1998 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1999 {
2000 set_other_error (mismatch_detail, idx,
2001 _("invalid shift operator"));
2002 return 0;
2003 }
2004 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2005 if (!value_aligned_p (opnd->shifter.amount, 16))
2006 {
2007 set_other_error (mismatch_detail, idx,
2008 _("shift amount must be a multiple of 16"));
2009 return 0;
2010 }
2011 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2012 {
2013 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2014 0, size * 8 - 16);
2015 return 0;
2016 }
2017 if (opnd->imm.value < 0)
2018 {
2019 set_other_error (mismatch_detail, idx,
2020 _("negative immediate value not allowed"));
2021 return 0;
2022 }
2023 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2024 {
2025 set_other_error (mismatch_detail, idx,
2026 _("immediate out of range"));
2027 return 0;
2028 }
2029 break;
2030
2031 case AARCH64_OPND_IMM_MOV:
2032 {
2033 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2034 imm = opnd->imm.value;
2035 assert (idx == 1);
2036 switch (opcode->op)
2037 {
2038 case OP_MOV_IMM_WIDEN:
2039 imm = ~imm;
2040 /* Fall through. */
2041 case OP_MOV_IMM_WIDE:
2042 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2043 {
2044 set_other_error (mismatch_detail, idx,
2045 _("immediate out of range"));
2046 return 0;
2047 }
2048 break;
2049 case OP_MOV_IMM_LOG:
2050 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2051 {
2052 set_other_error (mismatch_detail, idx,
2053 _("immediate out of range"));
2054 return 0;
2055 }
2056 break;
2057 default:
2058 assert (0);
2059 return 0;
2060 }
2061 }
2062 break;
2063
2064 case AARCH64_OPND_NZCV:
2065 case AARCH64_OPND_CCMP_IMM:
2066 case AARCH64_OPND_EXCEPTION:
2067 case AARCH64_OPND_UIMM4:
2068 case AARCH64_OPND_UIMM7:
2069 case AARCH64_OPND_UIMM3_OP1:
2070 case AARCH64_OPND_UIMM3_OP2:
2071 case AARCH64_OPND_SVE_UIMM3:
2072 case AARCH64_OPND_SVE_UIMM7:
2073 case AARCH64_OPND_SVE_UIMM8:
2074 case AARCH64_OPND_SVE_UIMM8_53:
2075 size = get_operand_fields_width (get_operand_from_code (type));
2076 assert (size < 32);
2077 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2078 {
2079 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2080 (1 << size) - 1);
2081 return 0;
2082 }
2083 break;
2084
2085 case AARCH64_OPND_SIMM5:
2086 case AARCH64_OPND_SVE_SIMM5:
2087 case AARCH64_OPND_SVE_SIMM5B:
2088 case AARCH64_OPND_SVE_SIMM6:
2089 case AARCH64_OPND_SVE_SIMM8:
2090 size = get_operand_fields_width (get_operand_from_code (type));
2091 assert (size < 32);
2092 if (!value_fit_signed_field_p (opnd->imm.value, size))
2093 {
2094 set_imm_out_of_range_error (mismatch_detail, idx,
2095 -(1 << (size - 1)),
2096 (1 << (size - 1)) - 1);
2097 return 0;
2098 }
2099 break;
2100
2101 case AARCH64_OPND_WIDTH:
2102 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2103 && opnds[0].type == AARCH64_OPND_Rd);
2104 size = get_upper_bound (qualifier);
2105 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2106 /* lsb+width <= reg.size */
2107 {
2108 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2109 size - opnds[idx-1].imm.value);
2110 return 0;
2111 }
2112 break;
2113
2114 case AARCH64_OPND_LIMM:
2115 case AARCH64_OPND_SVE_LIMM:
2116 {
2117 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2118 uint64_t uimm = opnd->imm.value;
2119 if (opcode->op == OP_BIC)
2120 uimm = ~uimm;
2121 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2122 {
2123 set_other_error (mismatch_detail, idx,
2124 _("immediate out of range"));
2125 return 0;
2126 }
2127 }
2128 break;
2129
2130 case AARCH64_OPND_IMM0:
2131 case AARCH64_OPND_FPIMM0:
2132 if (opnd->imm.value != 0)
2133 {
2134 set_other_error (mismatch_detail, idx,
2135 _("immediate zero expected"));
2136 return 0;
2137 }
2138 break;
2139
2140 case AARCH64_OPND_IMM_ROT1:
2141 case AARCH64_OPND_IMM_ROT2:
2142 case AARCH64_OPND_SVE_IMM_ROT2:
2143 if (opnd->imm.value != 0
2144 && opnd->imm.value != 90
2145 && opnd->imm.value != 180
2146 && opnd->imm.value != 270)
2147 {
2148 set_other_error (mismatch_detail, idx,
2149 _("rotate expected to be 0, 90, 180 or 270"));
2150 return 0;
2151 }
2152 break;
2153
2154 case AARCH64_OPND_IMM_ROT3:
2155 case AARCH64_OPND_SVE_IMM_ROT1:
2156 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2157 {
2158 set_other_error (mismatch_detail, idx,
2159 _("rotate expected to be 90 or 270"));
2160 return 0;
2161 }
2162 break;
2163
2164 case AARCH64_OPND_SHLL_IMM:
2165 assert (idx == 2);
2166 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2167 if (opnd->imm.value != size)
2168 {
2169 set_other_error (mismatch_detail, idx,
2170 _("invalid shift amount"));
2171 return 0;
2172 }
2173 break;
2174
2175 case AARCH64_OPND_IMM_VLSL:
2176 size = aarch64_get_qualifier_esize (qualifier);
2177 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2178 {
2179 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2180 size * 8 - 1);
2181 return 0;
2182 }
2183 break;
2184
2185 case AARCH64_OPND_IMM_VLSR:
2186 size = aarch64_get_qualifier_esize (qualifier);
2187 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2188 {
2189 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2190 return 0;
2191 }
2192 break;
2193
2194 case AARCH64_OPND_SIMD_IMM:
2195 case AARCH64_OPND_SIMD_IMM_SFT:
2196 /* Qualifier check. */
2197 switch (qualifier)
2198 {
2199 case AARCH64_OPND_QLF_LSL:
2200 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2201 {
2202 set_other_error (mismatch_detail, idx,
2203 _("invalid shift operator"));
2204 return 0;
2205 }
2206 break;
2207 case AARCH64_OPND_QLF_MSL:
2208 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2209 {
2210 set_other_error (mismatch_detail, idx,
2211 _("invalid shift operator"));
2212 return 0;
2213 }
2214 break;
2215 case AARCH64_OPND_QLF_NIL:
2216 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2217 {
2218 set_other_error (mismatch_detail, idx,
2219 _("shift is not permitted"));
2220 return 0;
2221 }
2222 break;
2223 default:
2224 assert (0);
2225 return 0;
2226 }
2227 /* Is the immediate valid? */
2228 assert (idx == 1);
2229 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2230 {
2231 /* uimm8 or simm8 */
2232 if (!value_in_range_p (opnd->imm.value, -128, 255))
2233 {
2234 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2235 return 0;
2236 }
2237 }
2238 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2239 {
2240 /* uimm64 is not
2241 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2242 ffffffffgggggggghhhhhhhh'. */
2243 set_other_error (mismatch_detail, idx,
2244 _("invalid value for immediate"));
2245 return 0;
2246 }
2247 /* Is the shift amount valid? */
2248 switch (opnd->shifter.kind)
2249 {
2250 case AARCH64_MOD_LSL:
2251 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2252 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2253 {
2254 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2255 (size - 1) * 8);
2256 return 0;
2257 }
2258 if (!value_aligned_p (opnd->shifter.amount, 8))
2259 {
2260 set_unaligned_error (mismatch_detail, idx, 8);
2261 return 0;
2262 }
2263 break;
2264 case AARCH64_MOD_MSL:
2265 /* Only 8 and 16 are valid shift amount. */
2266 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2267 {
2268 set_other_error (mismatch_detail, idx,
2269 _("shift amount must be 0 or 16"));
2270 return 0;
2271 }
2272 break;
2273 default:
2274 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2275 {
2276 set_other_error (mismatch_detail, idx,
2277 _("invalid shift operator"));
2278 return 0;
2279 }
2280 break;
2281 }
2282 break;
2283
2284 case AARCH64_OPND_FPIMM:
2285 case AARCH64_OPND_SIMD_FPIMM:
2286 case AARCH64_OPND_SVE_FPIMM8:
2287 if (opnd->imm.is_fp == 0)
2288 {
2289 set_other_error (mismatch_detail, idx,
2290 _("floating-point immediate expected"));
2291 return 0;
2292 }
2293 /* The value is expected to be an 8-bit floating-point constant with
2294 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2295 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2296 instruction). */
2297 if (!value_in_range_p (opnd->imm.value, 0, 255))
2298 {
2299 set_other_error (mismatch_detail, idx,
2300 _("immediate out of range"));
2301 return 0;
2302 }
2303 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2304 {
2305 set_other_error (mismatch_detail, idx,
2306 _("invalid shift operator"));
2307 return 0;
2308 }
2309 break;
2310
2311 case AARCH64_OPND_SVE_AIMM:
2312 min_value = 0;
2313 sve_aimm:
2314 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2315 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2316 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2317 uvalue = opnd->imm.value;
2318 shift = opnd->shifter.amount;
2319 if (size == 1)
2320 {
2321 if (shift != 0)
2322 {
2323 set_other_error (mismatch_detail, idx,
2324 _("no shift amount allowed for"
2325 " 8-bit constants"));
2326 return 0;
2327 }
2328 }
2329 else
2330 {
2331 if (shift != 0 && shift != 8)
2332 {
2333 set_other_error (mismatch_detail, idx,
2334 _("shift amount must be 0 or 8"));
2335 return 0;
2336 }
2337 if (shift == 0 && (uvalue & 0xff) == 0)
2338 {
2339 shift = 8;
2340 uvalue = (int64_t) uvalue / 256;
2341 }
2342 }
2343 mask >>= shift;
2344 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2345 {
2346 set_other_error (mismatch_detail, idx,
2347 _("immediate too big for element size"));
2348 return 0;
2349 }
2350 uvalue = (uvalue - min_value) & mask;
2351 if (uvalue > 0xff)
2352 {
2353 set_other_error (mismatch_detail, idx,
2354 _("invalid arithmetic immediate"));
2355 return 0;
2356 }
2357 break;
2358
2359 case AARCH64_OPND_SVE_ASIMM:
2360 min_value = -128;
2361 goto sve_aimm;
2362
2363 case AARCH64_OPND_SVE_I1_HALF_ONE:
2364 assert (opnd->imm.is_fp);
2365 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2366 {
2367 set_other_error (mismatch_detail, idx,
2368 _("floating-point value must be 0.5 or 1.0"));
2369 return 0;
2370 }
2371 break;
2372
2373 case AARCH64_OPND_SVE_I1_HALF_TWO:
2374 assert (opnd->imm.is_fp);
2375 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2376 {
2377 set_other_error (mismatch_detail, idx,
2378 _("floating-point value must be 0.5 or 2.0"));
2379 return 0;
2380 }
2381 break;
2382
2383 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2384 assert (opnd->imm.is_fp);
2385 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2386 {
2387 set_other_error (mismatch_detail, idx,
2388 _("floating-point value must be 0.0 or 1.0"));
2389 return 0;
2390 }
2391 break;
2392
2393 case AARCH64_OPND_SVE_INV_LIMM:
2394 {
2395 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2396 uint64_t uimm = ~opnd->imm.value;
2397 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2398 {
2399 set_other_error (mismatch_detail, idx,
2400 _("immediate out of range"));
2401 return 0;
2402 }
2403 }
2404 break;
2405
2406 case AARCH64_OPND_SVE_LIMM_MOV:
2407 {
2408 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2409 uint64_t uimm = opnd->imm.value;
2410 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2411 {
2412 set_other_error (mismatch_detail, idx,
2413 _("immediate out of range"));
2414 return 0;
2415 }
2416 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2417 {
2418 set_other_error (mismatch_detail, idx,
2419 _("invalid replicated MOV immediate"));
2420 return 0;
2421 }
2422 }
2423 break;
2424
2425 case AARCH64_OPND_SVE_PATTERN_SCALED:
2426 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2427 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2428 {
2429 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2430 return 0;
2431 }
2432 break;
2433
2434 case AARCH64_OPND_SVE_SHLIMM_PRED:
2435 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2436 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2437 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2438 {
2439 set_imm_out_of_range_error (mismatch_detail, idx,
2440 0, 8 * size - 1);
2441 return 0;
2442 }
2443 break;
2444
2445 case AARCH64_OPND_SVE_SHRIMM_PRED:
2446 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2447 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2448 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2449 {
2450 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2451 return 0;
2452 }
2453 break;
2454
2455 default:
2456 break;
2457 }
2458 break;
2459
2460 case AARCH64_OPND_CLASS_SYSTEM:
2461 switch (type)
2462 {
2463 case AARCH64_OPND_PSTATEFIELD:
2464 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2465 /* MSR UAO, #uimm4
2466 MSR PAN, #uimm4
2467 The immediate must be #0 or #1. */
2468 if ((opnd->pstatefield == 0x03 /* UAO. */
2469 || opnd->pstatefield == 0x04 /* PAN. */
2470 || opnd->pstatefield == 0x1a) /* DIT. */
2471 && opnds[1].imm.value > 1)
2472 {
2473 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2474 return 0;
2475 }
2476 /* MSR SPSel, #uimm4
2477 Uses uimm4 as a control value to select the stack pointer: if
2478 bit 0 is set it selects the current exception level's stack
2479 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2480 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2481 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2482 {
2483 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2484 return 0;
2485 }
2486 break;
2487 default:
2488 break;
2489 }
2490 break;
2491
2492 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2493 /* Get the upper bound for the element index. */
2494 if (opcode->op == OP_FCMLA_ELEM)
2495 /* FCMLA index range depends on the vector size of other operands
2496 and is halfed because complex numbers take two elements. */
2497 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2498 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2499 else
2500 num = 16;
2501 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2502
2503 /* Index out-of-range. */
2504 if (!value_in_range_p (opnd->reglane.index, 0, num))
2505 {
2506 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2507 return 0;
2508 }
2509 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2510 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2511 number is encoded in "size:M:Rm":
2512 size <Vm>
2513 00 RESERVED
2514 01 0:Rm
2515 10 M:Rm
2516 11 RESERVED */
2517 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2518 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2519 {
2520 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2521 return 0;
2522 }
2523 break;
2524
2525 case AARCH64_OPND_CLASS_MODIFIED_REG:
2526 assert (idx == 1 || idx == 2);
2527 switch (type)
2528 {
2529 case AARCH64_OPND_Rm_EXT:
2530 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2531 && opnd->shifter.kind != AARCH64_MOD_LSL)
2532 {
2533 set_other_error (mismatch_detail, idx,
2534 _("extend operator expected"));
2535 return 0;
2536 }
2537 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2538 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2539 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2540 case. */
2541 if (!aarch64_stack_pointer_p (opnds + 0)
2542 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2543 {
2544 if (!opnd->shifter.operator_present)
2545 {
2546 set_other_error (mismatch_detail, idx,
2547 _("missing extend operator"));
2548 return 0;
2549 }
2550 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2551 {
2552 set_other_error (mismatch_detail, idx,
2553 _("'LSL' operator not allowed"));
2554 return 0;
2555 }
2556 }
2557 assert (opnd->shifter.operator_present /* Default to LSL. */
2558 || opnd->shifter.kind == AARCH64_MOD_LSL);
2559 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2560 {
2561 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2562 return 0;
2563 }
2564 /* In the 64-bit form, the final register operand is written as Wm
2565 for all but the (possibly omitted) UXTX/LSL and SXTX
2566 operators.
2567 N.B. GAS allows X register to be used with any operator as a
2568 programming convenience. */
2569 if (qualifier == AARCH64_OPND_QLF_X
2570 && opnd->shifter.kind != AARCH64_MOD_LSL
2571 && opnd->shifter.kind != AARCH64_MOD_UXTX
2572 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2573 {
2574 set_other_error (mismatch_detail, idx, _("W register expected"));
2575 return 0;
2576 }
2577 break;
2578
2579 case AARCH64_OPND_Rm_SFT:
2580 /* ROR is not available to the shifted register operand in
2581 arithmetic instructions. */
2582 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2583 {
2584 set_other_error (mismatch_detail, idx,
2585 _("shift operator expected"));
2586 return 0;
2587 }
2588 if (opnd->shifter.kind == AARCH64_MOD_ROR
2589 && opcode->iclass != log_shift)
2590 {
2591 set_other_error (mismatch_detail, idx,
2592 _("'ROR' operator not allowed"));
2593 return 0;
2594 }
2595 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2596 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2597 {
2598 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2599 return 0;
2600 }
2601 break;
2602
2603 default:
2604 break;
2605 }
2606 break;
2607
2608 default:
2609 break;
2610 }
2611
2612 return 1;
2613 }
2614
2615 /* Main entrypoint for the operand constraint checking.
2616
2617 Return 1 if operands of *INST meet the constraint applied by the operand
2618 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2619 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2620 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2621 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2622 error kind when it is notified that an instruction does not pass the check).
2623
2624 Un-determined operand qualifiers may get established during the process. */
2625
2626 int
2627 aarch64_match_operands_constraint (aarch64_inst *inst,
2628 aarch64_operand_error *mismatch_detail)
2629 {
2630 int i;
2631
2632 DEBUG_TRACE ("enter");
2633
2634 /* Check for cases where a source register needs to be the same as the
2635 destination register. Do this before matching qualifiers since if
2636 an instruction has both invalid tying and invalid qualifiers,
2637 the error about qualifiers would suggest several alternative
2638 instructions that also have invalid tying. */
2639 i = inst->opcode->tied_operand;
2640 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2641 {
2642 if (mismatch_detail)
2643 {
2644 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2645 mismatch_detail->index = i;
2646 mismatch_detail->error = NULL;
2647 }
2648 return 0;
2649 }
2650
2651 /* Match operands' qualifier.
2652 *INST has already had qualifier establish for some, if not all, of
2653 its operands; we need to find out whether these established
2654 qualifiers match one of the qualifier sequence in
2655 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2656 with the corresponding qualifier in such a sequence.
2657 Only basic operand constraint checking is done here; the more thorough
2658 constraint checking will carried out by operand_general_constraint_met_p,
2659 which has be to called after this in order to get all of the operands'
2660 qualifiers established. */
2661 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2662 {
2663 DEBUG_TRACE ("FAIL on operand qualifier matching");
2664 if (mismatch_detail)
2665 {
2666 /* Return an error type to indicate that it is the qualifier
2667 matching failure; we don't care about which operand as there
2668 are enough information in the opcode table to reproduce it. */
2669 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2670 mismatch_detail->index = -1;
2671 mismatch_detail->error = NULL;
2672 }
2673 return 0;
2674 }
2675
2676 /* Match operands' constraint. */
2677 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2678 {
2679 enum aarch64_opnd type = inst->opcode->operands[i];
2680 if (type == AARCH64_OPND_NIL)
2681 break;
2682 if (inst->operands[i].skip)
2683 {
2684 DEBUG_TRACE ("skip the incomplete operand %d", i);
2685 continue;
2686 }
2687 if (operand_general_constraint_met_p (inst->operands, i, type,
2688 inst->opcode, mismatch_detail) == 0)
2689 {
2690 DEBUG_TRACE ("FAIL on operand %d", i);
2691 return 0;
2692 }
2693 }
2694
2695 DEBUG_TRACE ("PASS");
2696
2697 return 1;
2698 }
2699
2700 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2701 Also updates the TYPE of each INST->OPERANDS with the corresponding
2702 value of OPCODE->OPERANDS.
2703
2704 Note that some operand qualifiers may need to be manually cleared by
2705 the caller before it further calls the aarch64_opcode_encode; by
2706 doing this, it helps the qualifier matching facilities work
2707 properly. */
2708
2709 const aarch64_opcode*
2710 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2711 {
2712 int i;
2713 const aarch64_opcode *old = inst->opcode;
2714
2715 inst->opcode = opcode;
2716
2717 /* Update the operand types. */
2718 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2719 {
2720 inst->operands[i].type = opcode->operands[i];
2721 if (opcode->operands[i] == AARCH64_OPND_NIL)
2722 break;
2723 }
2724
2725 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2726
2727 return old;
2728 }
2729
2730 int
2731 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2732 {
2733 int i;
2734 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2735 if (operands[i] == operand)
2736 return i;
2737 else if (operands[i] == AARCH64_OPND_NIL)
2738 break;
2739 return -1;
2740 }
2741 \f
/* R0...R30, followed by FOR31.  R is a macro mapping a register number
   to its name; FOR31 supplies the name used for register number 31.  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7), \
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* General-purpose register names, indexed [has_zr][is_64][regno]:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};
2760
/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes; indexed [is_d][regno].  Unlike the integer
   banks above, z31 is an ordinary vector register, so the same Z-name
   macro is used for entry 31.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2772
2773 /* Return the integer register name.
2774 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2775
2776 static inline const char *
2777 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2778 {
2779 const int has_zr = sp_reg_p ? 0 : 1;
2780 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2781 return int_reg[has_zr][is_64][regno];
2782 }
2783
2784 /* Like get_int_reg_name, but IS_64 is always 1. */
2785
2786 static inline const char *
2787 get_64bit_int_reg_name (int regno, int sp_reg_p)
2788 {
2789 const int has_zr = sp_reg_p ? 0 : 1;
2790 return int_reg[has_zr][1][regno];
2791 }
2792
2793 /* Get the name of the integer offset register in OPND, using the shift type
2794 to decide whether it's a word or doubleword. */
2795
2796 static inline const char *
2797 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2798 {
2799 switch (opnd->shifter.kind)
2800 {
2801 case AARCH64_MOD_UXTW:
2802 case AARCH64_MOD_SXTW:
2803 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2804
2805 case AARCH64_MOD_LSL:
2806 case AARCH64_MOD_SXTX:
2807 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2808
2809 default:
2810 abort ();
2811 }
2812 }
2813
2814 /* Get the name of the SVE vector offset register in OPND, using the operand
2815 qualifier to decide whether the suffix should be .S or .D. */
2816
2817 static inline const char *
2818 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2819 {
2820 assert (qualifier == AARCH64_OPND_QLF_S_S
2821 || qualifier == AARCH64_OPND_QLF_S_D);
2822 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2823 }
2824
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union allows the bit pattern produced by expand_fp_imm to be
   reinterpreted as the corresponding floating-point type.  */

typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision (see
   expand_fp_imm), so this deliberately has the same layout as
   single_conv_t.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2844
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint32_t sign = (imm8 >> 7) & 0x1;		/* imm8<7>  */
  uint32_t low7 = imm8 & 0x7f;			/* imm8<6:0>  */
  uint32_t b6 = low7 >> 6;			/* imm8<6>  */
  uint32_t b6_repl4 = b6 ? 0xf : 0x0;		/* Replicate(imm8<6>,4)  */
  uint64_t result;

  if (size == 8)
    {
      /* Assemble the top 32 bits of the double, then shift into place.  */
      result = ((uint64_t) sign << (63 - 32))		/* imm8<7>  */
	       | ((uint64_t) (b6 ^ 1) << (62 - 32))	/* NOT(imm8<6>)  */
	       | ((uint64_t) b6_repl4 << (58 - 32))
	       | ((uint64_t) b6 << (57 - 32))
	       | ((uint64_t) b6 << (56 - 32))
	       | ((uint64_t) b6 << (55 - 32))		/* Replicate(imm8<6>,7)  */
	       | ((uint64_t) low7 << (48 - 32));	/* imm8<6>:imm8<5:0>  */
      result <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      result = ((uint64_t) sign << 31)		/* imm8<7>  */
	       | ((uint64_t) (b6 ^ 1) << 30)	/* NOT(imm8<6>)  */
	       | ((uint64_t) b6_repl4 << 26)	/* Replicate(imm8<6>,4)  */
	       | ((uint64_t) low7 << 19);	/* imm8<6>:imm8<5:0>  */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
      result = 0;
    }

  return result;
}
2888
2889 /* Produce the string representation of the register list operand *OPND
2890 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2891 the register name that comes before the register number, such as "v". */
2892 static void
2893 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2894 const char *prefix)
2895 {
2896 const int num_regs = opnd->reglist.num_regs;
2897 const int first_reg = opnd->reglist.first_regno;
2898 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2899 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2900 char tb[8]; /* Temporary buffer. */
2901
2902 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2903 assert (num_regs >= 1 && num_regs <= 4);
2904
2905 /* Prepare the index if any. */
2906 if (opnd->reglist.has_index)
2907 /* PR 21096: The %100 is to silence a warning about possible truncation. */
2908 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
2909 else
2910 tb[0] = '\0';
2911
2912 /* The hyphenated form is preferred for disassembly if there are
2913 more than two registers in the list, and the register numbers
2914 are monotonically increasing in increments of one. */
2915 if (num_regs > 2 && last_reg > first_reg)
2916 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2917 prefix, last_reg, qlf_name, tb);
2918 else
2919 {
2920 const int reg0 = first_reg;
2921 const int reg1 = (first_reg + 1) & 0x1f;
2922 const int reg2 = (first_reg + 2) & 0x1f;
2923 const int reg3 = (first_reg + 3) & 0x1f;
2924
2925 switch (num_regs)
2926 {
2927 case 1:
2928 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2929 break;
2930 case 2:
2931 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2932 prefix, reg1, qlf_name, tb);
2933 break;
2934 case 3:
2935 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2936 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2937 prefix, reg2, qlf_name, tb);
2938 break;
2939 case 4:
2940 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2941 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2942 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2943 break;
2944 }
2945 }
2946 }
2947
2948 /* Print the register+immediate address in OPND to BUF, which has SIZE
2949 characters. BASE is the name of the base register. */
2950
2951 static void
2952 print_immediate_offset_address (char *buf, size_t size,
2953 const aarch64_opnd_info *opnd,
2954 const char *base)
2955 {
2956 if (opnd->addr.writeback)
2957 {
2958 if (opnd->addr.preind)
2959 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
2960 else
2961 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
2962 }
2963 else
2964 {
2965 if (opnd->shifter.operator_present)
2966 {
2967 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2968 snprintf (buf, size, "[%s, #%d, mul vl]",
2969 base, opnd->addr.offset.imm);
2970 }
2971 else if (opnd->addr.offset.imm)
2972 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
2973 else
2974 snprintf (buf, size, "[%s]", base);
2975 }
2976 }
2977
2978 /* Produce the string representation of the register offset address operand
2979 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2980 the names of the base and offset registers. */
2981 static void
2982 print_register_offset_address (char *buf, size_t size,
2983 const aarch64_opnd_info *opnd,
2984 const char *base, const char *offset)
2985 {
2986 char tb[16]; /* Temporary buffer. */
2987 bfd_boolean print_extend_p = TRUE;
2988 bfd_boolean print_amount_p = TRUE;
2989 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2990
2991 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2992 || !opnd->shifter.amount_present))
2993 {
2994 /* Not print the shift/extend amount when the amount is zero and
2995 when it is not the special case of 8-bit load/store instruction. */
2996 print_amount_p = FALSE;
2997 /* Likewise, no need to print the shift operator LSL in such a
2998 situation. */
2999 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3000 print_extend_p = FALSE;
3001 }
3002
3003 /* Prepare for the extend/shift. */
3004 if (print_extend_p)
3005 {
3006 if (print_amount_p)
3007 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3008 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3009 (opnd->shifter.amount % 100));
3010 else
3011 snprintf (tb, sizeof (tb), ", %s", shift_name);
3012 }
3013 else
3014 tb[0] = '\0';
3015
3016 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3017 }
3018
3019 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3020 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3021 PC, PCREL_P and ADDRESS are used to pass in and return information about
3022 the PC-relative address calculation, where the PC value is passed in
3023 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3024 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3025 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3026
3027 The function serves both the disassembler and the assembler diagnostics
3028 issuer, which is the reason why it lives in this file. */
3029
3030 void
3031 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3032 const aarch64_opcode *opcode,
3033 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3034 bfd_vma *address)
3035 {
3036 unsigned int i, num_conds;
3037 const char *name = NULL;
3038 const aarch64_opnd_info *opnd = opnds + idx;
3039 enum aarch64_modifier_kind kind;
3040 uint64_t addr, enum_value;
3041
3042 buf[0] = '\0';
3043 if (pcrel_p)
3044 *pcrel_p = 0;
3045
3046 switch (opnd->type)
3047 {
3048 case AARCH64_OPND_Rd:
3049 case AARCH64_OPND_Rn:
3050 case AARCH64_OPND_Rm:
3051 case AARCH64_OPND_Rt:
3052 case AARCH64_OPND_Rt2:
3053 case AARCH64_OPND_Rs:
3054 case AARCH64_OPND_Ra:
3055 case AARCH64_OPND_Rt_SYS:
3056 case AARCH64_OPND_PAIRREG:
3057 case AARCH64_OPND_SVE_Rm:
3058 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3059 the <ic_op>, therefore we use opnd->present to override the
3060 generic optional-ness information. */
3061 if (opnd->type == AARCH64_OPND_Rt_SYS)
3062 {
3063 if (!opnd->present)
3064 break;
3065 }
3066 /* Omit the operand, e.g. RET. */
3067 else if (optional_operand_p (opcode, idx)
3068 && (opnd->reg.regno
3069 == get_optional_operand_default_value (opcode)))
3070 break;
3071 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3072 || opnd->qualifier == AARCH64_OPND_QLF_X);
3073 snprintf (buf, size, "%s",
3074 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3075 break;
3076
3077 case AARCH64_OPND_Rd_SP:
3078 case AARCH64_OPND_Rn_SP:
3079 case AARCH64_OPND_SVE_Rn_SP:
3080 case AARCH64_OPND_Rm_SP:
3081 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3082 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3083 || opnd->qualifier == AARCH64_OPND_QLF_X
3084 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3085 snprintf (buf, size, "%s",
3086 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3087 break;
3088
3089 case AARCH64_OPND_Rm_EXT:
3090 kind = opnd->shifter.kind;
3091 assert (idx == 1 || idx == 2);
3092 if ((aarch64_stack_pointer_p (opnds)
3093 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3094 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3095 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3096 && kind == AARCH64_MOD_UXTW)
3097 || (opnd->qualifier == AARCH64_OPND_QLF_X
3098 && kind == AARCH64_MOD_UXTX)))
3099 {
3100 /* 'LSL' is the preferred form in this case. */
3101 kind = AARCH64_MOD_LSL;
3102 if (opnd->shifter.amount == 0)
3103 {
3104 /* Shifter omitted. */
3105 snprintf (buf, size, "%s",
3106 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3107 break;
3108 }
3109 }
3110 if (opnd->shifter.amount)
3111 snprintf (buf, size, "%s, %s #%" PRIi64,
3112 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3113 aarch64_operand_modifiers[kind].name,
3114 opnd->shifter.amount);
3115 else
3116 snprintf (buf, size, "%s, %s",
3117 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3118 aarch64_operand_modifiers[kind].name);
3119 break;
3120
3121 case AARCH64_OPND_Rm_SFT:
3122 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3123 || opnd->qualifier == AARCH64_OPND_QLF_X);
3124 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3125 snprintf (buf, size, "%s",
3126 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3127 else
3128 snprintf (buf, size, "%s, %s #%" PRIi64,
3129 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3130 aarch64_operand_modifiers[opnd->shifter.kind].name,
3131 opnd->shifter.amount);
3132 break;
3133
3134 case AARCH64_OPND_Fd:
3135 case AARCH64_OPND_Fn:
3136 case AARCH64_OPND_Fm:
3137 case AARCH64_OPND_Fa:
3138 case AARCH64_OPND_Ft:
3139 case AARCH64_OPND_Ft2:
3140 case AARCH64_OPND_Sd:
3141 case AARCH64_OPND_Sn:
3142 case AARCH64_OPND_Sm:
3143 case AARCH64_OPND_SVE_VZn:
3144 case AARCH64_OPND_SVE_Vd:
3145 case AARCH64_OPND_SVE_Vm:
3146 case AARCH64_OPND_SVE_Vn:
3147 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3148 opnd->reg.regno);
3149 break;
3150
3151 case AARCH64_OPND_Va:
3152 case AARCH64_OPND_Vd:
3153 case AARCH64_OPND_Vn:
3154 case AARCH64_OPND_Vm:
3155 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3156 aarch64_get_qualifier_name (opnd->qualifier));
3157 break;
3158
3159 case AARCH64_OPND_Ed:
3160 case AARCH64_OPND_En:
3161 case AARCH64_OPND_Em:
3162 case AARCH64_OPND_SM3_IMM2:
3163 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3164 aarch64_get_qualifier_name (opnd->qualifier),
3165 opnd->reglane.index);
3166 break;
3167
3168 case AARCH64_OPND_VdD1:
3169 case AARCH64_OPND_VnD1:
3170 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3171 break;
3172
3173 case AARCH64_OPND_LVn:
3174 case AARCH64_OPND_LVt:
3175 case AARCH64_OPND_LVt_AL:
3176 case AARCH64_OPND_LEt:
3177 print_register_list (buf, size, opnd, "v");
3178 break;
3179
3180 case AARCH64_OPND_SVE_Pd:
3181 case AARCH64_OPND_SVE_Pg3:
3182 case AARCH64_OPND_SVE_Pg4_5:
3183 case AARCH64_OPND_SVE_Pg4_10:
3184 case AARCH64_OPND_SVE_Pg4_16:
3185 case AARCH64_OPND_SVE_Pm:
3186 case AARCH64_OPND_SVE_Pn:
3187 case AARCH64_OPND_SVE_Pt:
3188 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3189 snprintf (buf, size, "p%d", opnd->reg.regno);
3190 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3191 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3192 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3193 aarch64_get_qualifier_name (opnd->qualifier));
3194 else
3195 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3196 aarch64_get_qualifier_name (opnd->qualifier));
3197 break;
3198
3199 case AARCH64_OPND_SVE_Za_5:
3200 case AARCH64_OPND_SVE_Za_16:
3201 case AARCH64_OPND_SVE_Zd:
3202 case AARCH64_OPND_SVE_Zm_5:
3203 case AARCH64_OPND_SVE_Zm_16:
3204 case AARCH64_OPND_SVE_Zn:
3205 case AARCH64_OPND_SVE_Zt:
3206 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3207 snprintf (buf, size, "z%d", opnd->reg.regno);
3208 else
3209 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3210 aarch64_get_qualifier_name (opnd->qualifier));
3211 break;
3212
3213 case AARCH64_OPND_SVE_ZnxN:
3214 case AARCH64_OPND_SVE_ZtxN:
3215 print_register_list (buf, size, opnd, "z");
3216 break;
3217
3218 case AARCH64_OPND_SVE_Zm3_INDEX:
3219 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3220 case AARCH64_OPND_SVE_Zm4_INDEX:
3221 case AARCH64_OPND_SVE_Zn_INDEX:
3222 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3223 aarch64_get_qualifier_name (opnd->qualifier),
3224 opnd->reglane.index);
3225 break;
3226
3227 case AARCH64_OPND_CRn:
3228 case AARCH64_OPND_CRm:
3229 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3230 break;
3231
3232 case AARCH64_OPND_IDX:
3233 case AARCH64_OPND_MASK:
3234 case AARCH64_OPND_IMM:
3235 case AARCH64_OPND_IMM_2:
3236 case AARCH64_OPND_WIDTH:
3237 case AARCH64_OPND_UIMM3_OP1:
3238 case AARCH64_OPND_UIMM3_OP2:
3239 case AARCH64_OPND_BIT_NUM:
3240 case AARCH64_OPND_IMM_VLSL:
3241 case AARCH64_OPND_IMM_VLSR:
3242 case AARCH64_OPND_SHLL_IMM:
3243 case AARCH64_OPND_IMM0:
3244 case AARCH64_OPND_IMMR:
3245 case AARCH64_OPND_IMMS:
3246 case AARCH64_OPND_FBITS:
3247 case AARCH64_OPND_SIMM5:
3248 case AARCH64_OPND_SVE_SHLIMM_PRED:
3249 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3250 case AARCH64_OPND_SVE_SHRIMM_PRED:
3251 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3252 case AARCH64_OPND_SVE_SIMM5:
3253 case AARCH64_OPND_SVE_SIMM5B:
3254 case AARCH64_OPND_SVE_SIMM6:
3255 case AARCH64_OPND_SVE_SIMM8:
3256 case AARCH64_OPND_SVE_UIMM3:
3257 case AARCH64_OPND_SVE_UIMM7:
3258 case AARCH64_OPND_SVE_UIMM8:
3259 case AARCH64_OPND_SVE_UIMM8_53:
3260 case AARCH64_OPND_IMM_ROT1:
3261 case AARCH64_OPND_IMM_ROT2:
3262 case AARCH64_OPND_IMM_ROT3:
3263 case AARCH64_OPND_SVE_IMM_ROT1:
3264 case AARCH64_OPND_SVE_IMM_ROT2:
3265 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3266 break;
3267
3268 case AARCH64_OPND_SVE_I1_HALF_ONE:
3269 case AARCH64_OPND_SVE_I1_HALF_TWO:
3270 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3271 {
3272 single_conv_t c;
3273 c.i = opnd->imm.value;
3274 snprintf (buf, size, "#%.1f", c.f);
3275 break;
3276 }
3277
3278 case AARCH64_OPND_SVE_PATTERN:
3279 if (optional_operand_p (opcode, idx)
3280 && opnd->imm.value == get_optional_operand_default_value (opcode))
3281 break;
3282 enum_value = opnd->imm.value;
3283 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3284 if (aarch64_sve_pattern_array[enum_value])
3285 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3286 else
3287 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3288 break;
3289
3290 case AARCH64_OPND_SVE_PATTERN_SCALED:
3291 if (optional_operand_p (opcode, idx)
3292 && !opnd->shifter.operator_present
3293 && opnd->imm.value == get_optional_operand_default_value (opcode))
3294 break;
3295 enum_value = opnd->imm.value;
3296 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3297 if (aarch64_sve_pattern_array[opnd->imm.value])
3298 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3299 else
3300 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3301 if (opnd->shifter.operator_present)
3302 {
3303 size_t len = strlen (buf);
3304 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3305 aarch64_operand_modifiers[opnd->shifter.kind].name,
3306 opnd->shifter.amount);
3307 }
3308 break;
3309
3310 case AARCH64_OPND_SVE_PRFOP:
3311 enum_value = opnd->imm.value;
3312 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3313 if (aarch64_sve_prfop_array[enum_value])
3314 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3315 else
3316 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3317 break;
3318
3319 case AARCH64_OPND_IMM_MOV:
3320 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3321 {
3322 case 4: /* e.g. MOV Wd, #<imm32>. */
3323 {
3324 int imm32 = opnd->imm.value;
3325 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3326 }
3327 break;
3328 case 8: /* e.g. MOV Xd, #<imm64>. */
3329 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3330 opnd->imm.value, opnd->imm.value);
3331 break;
3332 default: assert (0);
3333 }
3334 break;
3335
3336 case AARCH64_OPND_FPIMM0:
3337 snprintf (buf, size, "#0.0");
3338 break;
3339
3340 case AARCH64_OPND_LIMM:
3341 case AARCH64_OPND_AIMM:
3342 case AARCH64_OPND_HALF:
3343 case AARCH64_OPND_SVE_INV_LIMM:
3344 case AARCH64_OPND_SVE_LIMM:
3345 case AARCH64_OPND_SVE_LIMM_MOV:
3346 if (opnd->shifter.amount)
3347 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3348 opnd->shifter.amount);
3349 else
3350 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3351 break;
3352
3353 case AARCH64_OPND_SIMD_IMM:
3354 case AARCH64_OPND_SIMD_IMM_SFT:
3355 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3356 || opnd->shifter.kind == AARCH64_MOD_NONE)
3357 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3358 else
3359 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3360 aarch64_operand_modifiers[opnd->shifter.kind].name,
3361 opnd->shifter.amount);
3362 break;
3363
3364 case AARCH64_OPND_SVE_AIMM:
3365 case AARCH64_OPND_SVE_ASIMM:
3366 if (opnd->shifter.amount)
3367 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3368 opnd->shifter.amount);
3369 else
3370 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3371 break;
3372
3373 case AARCH64_OPND_FPIMM:
3374 case AARCH64_OPND_SIMD_FPIMM:
3375 case AARCH64_OPND_SVE_FPIMM8:
3376 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3377 {
3378 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3379 {
3380 half_conv_t c;
3381 c.i = expand_fp_imm (2, opnd->imm.value);
3382 snprintf (buf, size, "#%.18e", c.f);
3383 }
3384 break;
3385 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3386 {
3387 single_conv_t c;
3388 c.i = expand_fp_imm (4, opnd->imm.value);
3389 snprintf (buf, size, "#%.18e", c.f);
3390 }
3391 break;
	case 8: /* e.g. FMOV <Dd>, #<imm>.  */
3393 {
3394 double_conv_t c;
3395 c.i = expand_fp_imm (8, opnd->imm.value);
3396 snprintf (buf, size, "#%.18e", c.d);
3397 }
3398 break;
3399 default: assert (0);
3400 }
3401 break;
3402
3403 case AARCH64_OPND_CCMP_IMM:
3404 case AARCH64_OPND_NZCV:
3405 case AARCH64_OPND_EXCEPTION:
3406 case AARCH64_OPND_UIMM4:
3407 case AARCH64_OPND_UIMM7:
3408 if (optional_operand_p (opcode, idx) == TRUE
3409 && (opnd->imm.value ==
3410 (int64_t) get_optional_operand_default_value (opcode)))
3411 /* Omit the operand, e.g. DCPS1. */
3412 break;
3413 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3414 break;
3415
3416 case AARCH64_OPND_COND:
3417 case AARCH64_OPND_COND1:
3418 snprintf (buf, size, "%s", opnd->cond->names[0]);
3419 num_conds = ARRAY_SIZE (opnd->cond->names);
3420 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3421 {
3422 size_t len = strlen (buf);
3423 if (i == 1)
3424 snprintf (buf + len, size - len, " // %s = %s",
3425 opnd->cond->names[0], opnd->cond->names[i]);
3426 else
3427 snprintf (buf + len, size - len, ", %s",
3428 opnd->cond->names[i]);
3429 }
3430 break;
3431
3432 case AARCH64_OPND_ADDR_ADRP:
3433 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3434 + opnd->imm.value;
3435 if (pcrel_p)
3436 *pcrel_p = 1;
3437 if (address)
3438 *address = addr;
3439 /* This is not necessary during the disassembling, as print_address_func
3440 in the disassemble_info will take care of the printing. But some
	 other callers may still be interested in getting the string in *STR,
3442 so here we do snprintf regardless. */
3443 snprintf (buf, size, "#0x%" PRIx64, addr);
3444 break;
3445
3446 case AARCH64_OPND_ADDR_PCREL14:
3447 case AARCH64_OPND_ADDR_PCREL19:
3448 case AARCH64_OPND_ADDR_PCREL21:
3449 case AARCH64_OPND_ADDR_PCREL26:
3450 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3451 if (pcrel_p)
3452 *pcrel_p = 1;
3453 if (address)
3454 *address = addr;
3455 /* This is not necessary during the disassembling, as print_address_func
3456 in the disassemble_info will take care of the printing. But some
	 other callers may still be interested in getting the string in *STR,
3458 so here we do snprintf regardless. */
3459 snprintf (buf, size, "#0x%" PRIx64, addr);
3460 break;
3461
3462 case AARCH64_OPND_ADDR_SIMPLE:
3463 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3464 case AARCH64_OPND_SIMD_ADDR_POST:
3465 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3466 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3467 {
3468 if (opnd->addr.offset.is_reg)
3469 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3470 else
3471 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3472 }
3473 else
3474 snprintf (buf, size, "[%s]", name);
3475 break;
3476
3477 case AARCH64_OPND_ADDR_REGOFF:
3478 case AARCH64_OPND_SVE_ADDR_RR:
3479 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3480 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3481 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3482 case AARCH64_OPND_SVE_ADDR_RX:
3483 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3484 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3485 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3486 print_register_offset_address
3487 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3488 get_offset_int_reg_name (opnd));
3489 break;
3490
3491 case AARCH64_OPND_SVE_ADDR_RZ:
3492 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3493 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3494 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3495 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3496 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3497 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3498 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3499 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3500 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3501 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3502 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3503 print_register_offset_address
3504 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3505 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3506 break;
3507
3508 case AARCH64_OPND_ADDR_SIMM7:
3509 case AARCH64_OPND_ADDR_SIMM9:
3510 case AARCH64_OPND_ADDR_SIMM9_2:
3511 case AARCH64_OPND_ADDR_SIMM10:
3512 case AARCH64_OPND_ADDR_OFFSET:
3513 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3514 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3515 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3516 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3517 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3518 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3519 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3520 case AARCH64_OPND_SVE_ADDR_RI_U6:
3521 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3522 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3523 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3524 print_immediate_offset_address
3525 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3526 break;
3527
3528 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3529 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3530 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3531 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3532 print_immediate_offset_address
3533 (buf, size, opnd,
3534 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3535 break;
3536
3537 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3538 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3539 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3540 print_register_offset_address
3541 (buf, size, opnd,
3542 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3543 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3544 break;
3545
3546 case AARCH64_OPND_ADDR_UIMM12:
3547 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3548 if (opnd->addr.offset.imm)
3549 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3550 else
3551 snprintf (buf, size, "[%s]", name);
3552 break;
3553
3554 case AARCH64_OPND_SYSREG:
3555 for (i = 0; aarch64_sys_regs[i].name; ++i)
3556 if (aarch64_sys_regs[i].value == opnd->sysreg
3557 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
3558 break;
3559 if (aarch64_sys_regs[i].name)
3560 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
3561 else
3562 {
3563 /* Implementation defined system register. */
3564 unsigned int value = opnd->sysreg;
3565 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3566 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3567 value & 0x7);
3568 }
3569 break;
3570
3571 case AARCH64_OPND_PSTATEFIELD:
3572 for (i = 0; aarch64_pstatefields[i].name; ++i)
3573 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3574 break;
3575 assert (aarch64_pstatefields[i].name);
3576 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3577 break;
3578
3579 case AARCH64_OPND_SYSREG_AT:
3580 case AARCH64_OPND_SYSREG_DC:
3581 case AARCH64_OPND_SYSREG_IC:
3582 case AARCH64_OPND_SYSREG_TLBI:
3583 snprintf (buf, size, "%s", opnd->sysins_op->name);
3584 break;
3585
3586 case AARCH64_OPND_BARRIER:
3587 snprintf (buf, size, "%s", opnd->barrier->name);
3588 break;
3589
3590 case AARCH64_OPND_BARRIER_ISB:
3591 /* Operand can be omitted, e.g. in DCPS1. */
3592 if (! optional_operand_p (opcode, idx)
3593 || (opnd->barrier->value
3594 != get_optional_operand_default_value (opcode)))
3595 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3596 break;
3597
3598 case AARCH64_OPND_PRFOP:
3599 if (opnd->prfop->name != NULL)
3600 snprintf (buf, size, "%s", opnd->prfop->name);
3601 else
3602 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3603 break;
3604
3605 case AARCH64_OPND_BARRIER_PSB:
3606 snprintf (buf, size, "%s", opnd->hint_option->name);
3607 break;
3608
3609 default:
3610 assert (0);
3611 }
3612 }
3613 \f
/* Pack the (op0, op1, CRn, CRm, op2) name fields of a system register or
   system instruction into the 16-bit value stored in the tables below.
   The resulting layout matches the decoding used when printing an
   implementation-defined AARCH64_OPND_SYSREG above: op0 in bits [15:14],
   op1 in [13:11], CRn in [10:7], CRm in [6:3] and op2 in [2:0].  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
  /* for 3.9.3 Instructions for Accessing Special Purpose Registers;
     these all encode with op0 == 3 and CRn == 4.  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
  /* for 3.9.10 System Instructions; these all encode with op0 == 1.  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Convenience names for the CRn/CRm register numbers 0..15.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

/* Flags for the entries of aarch64_sys_regs[]; undefine first in case a
   system header already defines a macro of the same name.  */
#ifdef F_DEPRECATED
#undef F_DEPRECATED
#endif
#define F_DEPRECATED 0x1 /* Deprecated system register.  */

#ifdef F_ARCHEXT
#undef F_ARCHEXT
#endif
#define F_ARCHEXT 0x2 /* Architecture dependent system register.  */

#ifdef F_HASXT
#undef F_HASXT
#endif
#define F_HASXT 0x4 /* System instruction register <Xt>
			operand.  */
3653
3654
/* TODO: there are two more issues that still need to be resolved:
   1. handle read-only and write-only system registers;
   2. handle cpu-implementation-defined system registers.  */
3658 const aarch64_sys_reg aarch64_sys_regs [] =
3659 {
3660 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3661 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3662 { "elr_el1", CPEN_(0,C0,1), 0 },
3663 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3664 { "sp_el0", CPEN_(0,C1,0), 0 },
3665 { "spsel", CPEN_(0,C2,0), 0 },
3666 { "daif", CPEN_(3,C2,1), 0 },
3667 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
3668 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3669 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3670 { "nzcv", CPEN_(3,C2,0), 0 },
3671 { "fpcr", CPEN_(3,C4,0), 0 },
3672 { "fpsr", CPEN_(3,C4,1), 0 },
3673 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3674 { "dlr_el0", CPEN_(3,C5,1), 0 },
3675 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3676 { "elr_el2", CPEN_(4,C0,1), 0 },
3677 { "sp_el1", CPEN_(4,C1,0), 0 },
3678 { "spsr_irq", CPEN_(4,C3,0), 0 },
3679 { "spsr_abt", CPEN_(4,C3,1), 0 },
3680 { "spsr_und", CPEN_(4,C3,2), 0 },
3681 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3682 { "spsr_el3", CPEN_(6,C0,0), 0 },
3683 { "elr_el3", CPEN_(6,C0,1), 0 },
3684 { "sp_el2", CPEN_(6,C1,0), 0 },
3685 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3686 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3687 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
3688 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
3689 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
3690 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
3691 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
3692 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
3693 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
3694 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
3695 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
3696 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
3697 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
3698 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
3699 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
3700 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
3701 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
3702 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
3703 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
3704 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
3705 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
3706 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
3707 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
3708 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
3709 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
3710 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
3711 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
3712 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
3713 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
3714 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
3715 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
3716 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
3717 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
3718 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
3719 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
3720 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
3721 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
3722 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
3723 { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT }, /* RO */
3724 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
3725 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
3726 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3727 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3728 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3729 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3730 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3731 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3732 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3733 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3734 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3735 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3736 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3737 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3738 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3739 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3740 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3741 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3742 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3743 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3744 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3745 { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3746 { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3747 { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3748 { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3749 { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3750 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3751 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3752 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3753 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3754 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3755 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3756 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3757 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3758 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3759 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3760 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3761 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3762 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3763 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3764 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3765 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3766 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3767 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3768 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3769 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3770 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3771 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3772 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3773 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3774 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3775 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3776 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3777 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3778 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3779 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3780 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3781 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3782 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3783 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3784 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3785 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
3786 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3787 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
3788 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3789 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
3790 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3791 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3792 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3793 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3794 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3795 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3796 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3797 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3798 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3799 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3800 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3801 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3802 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3803 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3804 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3805 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3806 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3807 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3808 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3809 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3810 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3811 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3812 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3813 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
3814 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
3815 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
3816 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3817 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3818 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3819 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
3820 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3821 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3822 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3823 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3824 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3825 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3826 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
3827 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3828 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3829 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3830 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3831 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
3832 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
3833 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
3834 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3835 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3836 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3837 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3838 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3839 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3840 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3841 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3842 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3843 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3844 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3845 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3846 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3847 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3848 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3849 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3850 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3851 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3852 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3853 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3854 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3855 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3856 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3857 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3858 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3859 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3860 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3861 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3862 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3863 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3864 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3865 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3866 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3867 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3868 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3869 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3870 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3871 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3872 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3873 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3874 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3875 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3876 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3877 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3878 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3879 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3880 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3881 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3882 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3883 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3884 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3885 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3886 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3887 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3888 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3889 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3890 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3891 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3892 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3893 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3894 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3895 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3896 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3897 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3898 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3899 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3900 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3901 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3902 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3903 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3904 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3905 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3906 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3907 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3908 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3909 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3910 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3911 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3912 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3913 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3914 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3915 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3916 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3917 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3918 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3919 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3920 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3921 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3922 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3923 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3924 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3925 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3926 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3927 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3928 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3929 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3930 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3931 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3932 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3933 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3934 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3935 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3936 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3937 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3938 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3939 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3940 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3941 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3942 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3943 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3944 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3945 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3946 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3947 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3948 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3949 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3950 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3951 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3952 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3953 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3954 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3955 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3956 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3957 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3958 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3959 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3960 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3961 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3962 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3963 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3964 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3965 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3966 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3967 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3968 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3969 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3970 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3971 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3972 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3973 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3974 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3975 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3976 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3977 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3978 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3979 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3980 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3981 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3982 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3983 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3984 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3985 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3986 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3987 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3988 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3989 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3990 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3991 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3992 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3993 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3994 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3995 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3996 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3997 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3998 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3999 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
4000 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
4001 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
4002 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
4003 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
4004 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
4005 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
4006 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
4007 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
4008 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
4009 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
4010 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
4011 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
4012 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
4013 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
4014 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
4015 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
4016 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
4017 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
4018 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
4019 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
4020 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
4021 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
4022 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
4023 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
4024 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
4025 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
4026 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
4027 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
4028 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
4029 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
4030 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
4031 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
4032 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
4033 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
4034 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
4035 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
4036
4037 { "dit", CPEN_ (3, C2, 5), F_ARCHEXT },
4038 { "vstcr_el2", CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
4039 { "vsttbr_el2", CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
4040 { "cnthvs_tval_el2", CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
4041 { "cnthvs_cval_el2", CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
4042 { "cnthvs_ctl_el2", CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
4043 { "cnthps_tval_el2", CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
4044 { "cnthps_cval_el2", CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
4045 { "cnthps_ctl_el2", CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
4046 { "sder32_el2", CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
4047 { "vncr_el2", CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
4048 { 0, CPENC(0,0,0,0,0), 0 },
4049 };
4050
4051 bfd_boolean
4052 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4053 {
4054 return (reg->flags & F_DEPRECATED) != 0;
4055 }
4056
/* Return TRUE if the system register described by REG is available with
   the CPU feature set FEATURES.  Registers without F_ARCHEXT belong to
   the base architecture and are always supported; otherwise REG->value
   is matched against the encodings guarded by each architecture
   extension below.  The encoding constants mirror the entries in
   aarch64_sys_regs (and, for the TLB operations, aarch64_sys_regs_tlbi).  */
bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  /* Registers without an architecture-extension flag are always there.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
     ERXMISC0_EL1 AND ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension: the PMB*/
  /* and PMS* registers listed in aarch64_sys_regs.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  /* ARMv8.3 Pointer authentication keys.  */
  if ((reg->value == CPENC (3, 0, C2, C1, 0)
       || reg->value == CPENC (3, 0, C2, C1, 1)
       || reg->value == CPENC (3, 0, C2, C1, 2)
       || reg->value == CPENC (3, 0, C2, C1, 3)
       || reg->value == CPENC (3, 0, C2, C2, 0)
       || reg->value == CPENC (3, 0, C2, C2, 1)
       || reg->value == CPENC (3, 0, C2, C2, 2)
       || reg->value == CPENC (3, 0, C2, C2, 3)
       || reg->value == CPENC (3, 0, C2, C3, 0)
       || reg->value == CPENC (3, 0, C2, C3, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
    return FALSE;

  /* SVE.  */
  if ((reg->value == CPENC (3, 0, C0, C4, 4)
       || reg->value == CPENC (3, 0, C1, C2, 0)
       || reg->value == CPENC (3, 4, C1, C2, 0)
       || reg->value == CPENC (3, 6, C1, C2, 0)
       || reg->value == CPENC (3, 5, C1, C2, 0)
       || reg->value == CPENC (3, 0, C0, C0, 7))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
    return FALSE;

  /* ARMv8.4 features.  */

  /* PSTATE.DIT.  */
  if (reg->value == CPEN_ (3, C2, 5)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* Virtualization extensions: VSTCR_EL2, VSTTBR_EL2, CNTHVS_*_EL2,
     CNTHPS_*_EL2, SDER32_EL2 and VNCR_EL2 (see aarch64_sys_regs).  */
  if ((reg->value == CPENC(3, 4, C2, C6, 2)
       || reg->value == CPENC(3, 4, C2, C6, 0)
       || reg->value == CPENC(3, 4, C14, C4, 0)
       || reg->value == CPENC(3, 4, C14, C4, 2)
       || reg->value == CPENC(3, 4, C14, C4, 1)
       || reg->value == CPENC(3, 4, C14, C5, 0)
       || reg->value == CPENC(3, 4, C14, C5, 2)
       || reg->value == CPENC(3, 4, C14, C5, 1)
       || reg->value == CPENC(3, 4, C1, C3, 1)
       || reg->value == CPENC(3, 4, C2, C2, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* ARMv8.4 TLB instructions (the outer-shareable "*os" and the range
     "r*" forms in aarch64_sys_regs_tlbi).  */
  if ((reg->value == CPENS (0, C8, C1, 0)
       || reg->value == CPENS (0, C8, C1, 1)
       || reg->value == CPENS (0, C8, C1, 2)
       || reg->value == CPENS (0, C8, C1, 3)
       || reg->value == CPENS (0, C8, C1, 5)
       || reg->value == CPENS (0, C8, C1, 7)
       || reg->value == CPENS (4, C8, C4, 0)
       || reg->value == CPENS (4, C8, C4, 4)
       || reg->value == CPENS (4, C8, C1, 1)
       || reg->value == CPENS (4, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 6)
       || reg->value == CPENS (6, C8, C1, 1)
       || reg->value == CPENS (6, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 0)
       || reg->value == CPENS (4, C8, C1, 4)
       || reg->value == CPENS (6, C8, C1, 0)
       || reg->value == CPENS (0, C8, C6, 1)
       || reg->value == CPENS (0, C8, C6, 3)
       || reg->value == CPENS (0, C8, C6, 5)
       || reg->value == CPENS (0, C8, C6, 7)
       || reg->value == CPENS (0, C8, C2, 1)
       || reg->value == CPENS (0, C8, C2, 3)
       || reg->value == CPENS (0, C8, C2, 5)
       || reg->value == CPENS (0, C8, C2, 7)
       || reg->value == CPENS (0, C8, C5, 1)
       || reg->value == CPENS (0, C8, C5, 3)
       || reg->value == CPENS (0, C8, C5, 5)
       || reg->value == CPENS (0, C8, C5, 7)
       || reg->value == CPENS (4, C8, C0, 2)
       || reg->value == CPENS (4, C8, C0, 6)
       || reg->value == CPENS (4, C8, C4, 2)
       || reg->value == CPENS (4, C8, C4, 6)
       || reg->value == CPENS (4, C8, C4, 3)
       || reg->value == CPENS (4, C8, C4, 7)
       || reg->value == CPENS (4, C8, C6, 1)
       || reg->value == CPENS (4, C8, C6, 5)
       || reg->value == CPENS (4, C8, C2, 1)
       || reg->value == CPENS (4, C8, C2, 5)
       || reg->value == CPENS (4, C8, C5, 1)
       || reg->value == CPENS (4, C8, C5, 5)
       || reg->value == CPENS (6, C8, C6, 1)
       || reg->value == CPENS (6, C8, C6, 5)
       || reg->value == CPENS (6, C8, C2, 1)
       || reg->value == CPENS (6, C8, C2, 5)
       || reg->value == CPENS (6, C8, C5, 1)
       || reg->value == CPENS (6, C8, C5, 5))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  return TRUE;
}
4258
/* The CPENC below is fairly misleading, the fields
   here are not in CPENC form.  They are in op2op1 form.  The fields are encoded
   by ins_pstatefield, which just shifts the value by the width of the fields
   in a loop.  So if you CPENC them only the first value will be set, the rest
   are masked out to 0.  As an example.  op2 = 3, op1=2. CPENC would produce a
   value of 0b110000000001000000 (0x30040) while what you want is
   0b011010 (0x1a).  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel", 0x05, 0 },
  { "daifset", 0x1e, 0 },
  { "daifclr", 0x1f, 0 },
  { "pan", 0x04, F_ARCHEXT },	/* Requires FEATURE_PAN (see
				   aarch64_pstatefield_supported_p).  */
  { "uao", 0x03, F_ARCHEXT },	/* Requires FEATURE_V8_2.  */
  { "dit", 0x1a, F_ARCHEXT },	/* Requires FEATURE_V8_4.  */
  { 0, CPENC(0,0,0,0,0), 0 },	/* Sentinel.  */
};
4276
4277 bfd_boolean
4278 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4279 const aarch64_sys_reg *reg)
4280 {
4281 if (!(reg->flags & F_ARCHEXT))
4282 return TRUE;
4283
4284 /* PAN. Values are from aarch64_pstatefields. */
4285 if (reg->value == 0x04
4286 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4287 return FALSE;
4288
4289 /* UAO. Values are from aarch64_pstatefields. */
4290 if (reg->value == 0x03
4291 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4292 return FALSE;
4293
4294 /* DIT. Values are from aarch64_pstatefields. */
4295 if (reg->value == 0x1a
4296 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4297 return FALSE;
4298
4299 return TRUE;
4300 }
4301
/* Operations accepted by the IC (instruction-cache maintenance)
   instruction.  F_HASXT marks the operations that take a register
   operand.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu", CPENS(0,C7,C5,0), 0 },
    { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
4309
/* Operations accepted by the DC (data-cache maintenance) instruction.
   F_HASXT marks the operations that take a register operand; F_ARCHEXT
   entries are gated by aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva", CPENS (3, C7, C4, 1), F_HASXT },
    { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
    { "isw", CPENS (0, C7, C6, 2), F_HASXT },
    { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
    { "csw", CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },	/* ARMv8.2.  */
    { "civac", CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
4323
/* Operations accepted by the AT (address translation) instruction.
   All take a register operand (F_HASXT); F_ARCHEXT entries are gated
   by aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },	/* ARMv8.2.  */
    { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },	/* ARMv8.2.  */
    { 0, CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
4342
/* Operations accepted by the TLBI (TLB invalidate) instruction.
   F_HASXT marks the operations that take a register operand; F_ARCHEXT
   entries are gated by the ARMv8.4 TLB check in
   aarch64_sys_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1", CPENS(0,C8,C7,0), 0 },
    { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2", CPENS(4,C8,C7,0), 0 },
    { "alle2is", CPENS(4,C8,C3,0), 0 },
    { "alle1", CPENS(4,C8,C7,4), 0 },
    { "alle1is", CPENS(4,C8,C3,4), 0 },
    { "alle3", CPENS(6,C8,C7,0), 0 },
    { "alle3is", CPENS(6,C8,C3,0), 0 },
    { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },

    /* Outer-shareable ("*os") forms, ARMv8.4.  */
    { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* Range ("r*") forms, ARMv8.4.  */
    { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { 0, CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
4428
4429 bfd_boolean
4430 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4431 {
4432 return (sys_ins_reg->flags & F_HASXT) != 0;
4433 }
4434
4435 extern bfd_boolean
4436 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4437 const aarch64_sys_ins_reg *reg)
4438 {
4439 if (!(reg->flags & F_ARCHEXT))
4440 return TRUE;
4441
4442 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4443 if (reg->value == CPENS (3, C7, C12, 1)
4444 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4445 return FALSE;
4446
4447 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4448 if ((reg->value == CPENS (0, C7, C9, 0)
4449 || reg->value == CPENS (0, C7, C9, 1))
4450 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4451 return FALSE;
4452
4453 return TRUE;
4454 }
4455
/* The C0..C15 shorthands were only needed to build the system-register
   tables above; retire them before including the opcode tables.  */
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* Extract bit BT of INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4475
4476 static bfd_boolean
4477 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4478 const aarch64_insn insn)
4479 {
4480 int t = BITS (insn, 4, 0);
4481 int n = BITS (insn, 9, 5);
4482 int t2 = BITS (insn, 14, 10);
4483
4484 if (BIT (insn, 23))
4485 {
4486 /* Write back enabled. */
4487 if ((t == n || t2 == n) && n != 31)
4488 return FALSE;
4489 }
4490
4491 if (BIT (insn, 22))
4492 {
4493 /* Load */
4494 if (t == t2)
4495 return FALSE;
4496 }
4497
4498 return TRUE;
4499 }
4500
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bfd_boolean
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  /* Mask of the bits above the ESIZE-byte element.  The shift is split
     in two so that ESIZE == 8 does not produce a shift count of 64.  */
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* The bits outside the element must be all-zero or all-one, i.e. the
     value must be a sign- or zero-extended ESIZE-byte element.  */
  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return FALSE;
  /* Narrow SVALUE to the smallest element size that still replicates to
     the same value.  If it narrows all the way down to a byte, DUP can
     encode it directly, so DUPM is not wanted.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return FALSE;
	}
    }
  /* DUP's immediate is a signed 8-bit value, optionally shifted left by
     8 bits -- hence the /256 when the low byte is clear.  Anything that
     still fits in that range can use DUP rather than DUPM.  */
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  return svalue < -128 || svalue >= 128;
}
4527
/* Include the opcode description table as well as the operand description
   table.  VERIFIER maps the verifier names referenced by aarch64-tbl.h
   onto the verify_* functions defined above.  */
#define VERIFIER(x) verify_##x
#include "aarch64-tbl.h"