Add the operand encoding types for the new Armv8.2-a back-ported instructions. These...
[deliverable/binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
34 #ifdef DEBUG_AARCH64
35 int debug_dump = FALSE;
36 #endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  Indices with no initializer stay null, which marks
   a reserved encoding.  */
const char *const aarch64_sve_pattern_array[32] = {
  [0] = "pow2",
  [1] = "vl1",
  [2] = "vl2",
  [3] = "vl3",
  [4] = "vl4",
  [5] = "vl5",
  [6] = "vl6",
  [7] = "vl7",
  [8] = "vl8",
  [9] = "vl16",
  [10] = "vl32",
  [11] = "vl64",
  [12] = "vl128",
  [13] = "vl256",
  [29] = "mul4",
  [30] = "mul3",
  [31] = "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  Indices with no initializer stay null, which marks
   a reserved encoding.  */
const char *const aarch64_sve_prfop_array[16] = {
  [0] = "pldl1keep",
  [1] = "pldl1strm",
  [2] = "pldl2keep",
  [3] = "pldl2strm",
  [4] = "pldl3keep",
  [5] = "pldl3strm",
  [8] = "pstl1keep",
  [9] = "pstl1strm",
  [10] = "pstl2keep",
  [11] = "pstl2strm",
  [12] = "pstl3keep",
  [13] = "pstl3strm"
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Classification of the "shape" of an AdvSIMD qualifier sequence; used
   below to decide which operand's qualifier carries the size:Q
   information.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};

/* Index of the operand that determines the size:Q encoding, indexed by
   enum data_pattern.  Keep the entries in sync with the enum above.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
   either buffer the calculated result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
201 \f
/* Bit position (lsb) and width of every instruction field used by the
   encoder/decoder, as { lsb, width } pairs.  NOTE(review): entry order
   presumably mirrors the field-kind enumeration in the corresponding
   header -- keep the two in sync when adding entries.  */
const aarch64_field fields[] =
{
    {  0,  0 },	/* NIL.  */
    {  0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    {  5, 19 },	/* imm19: e.g. in CBZ.  */
    {  5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29,  2 },	/* immlo: e.g. in ADRP.  */
    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
    {  0,  5 },	/* Rt: in load/store instructions.  */
    {  0,  5 },	/* Rd: in many integer instructions.  */
    {  5,  5 },	/* Rn: in many integer instructions.  */
    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
    { 10,  5 },	/* Ra: in fp instructions.  */
    {  5,  3 },	/* op2: in the system instructions.  */
    {  8,  4 },	/* CRm: in the system instructions.  */
    { 12,  4 },	/* CRn: in the system instructions.  */
    { 16,  3 },	/* op1: in the system instructions.  */
    { 19,  2 },	/* op0: in the system instructions.  */
    { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12,  4 },	/* cond: condition flags as a source operand.  */
    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12,  1 },	/* S: in load/store reg offset instructions.  */
    { 21,  2 },	/* hw: in move wide constant instructions.  */
    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22,  2 },	/* type: floating point type field in fp data inst.  */
    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 15,  6 },	/* imm6_2: in rmif instructions.  */
    { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    {  0,  4 },	/* imm4_2: in rmif instructions.  */
    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
    {  5, 16 },	/* imm16: in exception instructions.  */
    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22,  1 },	/* S: in LDRAA and LDRAB instructions.  */
    { 22,  1 },	/* N: in logical (immediate) instructions.  */
    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31,  1 },	/* sf: in integer data processing instructions.  */
    { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    {  4,  1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
    { 14,  1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
    { 16,  1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
    { 17,  1 },	/* SVE_N: SVE equivalent of N.  */
    {  0,  4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10,  3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    {  5,  4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10,  4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16,  4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16,  4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    {  5,  4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    {  0,  4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    {  5,  5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16,  5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    {  0,  5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    {  5,  5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16,  5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    {  0,  5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    {  5,  5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16,  5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    {  5,  5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    {  0,  5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    {  5,  1 },	/* SVE_i1: single-bit immediate.  */
    { 22,  1 },	/* SVE_i3h: high bit of 3-bit immediate.  */
    { 16,  3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16,  4 },	/* SVE_imm4: 4-bit immediate field.  */
    {  5,  5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16,  5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16,  6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14,  7 },	/* SVE_imm7: 7-bit immediate field.  */
    {  5,  8 },	/* SVE_imm8: 8-bit immediate field.  */
    {  5,  9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11,  6 },	/* SVE_immr: SVE equivalent of immr.  */
    {  5,  6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10,  2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    {  5,  5 },	/* SVE_pattern: vector pattern enumeration.  */
    {  0,  4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16,  1 },	/* SVE_rot1: 1-bit rotation amount.  */
    { 10,  2 },	/* SVE_rot2: 2-bit rotation amount.  */
    { 22,  1 },	/* SVE_sz: 1-bit element size select.  */
    { 16,  4 },	/* SVE_tsz: triangular size select.  */
    { 22,  2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    {  8,  2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19,  2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14,  1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22,  1 },	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 11,  2 },	/* rotate1: FCMLA immediate rotate.  */
    { 13,  2 },	/* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12,  1 },	/* rotate3: FCADD immediate rotate.  */
    { 12,  2 }, /* SM3: Indexed element SM3 2 bits index immediate.  */
};
323
324 enum aarch64_operand_class
325 aarch64_get_operand_class (enum aarch64_opnd type)
326 {
327 return aarch64_operands[type].op_class;
328 }
329
330 const char *
331 aarch64_get_operand_name (enum aarch64_opnd type)
332 {
333 return aarch64_operands[type].name;
334 }
335
336 /* Get operand description string.
337 This is usually for the diagnosis purpose. */
338 const char *
339 aarch64_get_operand_desc (enum aarch64_opnd type)
340 {
341 return aarch64_operands[type].desc;
342 }
343
/* Table of all conditional affixes, indexed by the 4-bit condition code.
   Each entry lists every accepted spelling for that encoding; entries
   like "hs"/"lo" are the classic synonyms, and names such as "nlast",
   "first" or "tcont" are presumably SVE condition aliases -- confirm
   against the ISA reference before relying on that.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
364
365 const aarch64_cond *
366 get_cond_from_value (aarch64_insn value)
367 {
368 assert (value < 16);
369 return &aarch64_conds[(unsigned int) value];
370 }
371
372 const aarch64_cond *
373 get_inverted_cond (const aarch64_cond *cond)
374 {
375 return &aarch64_conds[cond->value ^ 0x1];
376 }
377
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.

   NOTE(review): entry order must match enum aarch64_modifier_kind --
   aarch64_get_operand_modifier below recovers the kind from the table
   index, and aarch64_get_operand_modifier_from_value computes indices
   relative to the UXTB and LSL entries.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    /* Shift operators.  */
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    /* Extend operators.  */
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
403
404 enum aarch64_modifier_kind
405 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
406 {
407 return desc - aarch64_operand_modifiers;
408 }
409
410 aarch64_insn
411 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
412 {
413 return aarch64_operand_modifiers[kind].value;
414 }
415
416 enum aarch64_modifier_kind
417 aarch64_get_operand_modifier_from_value (aarch64_insn value,
418 bfd_boolean extend_p)
419 {
420 if (extend_p == TRUE)
421 return AARCH64_MOD_UXTB + value;
422 else
423 return AARCH64_MOD_LSL - value;
424 }
425
426 bfd_boolean
427 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
428 {
429 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
430 ? TRUE : FALSE;
431 }
432
433 static inline bfd_boolean
434 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
435 {
436 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
437 ? TRUE : FALSE;
438 }
439
/* Names of the barrier option operands, indexed by their 4-bit encoding.
   NOTE(review): the "#0xNN" entries look like encodings with no named
   option, printed/accepted numerically -- confirm against the callers.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
  { "#0x00", 0x0 },
  { "oshld", 0x1 },
  { "oshst", 0x2 },
  { "osh",   0x3 },
  { "#0x04", 0x4 },
  { "nshld", 0x5 },
  { "nshst", 0x6 },
  { "nsh",   0x7 },
  { "#0x08", 0x8 },
  { "ishld", 0x9 },
  { "ishst", 0xa },
  { "ish",   0xb },
  { "#0x0c", 0xc },
  { "ld",    0xd },
  { "st",    0xe },
  { "sy",    0xf },
};
459
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },	/* PSB CSYNC.  */
  { NULL, 0x0 },	/* Terminator; the value here is unused.  */
};
472
/* Encoding of a PRFM prefetch operation from its three components:
   op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))

/* Names of the 5-bit PRFM prefetch operations, indexed by encoding.
   NOTE(review): null-named entries appear to be encodings with no
   symbolic name (handled numerically by callers) -- confirm.  */
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
513 \f
514 /* Utilities on value constraint. */
515
/* Return 1 if LOW <= VALUE <= HIGH, 0 otherwise.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  return value <= high ? 1 : 0;
}
521
/* Return non-zero if VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return value % align == 0;
}
528
/* Return 1 iff VALUE is representable as a WIDTH-bit two's-complement
   signed field, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  int64_t limit;

  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  limit = (int64_t) 1 << (width - 1);
  return (value >= -limit && value < limit) ? 1 : 0;
}
542
/* Return 1 iff VALUE is representable as a WIDTH-bit unsigned field,
   i.e. 0 <= VALUE < 2^WIDTH.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  int64_t limit;

  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  limit = (int64_t) 1 << width;
  return (value >= 0 && value < limit) ? 1 : 0;
}
556
557 /* Return 1 if OPERAND is SP or WSP. */
558 int
559 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
560 {
561 return ((aarch64_get_operand_class (operand->type)
562 == AARCH64_OPND_CLASS_INT_REG)
563 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
564 && operand->reg.regno == 31);
565 }
566
567 /* Return 1 if OPERAND is XZR or WZP. */
568 int
569 aarch64_zero_register_p (const aarch64_opnd_info *operand)
570 {
571 return ((aarch64_get_operand_class (operand->type)
572 == AARCH64_OPND_CLASS_INT_REG)
573 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
574 && operand->reg.regno == 31);
575 }
576
577 /* Return true if the operand *OPERAND that has the operand code
578 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
579 qualified by the qualifier TARGET. */
580
581 static inline int
582 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
583 aarch64_opnd_qualifier_t target)
584 {
585 switch (operand->qualifier)
586 {
587 case AARCH64_OPND_QLF_W:
588 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
589 return 1;
590 break;
591 case AARCH64_OPND_QLF_X:
592 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
593 return 1;
594 break;
595 case AARCH64_OPND_QLF_WSP:
596 if (target == AARCH64_OPND_QLF_W
597 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
598 return 1;
599 break;
600 case AARCH64_OPND_QLF_SP:
601 if (target == AARCH64_OPND_QLF_X
602 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
603 return 1;
604 break;
605 default:
606 break;
607 }
608
609 return 0;
610 }
611
612 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
613 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
614
615 Return NIL if more than one expected qualifiers are found. */
616
617 aarch64_opnd_qualifier_t
618 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
619 int idx,
620 const aarch64_opnd_qualifier_t known_qlf,
621 int known_idx)
622 {
623 int i, saved_i;
624
625 /* Special case.
626
627 When the known qualifier is NIL, we have to assume that there is only
628 one qualifier sequence in the *QSEQ_LIST and return the corresponding
629 qualifier directly. One scenario is that for instruction
630 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
631 which has only one possible valid qualifier sequence
632 NIL, S_D
633 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
634 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
635
636 Because the qualifier NIL has dual roles in the qualifier sequence:
637 it can mean no qualifier for the operand, or the qualifer sequence is
638 not in use (when all qualifiers in the sequence are NILs), we have to
639 handle this special case here. */
640 if (known_qlf == AARCH64_OPND_NIL)
641 {
642 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
643 return qseq_list[0][idx];
644 }
645
646 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
647 {
648 if (qseq_list[i][known_idx] == known_qlf)
649 {
650 if (saved_i != -1)
651 /* More than one sequences are found to have KNOWN_QLF at
652 KNOWN_IDX. */
653 return AARCH64_OPND_NIL;
654 saved_i = i;
655 }
656 }
657
658 return qseq_list[saved_i][idx];
659 }
660
/* Broad classification of an operand qualifier; determines how the
   three data fields of struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};

/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.

     These previously used a literal 0 (i.e. OQK_NIL) as the kind; use
     OQK_MISC, which is what the comment above intends.  Behaviour is
     unchanged: the kind field is only ever tested against
     OQK_OPD_VARIANT and OQK_VALUE_IN_RANGE in this file.  */

  {0, 0, 0, "lsl", OQK_MISC},
  {0, 0, 0, "msl", OQK_MISC},

  {0, 0, 0, "retrieving", OQK_MISC},
};
737
738 static inline bfd_boolean
739 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
740 {
741 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
742 ? TRUE : FALSE;
743 }
744
745 static inline bfd_boolean
746 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
747 {
748 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
749 ? TRUE : FALSE;
750 }
751
752 const char*
753 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
754 {
755 return aarch64_opnd_qualifiers[qualifier].desc;
756 }
757
758 /* Given an operand qualifier, return the expected data element size
759 of a qualified operand. */
760 unsigned char
761 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
762 {
763 assert (operand_variant_qualifier_p (qualifier) == TRUE);
764 return aarch64_opnd_qualifiers[qualifier].data0;
765 }
766
767 unsigned char
768 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
769 {
770 assert (operand_variant_qualifier_p (qualifier) == TRUE);
771 return aarch64_opnd_qualifiers[qualifier].data1;
772 }
773
774 aarch64_insn
775 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
776 {
777 assert (operand_variant_qualifier_p (qualifier) == TRUE);
778 return aarch64_opnd_qualifiers[qualifier].data2;
779 }
780
781 static int
782 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
783 {
784 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
785 return aarch64_opnd_qualifiers[qualifier].data0;
786 }
787
788 static int
789 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
790 {
791 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
792 return aarch64_opnd_qualifiers[qualifier].data1;
793 }
794
795 #ifdef DEBUG_AARCH64
/* printf-style debug trace helper: prefix "#### ", then the formatted
   message, then a newline.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list args;

  va_start (args, str);
  fputs ("#### ", stdout);
  vprintf (str, args);
  va_end (args);
  putchar ('\n');
}
806
807 static inline void
808 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
809 {
810 int i;
811 printf ("#### \t");
812 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
813 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
814 printf ("\n");
815 }
816
817 static void
818 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
819 const aarch64_opnd_qualifier_t *qualifier)
820 {
821 int i;
822 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
823
824 aarch64_verbose ("dump_match_qualifiers:");
825 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
826 curr[i] = opnd[i].qualifier;
827 dump_qualifier_sequence (curr);
828 aarch64_verbose ("against");
829 dump_qualifier_sequence (qualifier);
830 }
831 #endif /* DEBUG_AARCH64 */
832
833 /* TODO improve this, we can have an extra field at the runtime to
834 store the number of operands rather than calculating it every time. */
835
836 int
837 aarch64_num_of_operands (const aarch64_opcode *opcode)
838 {
839 int i = 0;
840 const enum aarch64_opnd *opnds = opcode->operands;
841 while (opnds[i++] != AARCH64_OPND_NIL)
842 ;
843 --i;
844 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
845 return i;
846 }
847
848 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
849 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
850
851 N.B. on the entry, it is very likely that only some operands in *INST
852 have had their qualifiers been established.
853
854 If STOP_AT is not -1, the function will only try to match
855 the qualifier sequence for operands before and including the operand
856 of index STOP_AT; and on success *RET will only be filled with the first
857 (STOP_AT+1) qualifiers.
858
859 A couple examples of the matching algorithm:
860
861 X,W,NIL should match
862 X,W,NIL
863
864 NIL,NIL should match
865 X ,NIL
866
867 Apart from serving the main encoding routine, this can also be called
868 during or after the operand decoding. */
869
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  /* Clamp STOP_AT to the last operand index (a negative STOP_AT means
     "match all operands").  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  Note QUALIFIERS_LIST is advanced each iteration,
     so after a successful break it points at the matched sequence.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.
	 An all-NIL first sequence means the instruction takes no
	 qualifiers at all, which counts as an immediate match.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  QUALIFIERS_LIST still points at the
	 sequence that matched (see the loop-advance note above).  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched qualifiers up to STOP_AT; pad the rest of *RET
	 with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
972
973 /* Operand qualifier matching and resolving.
974
975 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
976 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
977
978 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
979 succeeds. */
980
981 static int
982 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
983 {
984 int i, nops;
985 aarch64_opnd_qualifier_seq_t qualifiers;
986
987 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
988 qualifiers))
989 {
990 DEBUG_TRACE ("matching FAIL");
991 return 0;
992 }
993
994 if (inst->opcode->flags & F_STRICT)
995 {
996 /* Require an exact qualifier match, even for NIL qualifiers. */
997 nops = aarch64_num_of_operands (inst->opcode);
998 for (i = 0; i < nops; ++i)
999 if (inst->operands[i].qualifier != qualifiers[i])
1000 return FALSE;
1001 }
1002
1003 /* Update the qualifiers. */
1004 if (update_p == TRUE)
1005 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1006 {
1007 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1008 break;
1009 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1010 "update %s with %s for operand %d",
1011 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1012 aarch64_get_qualifier_name (qualifiers[i]), i);
1013 inst->operands[i].qualifier = qualifiers[i];
1014 }
1015
1016 DEBUG_TRACE ("matching SUCCESS");
1017 return 1;
1018 }
1019
1020 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1021 register by MOVZ.
1022
1023 IS32 indicates whether value is a 32-bit immediate or not.
1024 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1025 amount will be returned in *SHIFT_AMOUNT. */
1026
1027 bfd_boolean
1028 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1029 {
1030 int amount;
1031
1032 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1033
1034 if (is32)
1035 {
1036 /* Allow all zeros or all ones in top 32-bits, so that
1037 32-bit constant expressions like ~0x80000000 are
1038 permitted. */
1039 uint64_t ext = value;
1040 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1041 /* Immediate out of range. */
1042 return FALSE;
1043 value &= (int64_t) 0xffffffff;
1044 }
1045
1046 /* first, try movz then movn */
1047 amount = -1;
1048 if ((value & ((int64_t) 0xffff << 0)) == value)
1049 amount = 0;
1050 else if ((value & ((int64_t) 0xffff << 16)) == value)
1051 amount = 16;
1052 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1053 amount = 32;
1054 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1055 amount = 48;
1056
1057 if (amount == -1)
1058 {
1059 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1060 return FALSE;
1061 }
1062
1063 if (shift_amount != NULL)
1064 *shift_amount = amount;
1065
1066 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1067
1068 return TRUE;
1069 }
1070
1071 /* Build the accepted values for immediate logical SIMD instructions.
1072
1073 The standard encodings of the immediate value are:
1074 N imms immr SIMD size R S
1075 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1076 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1077 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1078 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1079 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1080 0 11110s 00000r 2 UInt(r) UInt(s)
1081 where all-ones value of S is reserved.
1082
1083 Let's call E the SIMD size.
1084
1085 The immediate value is: S+1 bits '1' rotated to the right by R.
1086
1087 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1088 (remember S != E - 1). */
1089
#define TOTAL_IMM_NB 5334

/* One entry of the logical-immediate lookup table: the replicated 64-bit
   immediate value and its 13-bit standard (N:immr:imms) encoding.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all TOTAL_IMM_NB valid logical immediates, built lazily by
   build_immediate_table and kept sorted by IMM for bsearch lookups.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1099
1100 static int
1101 simd_imm_encoding_cmp(const void *i1, const void *i2)
1102 {
1103 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1104 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1105
1106 if (imm1->imm < imm2->imm)
1107 return -1;
1108 if (imm1->imm > imm2->imm)
1109 return +1;
1110 return 0;
1111 }
1112
1113 /* immediate bitfield standard encoding
1114 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1115 1 ssssss rrrrrr 64 rrrrrr ssssss
1116 0 0sssss 0rrrrr 32 rrrrr sssss
1117 0 10ssss 00rrrr 16 rrrr ssss
1118 0 110sss 000rrr 8 rrr sss
1119 0 1110ss 0000rr 4 rr ss
1120 0 11110s 00000r 2 r s */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  /* Pack N into bit 12, immr (R) into bits 11:6 and imms (S) into
     bits 5:0 of the standard encoding.  */
  int encoded = s;

  encoded |= r << 6;
  encoded |= is64 << 12;
  return encoded;
}
1126
/* Populate SIMD_IMMEDIATES with every legal logical-immediate value.

   For each element size E in {2, 4, 8, 16, 32, 64} bits, and for every
   valid (S, R) pair, the element is S+1 consecutive ones rotated right
   by R; it is then replicated across the full 64 bits and stored with
   its standard encoding.  Finally the table is sorted by immediate
   value so that aarch64_logical_immediate_p can binary-search it.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* S_MASK supplies the leading "size marker" bits of imms:
	     log_e  s_mask
	     1      ((1 << 4) - 1) << 2 = 111100
	     2      ((1 << 3) - 1) << 3 = 111000
	     3      ((1 << 2) - 1) << 4 = 110000
	     4      ((1 << 1) - 1) << 5 = 100000
	     5      ((1 << 0) - 1) << 6 = 000000  */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* S == E - 1 (all-ones element) is reserved, hence s < e - 1.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63).  */
	    imm = (1ull << (s + 1)) - 1;
	    /* Rotate right by r within the element.  */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* Replicate the element across 64 bits; each case doubles
	       the pattern width, falling through to the next.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  /* The closed-form count 64*63 + 32*31 + ... + 2*1 must match.  */
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1192
1193 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1194 be accepted by logical (immediate) instructions
1195 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1196
1197 ESIZE is the number of bytes in the decoded immediate value.
1198 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1199 VALUE will be returned in *ENCODING. */
1200
bfd_boolean
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  /* Lazily build the lookup table on first use.  NOTE(review): not
     thread-safe, but matches the library's existing usage.  */
  static bfd_boolean initialized = FALSE;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  if (!initialized)
    {
      build_immediate_table ();
      initialized = TRUE;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.  Shifting twice by
     ESIZE * 4 (rather than once by ESIZE * 8) avoids the undefined
     64-bit shift when ESIZE == 8; the result is then 0.  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return FALSE;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* The table is sorted by immediate value, so a binary search suffices.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
	    sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with FALSE");
      return FALSE;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with TRUE");
  return TRUE;
}
1244
1245 /* If 64-bit immediate IMM is in the format of
1246 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1247 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1248 of value "abcdefgh". Otherwise return -1. */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int bit;

  /* Each byte of IMM must be either 0x00 or 0xff; byte BIT maps to
     bit BIT of the shrunk value.  */
  for (bit = 0; bit < 8; bit++)
    {
      uint32_t field = (uint32_t) (imm >> (8 * bit)) & 0xff;

      switch (field)
	{
	case 0xff:
	  result |= 1 << bit;
	  break;
	case 0x00:
	  break;
	default:
	  /* A mixed byte means IMM is not in the expanded format.  */
	  return -1;
	}
    }
  return result;
}
1266
1267 /* Utility inline functions for operand_general_constraint_met_p. */
1268
/* Record in *MISMATCH_DETAIL an operand error of kind KIND for operand
   IDX, with the static message ERROR (which may be NULL when the kind
   alone is informative enough).  A no-op when MISMATCH_DETAIL is NULL,
   i.e. when the caller did not ask for error details.  */
static inline void
set_error (aarch64_operand_error *mismatch_detail,
	   enum aarch64_operand_error_kind kind, int idx,
	   const char* error)
{
  if (mismatch_detail == NULL)
    return;
  mismatch_detail->kind = kind;
  mismatch_detail->index = idx;
  mismatch_detail->error = error;
}
1280
1281 static inline void
1282 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1283 const char* error)
1284 {
1285 if (mismatch_detail == NULL)
1286 return;
1287 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1288 }
1289
/* Record an out-of-range error for operand IDX: its value should have
   been within [LOWER_BOUND, UPPER_BOUND].  ERROR names the quantity
   that is out of range; the bounds are stashed in data[0]/data[1] for
   the front end to format into the final message.  */
static inline void
set_out_of_range_error (aarch64_operand_error *mismatch_detail,
			int idx, int lower_bound, int upper_bound,
			const char* error)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
  mismatch_detail->data[0] = lower_bound;
  mismatch_detail->data[1] = upper_bound;
}
1301
1302 static inline void
1303 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1304 int idx, int lower_bound, int upper_bound)
1305 {
1306 if (mismatch_detail == NULL)
1307 return;
1308 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1309 _("immediate value"));
1310 }
1311
/* Report that the immediate offset in operand IDX should be within
   [LOWER_BOUND, UPPER_BOUND].  */
static inline void
set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
			       int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("immediate offset"));
}
1321
/* Report that the register number in operand IDX should be within
   [LOWER_BOUND, UPPER_BOUND].  */
static inline void
set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
			      int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("register number"));
}
1331
/* Report that the register element index in operand IDX should be
   within [LOWER_BOUND, UPPER_BOUND].  */
static inline void
set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
				 int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("register element index"));
}
1341
/* Report that the shift amount in operand IDX should be within
   [LOWER_BOUND, UPPER_BOUND].  */
static inline void
set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
				   int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("shift amount"));
}
1351
1352 /* Report that the MUL modifier in operand IDX should be in the range
1353 [LOWER_BOUND, UPPER_BOUND]. */
1354 static inline void
1355 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1356 int idx, int lower_bound, int upper_bound)
1357 {
1358 if (mismatch_detail == NULL)
1359 return;
1360 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1361 _("multiplier"));
1362 }
1363
/* Report that the value in operand IDX is not a multiple of ALIGNMENT;
   the alignment is stashed in data[0] so the front end can format the
   final message.  */
static inline void
set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
		     int alignment)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
  mismatch_detail->data[0] = alignment;
}
1373
/* Report that the register list in operand IDX should have contained
   EXPECTED_NUM registers; the count is stashed in data[0].  */
static inline void
set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
		    int expected_num)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
  mismatch_detail->data[0] = expected_num;
}
1383
1384 static inline void
1385 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1386 const char* error)
1387 {
1388 if (mismatch_detail == NULL)
1389 return;
1390 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1391 }
1392
1393 /* General constraint checking based on operand code.
1394
1395 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1396 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1397
1398 This function has to be called after the qualifiers for all operands
1399 have been resolved.
1400
1401 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1402 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1403 of error message during the disassembling where error message is not
1404 wanted. We avoid the dynamic construction of strings of error messages
1405 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1406 use a combination of error code, static string and some integer data to
1407 represent an error. */
1408
1409 static int
1410 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1411 enum aarch64_opnd type,
1412 const aarch64_opcode *opcode,
1413 aarch64_operand_error *mismatch_detail)
1414 {
1415 unsigned num, modifiers, shift;
1416 unsigned char size;
1417 int64_t imm, min_value, max_value;
1418 uint64_t uvalue, mask;
1419 const aarch64_opnd_info *opnd = opnds + idx;
1420 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1421
1422 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1423
1424 switch (aarch64_operands[type].op_class)
1425 {
1426 case AARCH64_OPND_CLASS_INT_REG:
1427 /* Check pair reg constraints for cas* instructions. */
1428 if (type == AARCH64_OPND_PAIRREG)
1429 {
1430 assert (idx == 1 || idx == 3);
1431 if (opnds[idx - 1].reg.regno % 2 != 0)
1432 {
1433 set_syntax_error (mismatch_detail, idx - 1,
1434 _("reg pair must start from even reg"));
1435 return 0;
1436 }
1437 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1438 {
1439 set_syntax_error (mismatch_detail, idx,
1440 _("reg pair must be contiguous"));
1441 return 0;
1442 }
1443 break;
1444 }
1445
1446 /* <Xt> may be optional in some IC and TLBI instructions. */
1447 if (type == AARCH64_OPND_Rt_SYS)
1448 {
1449 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1450 == AARCH64_OPND_CLASS_SYSTEM));
1451 if (opnds[1].present
1452 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1453 {
1454 set_other_error (mismatch_detail, idx, _("extraneous register"));
1455 return 0;
1456 }
1457 if (!opnds[1].present
1458 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1459 {
1460 set_other_error (mismatch_detail, idx, _("missing register"));
1461 return 0;
1462 }
1463 }
1464 switch (qualifier)
1465 {
1466 case AARCH64_OPND_QLF_WSP:
1467 case AARCH64_OPND_QLF_SP:
1468 if (!aarch64_stack_pointer_p (opnd))
1469 {
1470 set_other_error (mismatch_detail, idx,
1471 _("stack pointer register expected"));
1472 return 0;
1473 }
1474 break;
1475 default:
1476 break;
1477 }
1478 break;
1479
1480 case AARCH64_OPND_CLASS_SVE_REG:
1481 switch (type)
1482 {
1483 case AARCH64_OPND_SVE_Zm3_INDEX:
1484 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1485 case AARCH64_OPND_SVE_Zm4_INDEX:
1486 size = get_operand_fields_width (get_operand_from_code (type));
1487 shift = get_operand_specific_data (&aarch64_operands[type]);
1488 mask = (1 << shift) - 1;
1489 if (opnd->reg.regno > mask)
1490 {
1491 assert (mask == 7 || mask == 15);
1492 set_other_error (mismatch_detail, idx,
1493 mask == 15
1494 ? _("z0-z15 expected")
1495 : _("z0-z7 expected"));
1496 return 0;
1497 }
1498 mask = (1 << (size - shift)) - 1;
1499 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1500 {
1501 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1502 return 0;
1503 }
1504 break;
1505
1506 case AARCH64_OPND_SVE_Zn_INDEX:
1507 size = aarch64_get_qualifier_esize (opnd->qualifier);
1508 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1509 {
1510 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1511 0, 64 / size - 1);
1512 return 0;
1513 }
1514 break;
1515
1516 case AARCH64_OPND_SVE_ZnxN:
1517 case AARCH64_OPND_SVE_ZtxN:
1518 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1519 {
1520 set_other_error (mismatch_detail, idx,
1521 _("invalid register list"));
1522 return 0;
1523 }
1524 break;
1525
1526 default:
1527 break;
1528 }
1529 break;
1530
1531 case AARCH64_OPND_CLASS_PRED_REG:
1532 if (opnd->reg.regno >= 8
1533 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1534 {
1535 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1536 return 0;
1537 }
1538 break;
1539
1540 case AARCH64_OPND_CLASS_COND:
1541 if (type == AARCH64_OPND_COND1
1542 && (opnds[idx].cond->value & 0xe) == 0xe)
1543 {
1544 /* Not allow AL or NV. */
1545 set_syntax_error (mismatch_detail, idx, NULL);
1546 }
1547 break;
1548
1549 case AARCH64_OPND_CLASS_ADDRESS:
1550 /* Check writeback. */
1551 switch (opcode->iclass)
1552 {
1553 case ldst_pos:
1554 case ldst_unscaled:
1555 case ldstnapair_offs:
1556 case ldstpair_off:
1557 case ldst_unpriv:
1558 if (opnd->addr.writeback == 1)
1559 {
1560 set_syntax_error (mismatch_detail, idx,
1561 _("unexpected address writeback"));
1562 return 0;
1563 }
1564 break;
1565 case ldst_imm10:
1566 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1567 {
1568 set_syntax_error (mismatch_detail, idx,
1569 _("unexpected address writeback"));
1570 return 0;
1571 }
1572 break;
1573 case ldst_imm9:
1574 case ldstpair_indexed:
1575 case asisdlsep:
1576 case asisdlsop:
1577 if (opnd->addr.writeback == 0)
1578 {
1579 set_syntax_error (mismatch_detail, idx,
1580 _("address writeback expected"));
1581 return 0;
1582 }
1583 break;
1584 default:
1585 assert (opnd->addr.writeback == 0);
1586 break;
1587 }
1588 switch (type)
1589 {
1590 case AARCH64_OPND_ADDR_SIMM7:
1591 /* Scaled signed 7 bits immediate offset. */
1592 /* Get the size of the data element that is accessed, which may be
1593 different from that of the source register size,
1594 e.g. in strb/ldrb. */
1595 size = aarch64_get_qualifier_esize (opnd->qualifier);
1596 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1597 {
1598 set_offset_out_of_range_error (mismatch_detail, idx,
1599 -64 * size, 63 * size);
1600 return 0;
1601 }
1602 if (!value_aligned_p (opnd->addr.offset.imm, size))
1603 {
1604 set_unaligned_error (mismatch_detail, idx, size);
1605 return 0;
1606 }
1607 break;
1608 case AARCH64_OPND_ADDR_OFFSET:
1609 case AARCH64_OPND_ADDR_SIMM9:
1610 /* Unscaled signed 9 bits immediate offset. */
1611 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1612 {
1613 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1614 return 0;
1615 }
1616 break;
1617
1618 case AARCH64_OPND_ADDR_SIMM9_2:
1619 /* Unscaled signed 9 bits immediate offset, which has to be negative
1620 or unaligned. */
1621 size = aarch64_get_qualifier_esize (qualifier);
1622 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1623 && !value_aligned_p (opnd->addr.offset.imm, size))
1624 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1625 return 1;
1626 set_other_error (mismatch_detail, idx,
1627 _("negative or unaligned offset expected"));
1628 return 0;
1629
1630 case AARCH64_OPND_ADDR_SIMM10:
1631 /* Scaled signed 10 bits immediate offset. */
1632 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1633 {
1634 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1635 return 0;
1636 }
1637 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1638 {
1639 set_unaligned_error (mismatch_detail, idx, 8);
1640 return 0;
1641 }
1642 break;
1643
1644 case AARCH64_OPND_SIMD_ADDR_POST:
1645 /* AdvSIMD load/store multiple structures, post-index. */
1646 assert (idx == 1);
1647 if (opnd->addr.offset.is_reg)
1648 {
1649 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1650 return 1;
1651 else
1652 {
1653 set_other_error (mismatch_detail, idx,
1654 _("invalid register offset"));
1655 return 0;
1656 }
1657 }
1658 else
1659 {
1660 const aarch64_opnd_info *prev = &opnds[idx-1];
1661 unsigned num_bytes; /* total number of bytes transferred. */
1662 /* The opcode dependent area stores the number of elements in
1663 each structure to be loaded/stored. */
1664 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1665 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1666 /* Special handling of loading single structure to all lane. */
1667 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1668 * aarch64_get_qualifier_esize (prev->qualifier);
1669 else
1670 num_bytes = prev->reglist.num_regs
1671 * aarch64_get_qualifier_esize (prev->qualifier)
1672 * aarch64_get_qualifier_nelem (prev->qualifier);
1673 if ((int) num_bytes != opnd->addr.offset.imm)
1674 {
1675 set_other_error (mismatch_detail, idx,
1676 _("invalid post-increment amount"));
1677 return 0;
1678 }
1679 }
1680 break;
1681
1682 case AARCH64_OPND_ADDR_REGOFF:
1683 /* Get the size of the data element that is accessed, which may be
1684 different from that of the source register size,
1685 e.g. in strb/ldrb. */
1686 size = aarch64_get_qualifier_esize (opnd->qualifier);
1687 /* It is either no shift or shift by the binary logarithm of SIZE. */
1688 if (opnd->shifter.amount != 0
1689 && opnd->shifter.amount != (int)get_logsz (size))
1690 {
1691 set_other_error (mismatch_detail, idx,
1692 _("invalid shift amount"));
1693 return 0;
1694 }
1695 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1696 operators. */
1697 switch (opnd->shifter.kind)
1698 {
1699 case AARCH64_MOD_UXTW:
1700 case AARCH64_MOD_LSL:
1701 case AARCH64_MOD_SXTW:
1702 case AARCH64_MOD_SXTX: break;
1703 default:
1704 set_other_error (mismatch_detail, idx,
1705 _("invalid extend/shift operator"));
1706 return 0;
1707 }
1708 break;
1709
1710 case AARCH64_OPND_ADDR_UIMM12:
1711 imm = opnd->addr.offset.imm;
1712 /* Get the size of the data element that is accessed, which may be
1713 different from that of the source register size,
1714 e.g. in strb/ldrb. */
1715 size = aarch64_get_qualifier_esize (qualifier);
1716 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1717 {
1718 set_offset_out_of_range_error (mismatch_detail, idx,
1719 0, 4095 * size);
1720 return 0;
1721 }
1722 if (!value_aligned_p (opnd->addr.offset.imm, size))
1723 {
1724 set_unaligned_error (mismatch_detail, idx, size);
1725 return 0;
1726 }
1727 break;
1728
1729 case AARCH64_OPND_ADDR_PCREL14:
1730 case AARCH64_OPND_ADDR_PCREL19:
1731 case AARCH64_OPND_ADDR_PCREL21:
1732 case AARCH64_OPND_ADDR_PCREL26:
1733 imm = opnd->imm.value;
1734 if (operand_need_shift_by_two (get_operand_from_code (type)))
1735 {
1736 /* The offset value in a PC-relative branch instruction is alway
1737 4-byte aligned and is encoded without the lowest 2 bits. */
1738 if (!value_aligned_p (imm, 4))
1739 {
1740 set_unaligned_error (mismatch_detail, idx, 4);
1741 return 0;
1742 }
1743 /* Right shift by 2 so that we can carry out the following check
1744 canonically. */
1745 imm >>= 2;
1746 }
1747 size = get_operand_fields_width (get_operand_from_code (type));
1748 if (!value_fit_signed_field_p (imm, size))
1749 {
1750 set_other_error (mismatch_detail, idx,
1751 _("immediate out of range"));
1752 return 0;
1753 }
1754 break;
1755
1756 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1757 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1758 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1759 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1760 min_value = -8;
1761 max_value = 7;
1762 sve_imm_offset_vl:
1763 assert (!opnd->addr.offset.is_reg);
1764 assert (opnd->addr.preind);
1765 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1766 min_value *= num;
1767 max_value *= num;
1768 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1769 || (opnd->shifter.operator_present
1770 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1771 {
1772 set_other_error (mismatch_detail, idx,
1773 _("invalid addressing mode"));
1774 return 0;
1775 }
1776 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1777 {
1778 set_offset_out_of_range_error (mismatch_detail, idx,
1779 min_value, max_value);
1780 return 0;
1781 }
1782 if (!value_aligned_p (opnd->addr.offset.imm, num))
1783 {
1784 set_unaligned_error (mismatch_detail, idx, num);
1785 return 0;
1786 }
1787 break;
1788
1789 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1790 min_value = -32;
1791 max_value = 31;
1792 goto sve_imm_offset_vl;
1793
1794 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1795 min_value = -256;
1796 max_value = 255;
1797 goto sve_imm_offset_vl;
1798
1799 case AARCH64_OPND_SVE_ADDR_RI_U6:
1800 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1801 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1802 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1803 min_value = 0;
1804 max_value = 63;
1805 sve_imm_offset:
1806 assert (!opnd->addr.offset.is_reg);
1807 assert (opnd->addr.preind);
1808 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1809 min_value *= num;
1810 max_value *= num;
1811 if (opnd->shifter.operator_present
1812 || opnd->shifter.amount_present)
1813 {
1814 set_other_error (mismatch_detail, idx,
1815 _("invalid addressing mode"));
1816 return 0;
1817 }
1818 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1819 {
1820 set_offset_out_of_range_error (mismatch_detail, idx,
1821 min_value, max_value);
1822 return 0;
1823 }
1824 if (!value_aligned_p (opnd->addr.offset.imm, num))
1825 {
1826 set_unaligned_error (mismatch_detail, idx, num);
1827 return 0;
1828 }
1829 break;
1830
1831 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1832 min_value = -8;
1833 max_value = 7;
1834 goto sve_imm_offset;
1835
1836 case AARCH64_OPND_SVE_ADDR_RR:
1837 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1838 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1839 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1840 case AARCH64_OPND_SVE_ADDR_RX:
1841 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1842 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1843 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1844 case AARCH64_OPND_SVE_ADDR_RZ:
1845 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1846 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1847 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1848 modifiers = 1 << AARCH64_MOD_LSL;
1849 sve_rr_operand:
1850 assert (opnd->addr.offset.is_reg);
1851 assert (opnd->addr.preind);
1852 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1853 && opnd->addr.offset.regno == 31)
1854 {
1855 set_other_error (mismatch_detail, idx,
1856 _("index register xzr is not allowed"));
1857 return 0;
1858 }
1859 if (((1 << opnd->shifter.kind) & modifiers) == 0
1860 || (opnd->shifter.amount
1861 != get_operand_specific_data (&aarch64_operands[type])))
1862 {
1863 set_other_error (mismatch_detail, idx,
1864 _("invalid addressing mode"));
1865 return 0;
1866 }
1867 break;
1868
1869 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1870 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1871 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1872 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1873 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1874 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1875 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1876 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1877 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1878 goto sve_rr_operand;
1879
1880 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1881 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1882 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1883 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1884 min_value = 0;
1885 max_value = 31;
1886 goto sve_imm_offset;
1887
1888 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1889 modifiers = 1 << AARCH64_MOD_LSL;
1890 sve_zz_operand:
1891 assert (opnd->addr.offset.is_reg);
1892 assert (opnd->addr.preind);
1893 if (((1 << opnd->shifter.kind) & modifiers) == 0
1894 || opnd->shifter.amount < 0
1895 || opnd->shifter.amount > 3)
1896 {
1897 set_other_error (mismatch_detail, idx,
1898 _("invalid addressing mode"));
1899 return 0;
1900 }
1901 break;
1902
1903 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1904 modifiers = (1 << AARCH64_MOD_SXTW);
1905 goto sve_zz_operand;
1906
1907 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1908 modifiers = 1 << AARCH64_MOD_UXTW;
1909 goto sve_zz_operand;
1910
1911 default:
1912 break;
1913 }
1914 break;
1915
1916 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1917 if (type == AARCH64_OPND_LEt)
1918 {
1919 /* Get the upper bound for the element index. */
1920 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1921 if (!value_in_range_p (opnd->reglist.index, 0, num))
1922 {
1923 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1924 return 0;
1925 }
1926 }
1927 /* The opcode dependent area stores the number of elements in
1928 each structure to be loaded/stored. */
1929 num = get_opcode_dependent_value (opcode);
1930 switch (type)
1931 {
1932 case AARCH64_OPND_LVt:
1933 assert (num >= 1 && num <= 4);
1934 /* Unless LD1/ST1, the number of registers should be equal to that
1935 of the structure elements. */
1936 if (num != 1 && opnd->reglist.num_regs != num)
1937 {
1938 set_reg_list_error (mismatch_detail, idx, num);
1939 return 0;
1940 }
1941 break;
1942 case AARCH64_OPND_LVt_AL:
1943 case AARCH64_OPND_LEt:
1944 assert (num >= 1 && num <= 4);
1945 /* The number of registers should be equal to that of the structure
1946 elements. */
1947 if (opnd->reglist.num_regs != num)
1948 {
1949 set_reg_list_error (mismatch_detail, idx, num);
1950 return 0;
1951 }
1952 break;
1953 default:
1954 break;
1955 }
1956 break;
1957
1958 case AARCH64_OPND_CLASS_IMMEDIATE:
1959 /* Constraint check on immediate operand. */
1960 imm = opnd->imm.value;
1961 /* E.g. imm_0_31 constrains value to be 0..31. */
1962 if (qualifier_value_in_range_constraint_p (qualifier)
1963 && !value_in_range_p (imm, get_lower_bound (qualifier),
1964 get_upper_bound (qualifier)))
1965 {
1966 set_imm_out_of_range_error (mismatch_detail, idx,
1967 get_lower_bound (qualifier),
1968 get_upper_bound (qualifier));
1969 return 0;
1970 }
1971
1972 switch (type)
1973 {
1974 case AARCH64_OPND_AIMM:
1975 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1976 {
1977 set_other_error (mismatch_detail, idx,
1978 _("invalid shift operator"));
1979 return 0;
1980 }
1981 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1982 {
1983 set_other_error (mismatch_detail, idx,
1984 _("shift amount must be 0 or 12"));
1985 return 0;
1986 }
1987 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1988 {
1989 set_other_error (mismatch_detail, idx,
1990 _("immediate out of range"));
1991 return 0;
1992 }
1993 break;
1994
1995 case AARCH64_OPND_HALF:
1996 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1997 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1998 {
1999 set_other_error (mismatch_detail, idx,
2000 _("invalid shift operator"));
2001 return 0;
2002 }
2003 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2004 if (!value_aligned_p (opnd->shifter.amount, 16))
2005 {
2006 set_other_error (mismatch_detail, idx,
2007 _("shift amount must be a multiple of 16"));
2008 return 0;
2009 }
2010 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2011 {
2012 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2013 0, size * 8 - 16);
2014 return 0;
2015 }
2016 if (opnd->imm.value < 0)
2017 {
2018 set_other_error (mismatch_detail, idx,
2019 _("negative immediate value not allowed"));
2020 return 0;
2021 }
2022 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2023 {
2024 set_other_error (mismatch_detail, idx,
2025 _("immediate out of range"));
2026 return 0;
2027 }
2028 break;
2029
2030 case AARCH64_OPND_IMM_MOV:
2031 {
2032 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2033 imm = opnd->imm.value;
2034 assert (idx == 1);
2035 switch (opcode->op)
2036 {
2037 case OP_MOV_IMM_WIDEN:
2038 imm = ~imm;
2039 /* Fall through. */
2040 case OP_MOV_IMM_WIDE:
2041 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2042 {
2043 set_other_error (mismatch_detail, idx,
2044 _("immediate out of range"));
2045 return 0;
2046 }
2047 break;
2048 case OP_MOV_IMM_LOG:
2049 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2050 {
2051 set_other_error (mismatch_detail, idx,
2052 _("immediate out of range"));
2053 return 0;
2054 }
2055 break;
2056 default:
2057 assert (0);
2058 return 0;
2059 }
2060 }
2061 break;
2062
2063 case AARCH64_OPND_NZCV:
2064 case AARCH64_OPND_CCMP_IMM:
2065 case AARCH64_OPND_EXCEPTION:
2066 case AARCH64_OPND_UIMM4:
2067 case AARCH64_OPND_UIMM7:
2068 case AARCH64_OPND_UIMM3_OP1:
2069 case AARCH64_OPND_UIMM3_OP2:
2070 case AARCH64_OPND_SVE_UIMM3:
2071 case AARCH64_OPND_SVE_UIMM7:
2072 case AARCH64_OPND_SVE_UIMM8:
2073 case AARCH64_OPND_SVE_UIMM8_53:
2074 size = get_operand_fields_width (get_operand_from_code (type));
2075 assert (size < 32);
2076 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2077 {
2078 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2079 (1 << size) - 1);
2080 return 0;
2081 }
2082 break;
2083
2084 case AARCH64_OPND_SIMM5:
2085 case AARCH64_OPND_SVE_SIMM5:
2086 case AARCH64_OPND_SVE_SIMM5B:
2087 case AARCH64_OPND_SVE_SIMM6:
2088 case AARCH64_OPND_SVE_SIMM8:
2089 size = get_operand_fields_width (get_operand_from_code (type));
2090 assert (size < 32);
2091 if (!value_fit_signed_field_p (opnd->imm.value, size))
2092 {
2093 set_imm_out_of_range_error (mismatch_detail, idx,
2094 -(1 << (size - 1)),
2095 (1 << (size - 1)) - 1);
2096 return 0;
2097 }
2098 break;
2099
2100 case AARCH64_OPND_WIDTH:
2101 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2102 && opnds[0].type == AARCH64_OPND_Rd);
2103 size = get_upper_bound (qualifier);
2104 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2105 /* lsb+width <= reg.size */
2106 {
2107 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2108 size - opnds[idx-1].imm.value);
2109 return 0;
2110 }
2111 break;
2112
2113 case AARCH64_OPND_LIMM:
2114 case AARCH64_OPND_SVE_LIMM:
2115 {
2116 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2117 uint64_t uimm = opnd->imm.value;
2118 if (opcode->op == OP_BIC)
2119 uimm = ~uimm;
2120 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2121 {
2122 set_other_error (mismatch_detail, idx,
2123 _("immediate out of range"));
2124 return 0;
2125 }
2126 }
2127 break;
2128
2129 case AARCH64_OPND_IMM0:
2130 case AARCH64_OPND_FPIMM0:
2131 if (opnd->imm.value != 0)
2132 {
2133 set_other_error (mismatch_detail, idx,
2134 _("immediate zero expected"));
2135 return 0;
2136 }
2137 break;
2138
2139 case AARCH64_OPND_IMM_ROT1:
2140 case AARCH64_OPND_IMM_ROT2:
2141 case AARCH64_OPND_SVE_IMM_ROT2:
2142 if (opnd->imm.value != 0
2143 && opnd->imm.value != 90
2144 && opnd->imm.value != 180
2145 && opnd->imm.value != 270)
2146 {
2147 set_other_error (mismatch_detail, idx,
2148 _("rotate expected to be 0, 90, 180 or 270"));
2149 return 0;
2150 }
2151 break;
2152
2153 case AARCH64_OPND_IMM_ROT3:
2154 case AARCH64_OPND_SVE_IMM_ROT1:
2155 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2156 {
2157 set_other_error (mismatch_detail, idx,
2158 _("rotate expected to be 90 or 270"));
2159 return 0;
2160 }
2161 break;
2162
2163 case AARCH64_OPND_SHLL_IMM:
2164 assert (idx == 2);
2165 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2166 if (opnd->imm.value != size)
2167 {
2168 set_other_error (mismatch_detail, idx,
2169 _("invalid shift amount"));
2170 return 0;
2171 }
2172 break;
2173
2174 case AARCH64_OPND_IMM_VLSL:
2175 size = aarch64_get_qualifier_esize (qualifier);
2176 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2177 {
2178 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2179 size * 8 - 1);
2180 return 0;
2181 }
2182 break;
2183
2184 case AARCH64_OPND_IMM_VLSR:
2185 size = aarch64_get_qualifier_esize (qualifier);
2186 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2187 {
2188 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2189 return 0;
2190 }
2191 break;
2192
2193 case AARCH64_OPND_SIMD_IMM:
2194 case AARCH64_OPND_SIMD_IMM_SFT:
2195 /* Qualifier check. */
2196 switch (qualifier)
2197 {
2198 case AARCH64_OPND_QLF_LSL:
2199 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2200 {
2201 set_other_error (mismatch_detail, idx,
2202 _("invalid shift operator"));
2203 return 0;
2204 }
2205 break;
2206 case AARCH64_OPND_QLF_MSL:
2207 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2208 {
2209 set_other_error (mismatch_detail, idx,
2210 _("invalid shift operator"));
2211 return 0;
2212 }
2213 break;
2214 case AARCH64_OPND_QLF_NIL:
2215 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2216 {
2217 set_other_error (mismatch_detail, idx,
2218 _("shift is not permitted"));
2219 return 0;
2220 }
2221 break;
2222 default:
2223 assert (0);
2224 return 0;
2225 }
2226 /* Is the immediate valid? */
2227 assert (idx == 1);
2228 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2229 {
2230 /* uimm8 or simm8 */
2231 if (!value_in_range_p (opnd->imm.value, -128, 255))
2232 {
2233 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2234 return 0;
2235 }
2236 }
2237 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2238 {
2239 /* uimm64 is not
2240 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2241 ffffffffgggggggghhhhhhhh'. */
2242 set_other_error (mismatch_detail, idx,
2243 _("invalid value for immediate"));
2244 return 0;
2245 }
2246 /* Is the shift amount valid? */
2247 switch (opnd->shifter.kind)
2248 {
2249 case AARCH64_MOD_LSL:
2250 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2251 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2252 {
2253 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2254 (size - 1) * 8);
2255 return 0;
2256 }
2257 if (!value_aligned_p (opnd->shifter.amount, 8))
2258 {
2259 set_unaligned_error (mismatch_detail, idx, 8);
2260 return 0;
2261 }
2262 break;
2263 case AARCH64_MOD_MSL:
2264 /* Only 8 and 16 are valid shift amount. */
2265 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2266 {
2267 set_other_error (mismatch_detail, idx,
2268 _("shift amount must be 0 or 16"));
2269 return 0;
2270 }
2271 break;
2272 default:
2273 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2274 {
2275 set_other_error (mismatch_detail, idx,
2276 _("invalid shift operator"));
2277 return 0;
2278 }
2279 break;
2280 }
2281 break;
2282
2283 case AARCH64_OPND_FPIMM:
2284 case AARCH64_OPND_SIMD_FPIMM:
2285 case AARCH64_OPND_SVE_FPIMM8:
2286 if (opnd->imm.is_fp == 0)
2287 {
2288 set_other_error (mismatch_detail, idx,
2289 _("floating-point immediate expected"));
2290 return 0;
2291 }
2292 /* The value is expected to be an 8-bit floating-point constant with
2293 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2294 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2295 instruction). */
2296 if (!value_in_range_p (opnd->imm.value, 0, 255))
2297 {
2298 set_other_error (mismatch_detail, idx,
2299 _("immediate out of range"));
2300 return 0;
2301 }
2302 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2303 {
2304 set_other_error (mismatch_detail, idx,
2305 _("invalid shift operator"));
2306 return 0;
2307 }
2308 break;
2309
2310 case AARCH64_OPND_SVE_AIMM:
2311 min_value = 0;
2312 sve_aimm:
2313 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2314 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2315 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2316 uvalue = opnd->imm.value;
2317 shift = opnd->shifter.amount;
2318 if (size == 1)
2319 {
2320 if (shift != 0)
2321 {
2322 set_other_error (mismatch_detail, idx,
2323 _("no shift amount allowed for"
2324 " 8-bit constants"));
2325 return 0;
2326 }
2327 }
2328 else
2329 {
2330 if (shift != 0 && shift != 8)
2331 {
2332 set_other_error (mismatch_detail, idx,
2333 _("shift amount must be 0 or 8"));
2334 return 0;
2335 }
2336 if (shift == 0 && (uvalue & 0xff) == 0)
2337 {
2338 shift = 8;
2339 uvalue = (int64_t) uvalue / 256;
2340 }
2341 }
2342 mask >>= shift;
2343 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2344 {
2345 set_other_error (mismatch_detail, idx,
2346 _("immediate too big for element size"));
2347 return 0;
2348 }
2349 uvalue = (uvalue - min_value) & mask;
2350 if (uvalue > 0xff)
2351 {
2352 set_other_error (mismatch_detail, idx,
2353 _("invalid arithmetic immediate"));
2354 return 0;
2355 }
2356 break;
2357
2358 case AARCH64_OPND_SVE_ASIMM:
2359 min_value = -128;
2360 goto sve_aimm;
2361
2362 case AARCH64_OPND_SVE_I1_HALF_ONE:
2363 assert (opnd->imm.is_fp);
2364 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2365 {
2366 set_other_error (mismatch_detail, idx,
2367 _("floating-point value must be 0.5 or 1.0"));
2368 return 0;
2369 }
2370 break;
2371
2372 case AARCH64_OPND_SVE_I1_HALF_TWO:
2373 assert (opnd->imm.is_fp);
2374 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2375 {
2376 set_other_error (mismatch_detail, idx,
2377 _("floating-point value must be 0.5 or 2.0"));
2378 return 0;
2379 }
2380 break;
2381
2382 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2383 assert (opnd->imm.is_fp);
2384 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2385 {
2386 set_other_error (mismatch_detail, idx,
2387 _("floating-point value must be 0.0 or 1.0"));
2388 return 0;
2389 }
2390 break;
2391
2392 case AARCH64_OPND_SVE_INV_LIMM:
2393 {
2394 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2395 uint64_t uimm = ~opnd->imm.value;
2396 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2397 {
2398 set_other_error (mismatch_detail, idx,
2399 _("immediate out of range"));
2400 return 0;
2401 }
2402 }
2403 break;
2404
2405 case AARCH64_OPND_SVE_LIMM_MOV:
2406 {
2407 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2408 uint64_t uimm = opnd->imm.value;
2409 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2410 {
2411 set_other_error (mismatch_detail, idx,
2412 _("immediate out of range"));
2413 return 0;
2414 }
2415 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2416 {
2417 set_other_error (mismatch_detail, idx,
2418 _("invalid replicated MOV immediate"));
2419 return 0;
2420 }
2421 }
2422 break;
2423
2424 case AARCH64_OPND_SVE_PATTERN_SCALED:
2425 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2426 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2427 {
2428 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2429 return 0;
2430 }
2431 break;
2432
2433 case AARCH64_OPND_SVE_SHLIMM_PRED:
2434 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2435 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2436 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2437 {
2438 set_imm_out_of_range_error (mismatch_detail, idx,
2439 0, 8 * size - 1);
2440 return 0;
2441 }
2442 break;
2443
2444 case AARCH64_OPND_SVE_SHRIMM_PRED:
2445 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2446 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2447 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2448 {
2449 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2450 return 0;
2451 }
2452 break;
2453
2454 default:
2455 break;
2456 }
2457 break;
2458
2459 case AARCH64_OPND_CLASS_SYSTEM:
2460 switch (type)
2461 {
2462 case AARCH64_OPND_PSTATEFIELD:
2463 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2464 /* MSR UAO, #uimm4
2465 MSR PAN, #uimm4
2466 The immediate must be #0 or #1. */
2467 if ((opnd->pstatefield == 0x03 /* UAO. */
2468 || opnd->pstatefield == 0x04) /* PAN. */
2469 && opnds[1].imm.value > 1)
2470 {
2471 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2472 return 0;
2473 }
2474 /* MSR SPSel, #uimm4
2475 Uses uimm4 as a control value to select the stack pointer: if
2476 bit 0 is set it selects the current exception level's stack
2477 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2478 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2479 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2480 {
2481 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2482 return 0;
2483 }
2484 break;
2485 default:
2486 break;
2487 }
2488 break;
2489
2490 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2491 /* Get the upper bound for the element index. */
2492 if (opcode->op == OP_FCMLA_ELEM)
2493 /* FCMLA index range depends on the vector size of other operands
2494 and is halfed because complex numbers take two elements. */
2495 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2496 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2497 else
2498 num = 16;
2499 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2500
2501 /* Index out-of-range. */
2502 if (!value_in_range_p (opnd->reglane.index, 0, num))
2503 {
2504 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2505 return 0;
2506 }
2507 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2508 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2509 number is encoded in "size:M:Rm":
2510 size <Vm>
2511 00 RESERVED
2512 01 0:Rm
2513 10 M:Rm
2514 11 RESERVED */
2515 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2516 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2517 {
2518 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2519 return 0;
2520 }
2521 break;
2522
2523 case AARCH64_OPND_CLASS_MODIFIED_REG:
2524 assert (idx == 1 || idx == 2);
2525 switch (type)
2526 {
2527 case AARCH64_OPND_Rm_EXT:
2528 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2529 && opnd->shifter.kind != AARCH64_MOD_LSL)
2530 {
2531 set_other_error (mismatch_detail, idx,
2532 _("extend operator expected"));
2533 return 0;
2534 }
2535 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2536 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2537 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2538 case. */
2539 if (!aarch64_stack_pointer_p (opnds + 0)
2540 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2541 {
2542 if (!opnd->shifter.operator_present)
2543 {
2544 set_other_error (mismatch_detail, idx,
2545 _("missing extend operator"));
2546 return 0;
2547 }
2548 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2549 {
2550 set_other_error (mismatch_detail, idx,
2551 _("'LSL' operator not allowed"));
2552 return 0;
2553 }
2554 }
2555 assert (opnd->shifter.operator_present /* Default to LSL. */
2556 || opnd->shifter.kind == AARCH64_MOD_LSL);
2557 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2558 {
2559 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2560 return 0;
2561 }
2562 /* In the 64-bit form, the final register operand is written as Wm
2563 for all but the (possibly omitted) UXTX/LSL and SXTX
2564 operators.
2565 N.B. GAS allows X register to be used with any operator as a
2566 programming convenience. */
2567 if (qualifier == AARCH64_OPND_QLF_X
2568 && opnd->shifter.kind != AARCH64_MOD_LSL
2569 && opnd->shifter.kind != AARCH64_MOD_UXTX
2570 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2571 {
2572 set_other_error (mismatch_detail, idx, _("W register expected"));
2573 return 0;
2574 }
2575 break;
2576
2577 case AARCH64_OPND_Rm_SFT:
2578 /* ROR is not available to the shifted register operand in
2579 arithmetic instructions. */
2580 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2581 {
2582 set_other_error (mismatch_detail, idx,
2583 _("shift operator expected"));
2584 return 0;
2585 }
2586 if (opnd->shifter.kind == AARCH64_MOD_ROR
2587 && opcode->iclass != log_shift)
2588 {
2589 set_other_error (mismatch_detail, idx,
2590 _("'ROR' operator not allowed"));
2591 return 0;
2592 }
2593 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2594 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2595 {
2596 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2597 return 0;
2598 }
2599 break;
2600
2601 default:
2602 break;
2603 }
2604 break;
2605
2606 default:
2607 break;
2608 }
2609
2610 return 1;
2611 }
2612
2613 /* Main entrypoint for the operand constraint checking.
2614
2615 Return 1 if operands of *INST meet the constraint applied by the operand
2616 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2617 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2618 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2619 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2620 error kind when it is notified that an instruction does not pass the check).
2621
2622 Un-determined operand qualifiers may get established during the process. */
2623
2624 int
2625 aarch64_match_operands_constraint (aarch64_inst *inst,
2626 aarch64_operand_error *mismatch_detail)
2627 {
2628 int i;
2629
2630 DEBUG_TRACE ("enter");
2631
2632 /* Check for cases where a source register needs to be the same as the
2633 destination register. Do this before matching qualifiers since if
2634 an instruction has both invalid tying and invalid qualifiers,
2635 the error about qualifiers would suggest several alternative
2636 instructions that also have invalid tying. */
2637 i = inst->opcode->tied_operand;
2638 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2639 {
2640 if (mismatch_detail)
2641 {
2642 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2643 mismatch_detail->index = i;
2644 mismatch_detail->error = NULL;
2645 }
2646 return 0;
2647 }
2648
2649 /* Match operands' qualifier.
2650 *INST has already had qualifier establish for some, if not all, of
2651 its operands; we need to find out whether these established
2652 qualifiers match one of the qualifier sequence in
2653 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2654 with the corresponding qualifier in such a sequence.
2655 Only basic operand constraint checking is done here; the more thorough
2656 constraint checking will carried out by operand_general_constraint_met_p,
2657 which has be to called after this in order to get all of the operands'
2658 qualifiers established. */
2659 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2660 {
2661 DEBUG_TRACE ("FAIL on operand qualifier matching");
2662 if (mismatch_detail)
2663 {
2664 /* Return an error type to indicate that it is the qualifier
2665 matching failure; we don't care about which operand as there
2666 are enough information in the opcode table to reproduce it. */
2667 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2668 mismatch_detail->index = -1;
2669 mismatch_detail->error = NULL;
2670 }
2671 return 0;
2672 }
2673
2674 /* Match operands' constraint. */
2675 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2676 {
2677 enum aarch64_opnd type = inst->opcode->operands[i];
2678 if (type == AARCH64_OPND_NIL)
2679 break;
2680 if (inst->operands[i].skip)
2681 {
2682 DEBUG_TRACE ("skip the incomplete operand %d", i);
2683 continue;
2684 }
2685 if (operand_general_constraint_met_p (inst->operands, i, type,
2686 inst->opcode, mismatch_detail) == 0)
2687 {
2688 DEBUG_TRACE ("FAIL on operand %d", i);
2689 return 0;
2690 }
2691 }
2692
2693 DEBUG_TRACE ("PASS");
2694
2695 return 1;
2696 }
2697
2698 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2699 Also updates the TYPE of each INST->OPERANDS with the corresponding
2700 value of OPCODE->OPERANDS.
2701
2702 Note that some operand qualifiers may need to be manually cleared by
2703 the caller before it further calls the aarch64_opcode_encode; by
2704 doing this, it helps the qualifier matching facilities work
2705 properly. */
2706
2707 const aarch64_opcode*
2708 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2709 {
2710 int i;
2711 const aarch64_opcode *old = inst->opcode;
2712
2713 inst->opcode = opcode;
2714
2715 /* Update the operand types. */
2716 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2717 {
2718 inst->operands[i].type = opcode->operands[i];
2719 if (opcode->operands[i] == AARCH64_OPND_NIL)
2720 break;
2721 }
2722
2723 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2724
2725 return old;
2726 }
2727
2728 int
2729 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2730 {
2731 int i;
2732 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2733 if (operands[i] == operand)
2734 return i;
2735 else if (operands[i] == AARCH64_OPND_NIL)
2736 break;
2737 return -1;
2738 }
2739 \f
/* Expand to the names of registers R0...R30, followed by FOR31 (the
   architecturally special register 31: SP/ZR or a variant thereof).  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* General-purpose register names, indexed by [has_zr][is_64][regno]:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2770
2771 /* Return the integer register name.
2772 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2773
2774 static inline const char *
2775 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2776 {
2777 const int has_zr = sp_reg_p ? 0 : 1;
2778 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2779 return int_reg[has_zr][is_64][regno];
2780 }
2781
2782 /* Like get_int_reg_name, but IS_64 is always 1. */
2783
2784 static inline const char *
2785 get_64bit_int_reg_name (int regno, int sp_reg_p)
2786 {
2787 const int has_zr = sp_reg_p ? 0 : 1;
2788 return int_reg[has_zr][1][regno];
2789 }
2790
2791 /* Get the name of the integer offset register in OPND, using the shift type
2792 to decide whether it's a word or doubleword. */
2793
2794 static inline const char *
2795 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2796 {
2797 switch (opnd->shifter.kind)
2798 {
2799 case AARCH64_MOD_UXTW:
2800 case AARCH64_MOD_SXTW:
2801 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2802
2803 case AARCH64_MOD_LSL:
2804 case AARCH64_MOD_SXTX:
2805 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2806
2807 default:
2808 abort ();
2809 }
2810 }
2811
2812 /* Get the name of the SVE vector offset register in OPND, using the operand
2813 qualifier to decide whether the suffix should be .S or .D. */
2814
2815 static inline const char *
2816 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2817 {
2818 assert (qualifier == AARCH64_OPND_QLF_S_S
2819 || qualifier == AARCH64_OPND_QLF_S_D);
2820 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2821 }
2822
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union allows the bit pattern of a float to be read and written as
   an unsigned integer of the same width.  */

/* 64-bit pattern <-> double.  */
typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

/* 32-bit pattern <-> float.  */
typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision values are expanded to single precision (see
   expand_fp_imm), so this uses the same layout as single_conv_t.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2842
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      /* Build the top 32 bits of the double-precision pattern; the low
	 32 bits are always zero.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)  */
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>  */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7> */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>) */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2886
2887 /* Produce the string representation of the register list operand *OPND
2888 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2889 the register name that comes before the register number, such as "v". */
2890 static void
2891 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2892 const char *prefix)
2893 {
2894 const int num_regs = opnd->reglist.num_regs;
2895 const int first_reg = opnd->reglist.first_regno;
2896 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2897 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2898 char tb[8]; /* Temporary buffer. */
2899
2900 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2901 assert (num_regs >= 1 && num_regs <= 4);
2902
2903 /* Prepare the index if any. */
2904 if (opnd->reglist.has_index)
2905 /* PR 21096: The %100 is to silence a warning about possible truncation. */
2906 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
2907 else
2908 tb[0] = '\0';
2909
2910 /* The hyphenated form is preferred for disassembly if there are
2911 more than two registers in the list, and the register numbers
2912 are monotonically increasing in increments of one. */
2913 if (num_regs > 2 && last_reg > first_reg)
2914 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2915 prefix, last_reg, qlf_name, tb);
2916 else
2917 {
2918 const int reg0 = first_reg;
2919 const int reg1 = (first_reg + 1) & 0x1f;
2920 const int reg2 = (first_reg + 2) & 0x1f;
2921 const int reg3 = (first_reg + 3) & 0x1f;
2922
2923 switch (num_regs)
2924 {
2925 case 1:
2926 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2927 break;
2928 case 2:
2929 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2930 prefix, reg1, qlf_name, tb);
2931 break;
2932 case 3:
2933 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2934 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2935 prefix, reg2, qlf_name, tb);
2936 break;
2937 case 4:
2938 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2939 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2940 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2941 break;
2942 }
2943 }
2944 }
2945
2946 /* Print the register+immediate address in OPND to BUF, which has SIZE
2947 characters. BASE is the name of the base register. */
2948
2949 static void
2950 print_immediate_offset_address (char *buf, size_t size,
2951 const aarch64_opnd_info *opnd,
2952 const char *base)
2953 {
2954 if (opnd->addr.writeback)
2955 {
2956 if (opnd->addr.preind)
2957 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
2958 else
2959 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
2960 }
2961 else
2962 {
2963 if (opnd->shifter.operator_present)
2964 {
2965 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2966 snprintf (buf, size, "[%s, #%d, mul vl]",
2967 base, opnd->addr.offset.imm);
2968 }
2969 else if (opnd->addr.offset.imm)
2970 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
2971 else
2972 snprintf (buf, size, "[%s]", base);
2973 }
2974 }
2975
2976 /* Produce the string representation of the register offset address operand
2977 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2978 the names of the base and offset registers. */
2979 static void
2980 print_register_offset_address (char *buf, size_t size,
2981 const aarch64_opnd_info *opnd,
2982 const char *base, const char *offset)
2983 {
2984 char tb[16]; /* Temporary buffer. */
2985 bfd_boolean print_extend_p = TRUE;
2986 bfd_boolean print_amount_p = TRUE;
2987 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2988
2989 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2990 || !opnd->shifter.amount_present))
2991 {
2992 /* Not print the shift/extend amount when the amount is zero and
2993 when it is not the special case of 8-bit load/store instruction. */
2994 print_amount_p = FALSE;
2995 /* Likewise, no need to print the shift operator LSL in such a
2996 situation. */
2997 if (opnd->shifter.kind == AARCH64_MOD_LSL)
2998 print_extend_p = FALSE;
2999 }
3000
3001 /* Prepare for the extend/shift. */
3002 if (print_extend_p)
3003 {
3004 if (print_amount_p)
3005 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3006 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3007 (opnd->shifter.amount % 100));
3008 else
3009 snprintf (tb, sizeof (tb), ", %s", shift_name);
3010 }
3011 else
3012 tb[0] = '\0';
3013
3014 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3015 }
3016
3017 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3018 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3019 PC, PCREL_P and ADDRESS are used to pass in and return information about
3020 the PC-relative address calculation, where the PC value is passed in
3021 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3022 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3023 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3024
3025 The function serves both the disassembler and the assembler diagnostics
3026 issuer, which is the reason why it lives in this file. */
3027
3028 void
3029 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3030 const aarch64_opcode *opcode,
3031 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3032 bfd_vma *address)
3033 {
3034 unsigned int i, num_conds;
3035 const char *name = NULL;
3036 const aarch64_opnd_info *opnd = opnds + idx;
3037 enum aarch64_modifier_kind kind;
3038 uint64_t addr, enum_value;
3039
3040 buf[0] = '\0';
3041 if (pcrel_p)
3042 *pcrel_p = 0;
3043
3044 switch (opnd->type)
3045 {
3046 case AARCH64_OPND_Rd:
3047 case AARCH64_OPND_Rn:
3048 case AARCH64_OPND_Rm:
3049 case AARCH64_OPND_Rt:
3050 case AARCH64_OPND_Rt2:
3051 case AARCH64_OPND_Rs:
3052 case AARCH64_OPND_Ra:
3053 case AARCH64_OPND_Rt_SYS:
3054 case AARCH64_OPND_PAIRREG:
3055 case AARCH64_OPND_SVE_Rm:
3056 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3057 the <ic_op>, therefore we use opnd->present to override the
3058 generic optional-ness information. */
3059 if (opnd->type == AARCH64_OPND_Rt_SYS)
3060 {
3061 if (!opnd->present)
3062 break;
3063 }
3064 /* Omit the operand, e.g. RET. */
3065 else if (optional_operand_p (opcode, idx)
3066 && (opnd->reg.regno
3067 == get_optional_operand_default_value (opcode)))
3068 break;
3069 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3070 || opnd->qualifier == AARCH64_OPND_QLF_X);
3071 snprintf (buf, size, "%s",
3072 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3073 break;
3074
3075 case AARCH64_OPND_Rd_SP:
3076 case AARCH64_OPND_Rn_SP:
3077 case AARCH64_OPND_SVE_Rn_SP:
3078 case AARCH64_OPND_Rm_SP:
3079 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3080 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3081 || opnd->qualifier == AARCH64_OPND_QLF_X
3082 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3083 snprintf (buf, size, "%s",
3084 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3085 break;
3086
3087 case AARCH64_OPND_Rm_EXT:
3088 kind = opnd->shifter.kind;
3089 assert (idx == 1 || idx == 2);
3090 if ((aarch64_stack_pointer_p (opnds)
3091 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3092 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3093 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3094 && kind == AARCH64_MOD_UXTW)
3095 || (opnd->qualifier == AARCH64_OPND_QLF_X
3096 && kind == AARCH64_MOD_UXTX)))
3097 {
3098 /* 'LSL' is the preferred form in this case. */
3099 kind = AARCH64_MOD_LSL;
3100 if (opnd->shifter.amount == 0)
3101 {
3102 /* Shifter omitted. */
3103 snprintf (buf, size, "%s",
3104 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3105 break;
3106 }
3107 }
3108 if (opnd->shifter.amount)
3109 snprintf (buf, size, "%s, %s #%" PRIi64,
3110 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3111 aarch64_operand_modifiers[kind].name,
3112 opnd->shifter.amount);
3113 else
3114 snprintf (buf, size, "%s, %s",
3115 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3116 aarch64_operand_modifiers[kind].name);
3117 break;
3118
3119 case AARCH64_OPND_Rm_SFT:
3120 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3121 || opnd->qualifier == AARCH64_OPND_QLF_X);
3122 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3123 snprintf (buf, size, "%s",
3124 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3125 else
3126 snprintf (buf, size, "%s, %s #%" PRIi64,
3127 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3128 aarch64_operand_modifiers[opnd->shifter.kind].name,
3129 opnd->shifter.amount);
3130 break;
3131
3132 case AARCH64_OPND_Fd:
3133 case AARCH64_OPND_Fn:
3134 case AARCH64_OPND_Fm:
3135 case AARCH64_OPND_Fa:
3136 case AARCH64_OPND_Ft:
3137 case AARCH64_OPND_Ft2:
3138 case AARCH64_OPND_Sd:
3139 case AARCH64_OPND_Sn:
3140 case AARCH64_OPND_Sm:
3141 case AARCH64_OPND_SVE_VZn:
3142 case AARCH64_OPND_SVE_Vd:
3143 case AARCH64_OPND_SVE_Vm:
3144 case AARCH64_OPND_SVE_Vn:
3145 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3146 opnd->reg.regno);
3147 break;
3148
3149 case AARCH64_OPND_Va:
3150 case AARCH64_OPND_Vd:
3151 case AARCH64_OPND_Vn:
3152 case AARCH64_OPND_Vm:
3153 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3154 aarch64_get_qualifier_name (opnd->qualifier));
3155 break;
3156
3157 case AARCH64_OPND_Ed:
3158 case AARCH64_OPND_En:
3159 case AARCH64_OPND_Em:
3160 case AARCH64_OPND_SM3_IMM2:
3161 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3162 aarch64_get_qualifier_name (opnd->qualifier),
3163 opnd->reglane.index);
3164 break;
3165
3166 case AARCH64_OPND_VdD1:
3167 case AARCH64_OPND_VnD1:
3168 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3169 break;
3170
3171 case AARCH64_OPND_LVn:
3172 case AARCH64_OPND_LVt:
3173 case AARCH64_OPND_LVt_AL:
3174 case AARCH64_OPND_LEt:
3175 print_register_list (buf, size, opnd, "v");
3176 break;
3177
3178 case AARCH64_OPND_SVE_Pd:
3179 case AARCH64_OPND_SVE_Pg3:
3180 case AARCH64_OPND_SVE_Pg4_5:
3181 case AARCH64_OPND_SVE_Pg4_10:
3182 case AARCH64_OPND_SVE_Pg4_16:
3183 case AARCH64_OPND_SVE_Pm:
3184 case AARCH64_OPND_SVE_Pn:
3185 case AARCH64_OPND_SVE_Pt:
3186 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3187 snprintf (buf, size, "p%d", opnd->reg.regno);
3188 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3189 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3190 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3191 aarch64_get_qualifier_name (opnd->qualifier));
3192 else
3193 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3194 aarch64_get_qualifier_name (opnd->qualifier));
3195 break;
3196
3197 case AARCH64_OPND_SVE_Za_5:
3198 case AARCH64_OPND_SVE_Za_16:
3199 case AARCH64_OPND_SVE_Zd:
3200 case AARCH64_OPND_SVE_Zm_5:
3201 case AARCH64_OPND_SVE_Zm_16:
3202 case AARCH64_OPND_SVE_Zn:
3203 case AARCH64_OPND_SVE_Zt:
3204 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3205 snprintf (buf, size, "z%d", opnd->reg.regno);
3206 else
3207 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3208 aarch64_get_qualifier_name (opnd->qualifier));
3209 break;
3210
3211 case AARCH64_OPND_SVE_ZnxN:
3212 case AARCH64_OPND_SVE_ZtxN:
3213 print_register_list (buf, size, opnd, "z");
3214 break;
3215
3216 case AARCH64_OPND_SVE_Zm3_INDEX:
3217 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3218 case AARCH64_OPND_SVE_Zm4_INDEX:
3219 case AARCH64_OPND_SVE_Zn_INDEX:
3220 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3221 aarch64_get_qualifier_name (opnd->qualifier),
3222 opnd->reglane.index);
3223 break;
3224
3225 case AARCH64_OPND_CRn:
3226 case AARCH64_OPND_CRm:
3227 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3228 break;
3229
3230 case AARCH64_OPND_IDX:
3231 case AARCH64_OPND_MASK:
3232 case AARCH64_OPND_IMM:
3233 case AARCH64_OPND_IMM_2:
3234 case AARCH64_OPND_WIDTH:
3235 case AARCH64_OPND_UIMM3_OP1:
3236 case AARCH64_OPND_UIMM3_OP2:
3237 case AARCH64_OPND_BIT_NUM:
3238 case AARCH64_OPND_IMM_VLSL:
3239 case AARCH64_OPND_IMM_VLSR:
3240 case AARCH64_OPND_SHLL_IMM:
3241 case AARCH64_OPND_IMM0:
3242 case AARCH64_OPND_IMMR:
3243 case AARCH64_OPND_IMMS:
3244 case AARCH64_OPND_FBITS:
3245 case AARCH64_OPND_SIMM5:
3246 case AARCH64_OPND_SVE_SHLIMM_PRED:
3247 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3248 case AARCH64_OPND_SVE_SHRIMM_PRED:
3249 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3250 case AARCH64_OPND_SVE_SIMM5:
3251 case AARCH64_OPND_SVE_SIMM5B:
3252 case AARCH64_OPND_SVE_SIMM6:
3253 case AARCH64_OPND_SVE_SIMM8:
3254 case AARCH64_OPND_SVE_UIMM3:
3255 case AARCH64_OPND_SVE_UIMM7:
3256 case AARCH64_OPND_SVE_UIMM8:
3257 case AARCH64_OPND_SVE_UIMM8_53:
3258 case AARCH64_OPND_IMM_ROT1:
3259 case AARCH64_OPND_IMM_ROT2:
3260 case AARCH64_OPND_IMM_ROT3:
3261 case AARCH64_OPND_SVE_IMM_ROT1:
3262 case AARCH64_OPND_SVE_IMM_ROT2:
3263 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3264 break;
3265
3266 case AARCH64_OPND_SVE_I1_HALF_ONE:
3267 case AARCH64_OPND_SVE_I1_HALF_TWO:
3268 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3269 {
3270 single_conv_t c;
3271 c.i = opnd->imm.value;
3272 snprintf (buf, size, "#%.1f", c.f);
3273 break;
3274 }
3275
3276 case AARCH64_OPND_SVE_PATTERN:
3277 if (optional_operand_p (opcode, idx)
3278 && opnd->imm.value == get_optional_operand_default_value (opcode))
3279 break;
3280 enum_value = opnd->imm.value;
3281 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3282 if (aarch64_sve_pattern_array[enum_value])
3283 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3284 else
3285 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3286 break;
3287
3288 case AARCH64_OPND_SVE_PATTERN_SCALED:
3289 if (optional_operand_p (opcode, idx)
3290 && !opnd->shifter.operator_present
3291 && opnd->imm.value == get_optional_operand_default_value (opcode))
3292 break;
3293 enum_value = opnd->imm.value;
3294 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3295 if (aarch64_sve_pattern_array[opnd->imm.value])
3296 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3297 else
3298 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3299 if (opnd->shifter.operator_present)
3300 {
3301 size_t len = strlen (buf);
3302 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3303 aarch64_operand_modifiers[opnd->shifter.kind].name,
3304 opnd->shifter.amount);
3305 }
3306 break;
3307
3308 case AARCH64_OPND_SVE_PRFOP:
3309 enum_value = opnd->imm.value;
3310 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3311 if (aarch64_sve_prfop_array[enum_value])
3312 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3313 else
3314 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3315 break;
3316
3317 case AARCH64_OPND_IMM_MOV:
3318 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3319 {
3320 case 4: /* e.g. MOV Wd, #<imm32>. */
3321 {
3322 int imm32 = opnd->imm.value;
3323 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3324 }
3325 break;
3326 case 8: /* e.g. MOV Xd, #<imm64>. */
3327 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3328 opnd->imm.value, opnd->imm.value);
3329 break;
3330 default: assert (0);
3331 }
3332 break;
3333
3334 case AARCH64_OPND_FPIMM0:
3335 snprintf (buf, size, "#0.0");
3336 break;
3337
3338 case AARCH64_OPND_LIMM:
3339 case AARCH64_OPND_AIMM:
3340 case AARCH64_OPND_HALF:
3341 case AARCH64_OPND_SVE_INV_LIMM:
3342 case AARCH64_OPND_SVE_LIMM:
3343 case AARCH64_OPND_SVE_LIMM_MOV:
3344 if (opnd->shifter.amount)
3345 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3346 opnd->shifter.amount);
3347 else
3348 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3349 break;
3350
3351 case AARCH64_OPND_SIMD_IMM:
3352 case AARCH64_OPND_SIMD_IMM_SFT:
3353 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3354 || opnd->shifter.kind == AARCH64_MOD_NONE)
3355 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3356 else
3357 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3358 aarch64_operand_modifiers[opnd->shifter.kind].name,
3359 opnd->shifter.amount);
3360 break;
3361
3362 case AARCH64_OPND_SVE_AIMM:
3363 case AARCH64_OPND_SVE_ASIMM:
3364 if (opnd->shifter.amount)
3365 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3366 opnd->shifter.amount);
3367 else
3368 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3369 break;
3370
3371 case AARCH64_OPND_FPIMM:
3372 case AARCH64_OPND_SIMD_FPIMM:
3373 case AARCH64_OPND_SVE_FPIMM8:
3374 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3375 {
3376 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3377 {
3378 half_conv_t c;
3379 c.i = expand_fp_imm (2, opnd->imm.value);
3380 snprintf (buf, size, "#%.18e", c.f);
3381 }
3382 break;
3383 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3384 {
3385 single_conv_t c;
3386 c.i = expand_fp_imm (4, opnd->imm.value);
3387 snprintf (buf, size, "#%.18e", c.f);
3388 }
3389 break;
      case 8: /* e.g. FMOV <Dd>, #<imm>.  */
3391 {
3392 double_conv_t c;
3393 c.i = expand_fp_imm (8, opnd->imm.value);
3394 snprintf (buf, size, "#%.18e", c.d);
3395 }
3396 break;
3397 default: assert (0);
3398 }
3399 break;
3400
3401 case AARCH64_OPND_CCMP_IMM:
3402 case AARCH64_OPND_NZCV:
3403 case AARCH64_OPND_EXCEPTION:
3404 case AARCH64_OPND_UIMM4:
3405 case AARCH64_OPND_UIMM7:
3406 if (optional_operand_p (opcode, idx) == TRUE
3407 && (opnd->imm.value ==
3408 (int64_t) get_optional_operand_default_value (opcode)))
3409 /* Omit the operand, e.g. DCPS1. */
3410 break;
3411 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3412 break;
3413
3414 case AARCH64_OPND_COND:
3415 case AARCH64_OPND_COND1:
3416 snprintf (buf, size, "%s", opnd->cond->names[0]);
3417 num_conds = ARRAY_SIZE (opnd->cond->names);
3418 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3419 {
3420 size_t len = strlen (buf);
3421 if (i == 1)
3422 snprintf (buf + len, size - len, " // %s = %s",
3423 opnd->cond->names[0], opnd->cond->names[i]);
3424 else
3425 snprintf (buf + len, size - len, ", %s",
3426 opnd->cond->names[i]);
3427 }
3428 break;
3429
3430 case AARCH64_OPND_ADDR_ADRP:
3431 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3432 + opnd->imm.value;
3433 if (pcrel_p)
3434 *pcrel_p = 1;
3435 if (address)
3436 *address = addr;
3437 /* This is not necessary during the disassembling, as print_address_func
3438 in the disassemble_info will take care of the printing. But some
3439 other callers may be still interested in getting the string in *STR,
3440 so here we do snprintf regardless. */
3441 snprintf (buf, size, "#0x%" PRIx64, addr);
3442 break;
3443
3444 case AARCH64_OPND_ADDR_PCREL14:
3445 case AARCH64_OPND_ADDR_PCREL19:
3446 case AARCH64_OPND_ADDR_PCREL21:
3447 case AARCH64_OPND_ADDR_PCREL26:
3448 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3449 if (pcrel_p)
3450 *pcrel_p = 1;
3451 if (address)
3452 *address = addr;
3453 /* This is not necessary during the disassembling, as print_address_func
3454 in the disassemble_info will take care of the printing. But some
3455 other callers may be still interested in getting the string in *STR,
3456 so here we do snprintf regardless. */
3457 snprintf (buf, size, "#0x%" PRIx64, addr);
3458 break;
3459
3460 case AARCH64_OPND_ADDR_SIMPLE:
3461 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3462 case AARCH64_OPND_SIMD_ADDR_POST:
3463 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3464 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3465 {
3466 if (opnd->addr.offset.is_reg)
3467 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3468 else
3469 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3470 }
3471 else
3472 snprintf (buf, size, "[%s]", name);
3473 break;
3474
3475 case AARCH64_OPND_ADDR_REGOFF:
3476 case AARCH64_OPND_SVE_ADDR_RR:
3477 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3478 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3479 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3480 case AARCH64_OPND_SVE_ADDR_RX:
3481 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3482 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3483 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3484 print_register_offset_address
3485 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3486 get_offset_int_reg_name (opnd));
3487 break;
3488
3489 case AARCH64_OPND_SVE_ADDR_RZ:
3490 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3491 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3492 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3493 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3494 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3495 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3496 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3497 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3498 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3499 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3500 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3501 print_register_offset_address
3502 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3503 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3504 break;
3505
3506 case AARCH64_OPND_ADDR_SIMM7:
3507 case AARCH64_OPND_ADDR_SIMM9:
3508 case AARCH64_OPND_ADDR_SIMM9_2:
3509 case AARCH64_OPND_ADDR_SIMM10:
3510 case AARCH64_OPND_ADDR_OFFSET:
3511 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3512 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3513 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3514 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3515 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3516 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3517 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3518 case AARCH64_OPND_SVE_ADDR_RI_U6:
3519 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3520 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3521 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3522 print_immediate_offset_address
3523 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3524 break;
3525
3526 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3527 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3528 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3529 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3530 print_immediate_offset_address
3531 (buf, size, opnd,
3532 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3533 break;
3534
3535 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3536 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3537 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3538 print_register_offset_address
3539 (buf, size, opnd,
3540 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3541 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3542 break;
3543
3544 case AARCH64_OPND_ADDR_UIMM12:
3545 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3546 if (opnd->addr.offset.imm)
3547 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3548 else
3549 snprintf (buf, size, "[%s]", name);
3550 break;
3551
3552 case AARCH64_OPND_SYSREG:
3553 for (i = 0; aarch64_sys_regs[i].name; ++i)
3554 if (aarch64_sys_regs[i].value == opnd->sysreg
3555 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
3556 break;
3557 if (aarch64_sys_regs[i].name)
3558 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
3559 else
3560 {
3561 /* Implementation defined system register. */
3562 unsigned int value = opnd->sysreg;
3563 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3564 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3565 value & 0x7);
3566 }
3567 break;
3568
3569 case AARCH64_OPND_PSTATEFIELD:
3570 for (i = 0; aarch64_pstatefields[i].name; ++i)
3571 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3572 break;
3573 assert (aarch64_pstatefields[i].name);
3574 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3575 break;
3576
3577 case AARCH64_OPND_SYSREG_AT:
3578 case AARCH64_OPND_SYSREG_DC:
3579 case AARCH64_OPND_SYSREG_IC:
3580 case AARCH64_OPND_SYSREG_TLBI:
3581 snprintf (buf, size, "%s", opnd->sysins_op->name);
3582 break;
3583
3584 case AARCH64_OPND_BARRIER:
3585 snprintf (buf, size, "%s", opnd->barrier->name);
3586 break;
3587
3588 case AARCH64_OPND_BARRIER_ISB:
3589 /* Operand can be omitted, e.g. in DCPS1. */
3590 if (! optional_operand_p (opcode, idx)
3591 || (opnd->barrier->value
3592 != get_optional_operand_default_value (opcode)))
3593 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3594 break;
3595
3596 case AARCH64_OPND_PRFOP:
3597 if (opnd->prfop->name != NULL)
3598 snprintf (buf, size, "%s", opnd->prfop->name);
3599 else
3600 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3601 break;
3602
3603 case AARCH64_OPND_BARRIER_PSB:
3604 snprintf (buf, size, "%s", opnd->hint_option->name);
3605 break;
3606
3607 default:
3608 assert (0);
3609 }
3610 }
3611 \f
/* Encode the (op0, op1, CRn, CRm, op2) fields of a system register or
   system instruction into the single integer value used throughout the
   tables below.  The fields are packed at bit positions 19/16/12/8/5 and
   the packed value is then shifted right by 5, yielding the layout
   op0[15:14] op1[13:11] CRn[10:7] CRm[6:3] op2[2:0] -- the same layout
   unpacked by the implementation-defined SYSREG printing code above
   ("s%u_%u_c%u_c%u_%u").  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* For "3.9.3 Instructions for Accessing Special Purpose Registers":
   op0 is always 3 and CRn is always 4 for these encodings.  (The section
   numbers presumably refer to an Arm architecture document -- TODO
   confirm which edition.)  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* For "3.9.10 System Instructions": op0 is always 1.  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Readable aliases for the CRn/CRm register-number arguments used in the
   encodings below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

/* Flag values for the third field of the aarch64_sys_regs entries below.
   Each is #undef'd first in case something else in the include chain
   already defined a name with the same spelling.  */
#ifdef F_DEPRECATED
#undef F_DEPRECATED
#endif
#define F_DEPRECATED 0x1 /* Deprecated system register.  */

#ifdef F_ARCHEXT
#undef F_ARCHEXT
#endif
#define F_ARCHEXT 0x2 /* Architecture dependent system register.  */

#ifdef F_HASXT
#undef F_HASXT
#endif
#define F_HASXT 0x4 /* System instruction register <Xt> operand.  */
3651
3652
/* TODO: there are two more issues that need to be resolved:
   1. handle read-only and write-only system registers;
   2. handle CPU-implementation-defined system registers.  */
3656 const aarch64_sys_reg aarch64_sys_regs [] =
3657 {
3658 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3659 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3660 { "elr_el1", CPEN_(0,C0,1), 0 },
3661 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3662 { "sp_el0", CPEN_(0,C1,0), 0 },
3663 { "spsel", CPEN_(0,C2,0), 0 },
3664 { "daif", CPEN_(3,C2,1), 0 },
3665 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
3666 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3667 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3668 { "nzcv", CPEN_(3,C2,0), 0 },
3669 { "fpcr", CPEN_(3,C4,0), 0 },
3670 { "fpsr", CPEN_(3,C4,1), 0 },
3671 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3672 { "dlr_el0", CPEN_(3,C5,1), 0 },
3673 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3674 { "elr_el2", CPEN_(4,C0,1), 0 },
3675 { "sp_el1", CPEN_(4,C1,0), 0 },
3676 { "spsr_irq", CPEN_(4,C3,0), 0 },
3677 { "spsr_abt", CPEN_(4,C3,1), 0 },
3678 { "spsr_und", CPEN_(4,C3,2), 0 },
3679 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3680 { "spsr_el3", CPEN_(6,C0,0), 0 },
3681 { "elr_el3", CPEN_(6,C0,1), 0 },
3682 { "sp_el2", CPEN_(6,C1,0), 0 },
3683 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3684 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3685 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
3686 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
3687 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
3688 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
3689 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
3690 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
3691 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
3692 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
3693 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
3694 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
3695 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
3696 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
3697 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
3698 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
3699 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
3700 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
3701 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
3702 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
3703 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
3704 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
3705 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
3706 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
3707 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
3708 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
3709 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
3710 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
3711 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
3712 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
3713 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
3714 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
3715 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
3716 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
3717 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
3718 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
3719 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
3720 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
3721 { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT }, /* RO */
3722 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
3723 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
3724 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3725 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3726 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3727 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3728 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3729 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3730 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3731 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3732 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3733 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3734 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3735 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3736 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3737 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3738 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3739 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3740 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3741 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3742 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3743 { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3744 { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3745 { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3746 { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3747 { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3748 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3749 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3750 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3751 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3752 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3753 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3754 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3755 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3756 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3757 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3758 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3759 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3760 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3761 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3762 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3763 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3764 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3765 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3766 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3767 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3768 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3769 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3770 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3771 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3772 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3773 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3774 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3775 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3776 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3777 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3778 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3779 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3780 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3781 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3782 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3783 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
3784 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3785 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
3786 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3787 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
3788 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3789 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3790 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3791 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3792 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3793 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3794 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3795 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3796 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3797 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3798 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3799 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3800 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3801 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3802 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3803 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3804 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3805 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3806 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3807 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3808 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3809 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3810 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3811 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
3812 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
3813 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
3814 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3815 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3816 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3817 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
3818 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3819 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3820 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3821 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3822 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3823 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3824 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
3825 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3826 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3827 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3828 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3829 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
3830 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
3831 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
3832 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3833 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3834 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3835 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3836 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3837 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3838 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3839 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3840 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3841 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3842 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3843 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3844 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3845 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3846 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3847 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3848 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3849 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3850 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3851 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3852 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3853 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3854 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3855 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3856 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3857 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3858 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3859 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3860 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3861 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3862 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3863 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3864 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3865 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3866 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3867 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3868 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3869 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3870 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3871 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3872 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3873 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3874 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3875 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3876 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3877 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3878 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3879 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3880 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3881 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3882 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3883 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3884 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3885 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3886 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3887 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3888 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3889 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3890 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3891 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3892 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3893 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3894 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3895 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3896 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3897 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3898 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3899 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3900 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3901 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3902 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3903 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3904 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3905 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3906 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3907 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3908 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3909 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3910 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3911 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3912 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3913 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3914 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3915 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3916 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3917 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3918 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3919 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3920 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3921 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3922 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3923 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3924 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3925 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3926 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3927 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3928 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3929 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3930 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3931 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3932 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3933 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3934 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3935 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3936 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3937 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3938 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3939 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3940 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3941 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3942 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3943 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3944 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3945 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3946 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3947 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3948 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3949 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3950 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3951 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3952 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3953 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3954 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3955 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3956 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3957 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3958 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3959 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3960 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3961 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3962 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3963 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3964 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3965 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3966 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3967 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3968 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3969 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3970 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3971 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3972 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3973 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3974 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3975 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3976 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3977 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3978 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3979 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3980 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3981 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3982 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3983 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3984 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3985 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3986 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3987 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3988 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3989 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3990 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3991 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3992 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3993 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3994 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3995 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3996 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3997 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3998 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3999 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
4000 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
4001 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
4002 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
4003 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
4004 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
4005 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
4006 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
4007 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
4008 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
4009 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
4010 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
4011 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
4012 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
4013 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
4014 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
4015 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
4016 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
4017 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
4018 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
4019 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
4020 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
4021 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
4022 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
4023 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
4024 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
4025 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
4026 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
4027 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
4028 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
4029 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
4030 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
4031 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
4032 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
4033 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
4034 { 0, CPENC(0,0,0,0,0), 0 },
4035 };
4036
4037 bfd_boolean
4038 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4039 {
4040 return (reg->flags & F_DEPRECATED) != 0;
4041 }
4042
/* Return TRUE if the system register REG is accessible given the enabled
   architectural feature set FEATURES.  Registers without the F_ARCHEXT
   flag belong to the base architecture and are always supported; the
   remaining checks match specific encodings (from aarch64_sys_regs)
   against the architecture extension that introduced them.  */
bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  /* Base-architecture register: always available.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
     ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension (PMB*_EL1, PMS*_EL1, PMSCR_EL2,
     PMSCR_EL12; the pmb*/pms* entries of aarch64_sys_regs).  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  /* ARMv8.3 Pointer authentication keys.  */
  if ((reg->value == CPENC (3, 0, C2, C1, 0)
       || reg->value == CPENC (3, 0, C2, C1, 1)
       || reg->value == CPENC (3, 0, C2, C1, 2)
       || reg->value == CPENC (3, 0, C2, C1, 3)
       || reg->value == CPENC (3, 0, C2, C2, 0)
       || reg->value == CPENC (3, 0, C2, C2, 1)
       || reg->value == CPENC (3, 0, C2, C2, 2)
       || reg->value == CPENC (3, 0, C2, C2, 3)
       || reg->value == CPENC (3, 0, C2, C3, 0)
       || reg->value == CPENC (3, 0, C2, C3, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
    return FALSE;

  /* SVE.  */
  if ((reg->value == CPENC (3, 0, C0, C4, 4)
       || reg->value == CPENC (3, 0, C1, C2, 0)
       || reg->value == CPENC (3, 4, C1, C2, 0)
       || reg->value == CPENC (3, 6, C1, C2, 0)
       || reg->value == CPENC (3, 5, C1, C2, 0)
       || reg->value == CPENC (3, 0, C0, C0, 7))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
    return FALSE;

  /* No extension gate matched the F_ARCHEXT register; treat it as
     supported.  */
  return TRUE;
}
4173
/* PSTATE field names and their encodings for the MSR (immediate)
   instruction.  Entries flagged F_ARCHEXT are gated on the enabled
   feature set by aarch64_pstatefield_supported_p.  The table is
   terminated by a null-name sentinel.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",            0x05,   0 },
  { "daifset",          0x1e,   0 },
  { "daifclr",          0x1f,   0 },
  { "pan",              0x04,   F_ARCHEXT },	/* Requires FEATURE_PAN.  */
  { "uao",              0x03,   F_ARCHEXT },	/* Requires ARMv8.2.  */
  { 0,          CPENC(0,0,0,0,0), 0 },
};
4183
4184 bfd_boolean
4185 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4186 const aarch64_sys_reg *reg)
4187 {
4188 if (!(reg->flags & F_ARCHEXT))
4189 return TRUE;
4190
4191 /* PAN. Values are from aarch64_pstatefields. */
4192 if (reg->value == 0x04
4193 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4194 return FALSE;
4195
4196 /* UAO. Values are from aarch64_pstatefields. */
4197 if (reg->value == 0x03
4198 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4199 return FALSE;
4200
4201 return TRUE;
4202 }
4203
/* Instruction-cache maintenance operands for the IC instruction.
   F_HASXT marks operations that take an <Xt> register argument
   (see aarch64_sys_ins_reg_has_xt).  Null-name entry terminates.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
4211
/* Data-cache maintenance operands for the DC instruction.  All take an
   <Xt> argument (F_HASXT); "cvap" additionally carries F_ARCHEXT and is
   gated on ARMv8.2 by aarch64_sys_ins_reg_supported_p.  Null-name entry
   terminates.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",        CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",        CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",        CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
4225
/* Address-translation operands for the AT instruction.  All take an
   <Xt> argument (F_HASXT); "s1e1rp" and "s1e1wp" carry F_ARCHEXT and
   are gated on ARMv8.2 by aarch64_sys_ins_reg_supported_p.  Null-name
   entry terminates.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
4244
/* TLB maintenance operands for the TLBI instruction.  Entries with
   F_HASXT take an <Xt> register argument holding the address/ASID;
   the others take no operand.  Null-name entry terminates.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
4281
4282 bfd_boolean
4283 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4284 {
4285 return (sys_ins_reg->flags & F_HASXT) != 0;
4286 }
4287
/* Return TRUE if the system instruction operand REG is usable given the
   enabled feature set FEATURES.  Operands without F_ARCHEXT belong to
   the base architecture; the remaining checks gate individual encodings
   (from aarch64_sys_regs_dc / aarch64_sys_regs_at) on the extension
   that introduced them.  */
extern bfd_boolean
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const aarch64_sys_ins_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg->value == CPENS (3, C7, C12, 1)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg->value == CPENS (0, C7, C9, 0)
       || reg->value == CPENS (0, C7, C9, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  return TRUE;
}
4308
4309 #undef C0
4310 #undef C1
4311 #undef C2
4312 #undef C3
4313 #undef C4
4314 #undef C5
4315 #undef C6
4316 #undef C7
4317 #undef C8
4318 #undef C9
4319 #undef C10
4320 #undef C11
4321 #undef C12
4322 #undef C13
4323 #undef C14
4324 #undef C15
4325
/* BIT extracts bit BT of instruction word INSN; BITS extracts the
   inclusive bit field INSN[HI:LO], right-justified.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4328
4329 static bfd_boolean
4330 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4331 const aarch64_insn insn)
4332 {
4333 int t = BITS (insn, 4, 0);
4334 int n = BITS (insn, 9, 5);
4335 int t2 = BITS (insn, 14, 10);
4336
4337 if (BIT (insn, 23))
4338 {
4339 /* Write back enabled. */
4340 if ((t == n || t2 == n) && n != 31)
4341 return FALSE;
4342 }
4343
4344 if (BIT (insn, 22))
4345 {
4346 /* Load */
4347 if (t == t2)
4348 return FALSE;
4349 }
4350
4351 return TRUE;
4352 }
4353
4354 /* Return true if VALUE cannot be moved into an SVE register using DUP
4355 (with any element size, not just ESIZE) and if using DUPM would
4356 therefore be OK. ESIZE is the number of bytes in the immediate. */
4357
4358 bfd_boolean
4359 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
4360 {
4361 int64_t svalue = uvalue;
4362 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
4363
4364 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
4365 return FALSE;
4366 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
4367 {
4368 svalue = (int32_t) uvalue;
4369 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
4370 {
4371 svalue = (int16_t) uvalue;
4372 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
4373 return FALSE;
4374 }
4375 }
4376 if ((svalue & 0xff) == 0)
4377 svalue /= 256;
4378 return svalue < -128 || svalue >= 128;
4379 }
4380
4381 /* Include the opcode description table as well as the operand description
4382 table. */
4383 #define VERIFIER(x) verify_##x
4384 #include "aarch64-tbl.h"
This page took 0.160245 seconds and 4 git commands to generate.