AArch64: Fix error checking for SIMD udot (by element)
[deliverable/binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Non-zero enables the verbose qualifier-matching dumps emitted by the
   DEBUG_AARCH64-only helpers later in this file.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
const char *const aarch64_sve_pattern_array[32] = {
  /* Designated initializers; every index not listed is a reserved
     encoding and is left as a null pointer.  */
  [0] = "pow2",
  [1] = "vl1",
  [2] = "vl2",
  [3] = "vl3",
  [4] = "vl4",
  [5] = "vl5",
  [6] = "vl6",
  [7] = "vl7",
  [8] = "vl8",
  [9] = "vl16",
  [10] = "vl32",
  [11] = "vl64",
  [12] = "vl128",
  [13] = "vl256",
  [29] = "mul4",
  [30] = "mul3",
  [31] = "all"
};
78
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
const char *const aarch64_sve_prfop_array[16] = {
  /* Designated initializers; indices 6, 7, 14 and 15 are reserved
     encodings and stay null.  */
  [0] = "pldl1keep",
  [1] = "pldl1strm",
  [2] = "pldl2keep",
  [3] = "pldl2strm",
  [4] = "pldl3keep",
  [5] = "pldl3strm",
  [8] = "pstl1keep",
  [9] = "pstl1strm",
  [10] = "pstl2keep",
  [11] = "pstl2strm",
  [12] = "pstl3keep",
  [13] = "pstl3strm"
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Classification of the qualifier pattern of an AdvSIMD data-processing
   instruction, used to pick the operand that carries the size:Q fields.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};

/* Index of the operand whose qualifier determines the size:Q encoding,
   indexed by enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
201 \f
/* Bit-field locations, each entry giving { lsb, width } of one named
   instruction field.  The entry order must stay in lock-step with the
   corresponding field-kind enumeration; the comments name each field.  */
const aarch64_field fields[] =
{
    { 0, 0 },	/* NIL.  */
    { 0, 4 },	/* cond2: condition in truly conditional-executed inst.  */
    { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 5, 19 },	/* imm19: e.g. in CBZ.  */
    { 5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29, 2 },	/* immlo: e.g. in ADRP.  */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
    { 0, 5 },	/* Rt: in load/store instructions.  */
    { 0, 5 },	/* Rd: in many integer instructions.  */
    { 5, 5 },	/* Rn: in many integer instructions.  */
    { 10, 5 },	/* Rt2: in load/store pair instructions.  */
    { 10, 5 },	/* Ra: in fp instructions.  */
    { 5, 3 },	/* op2: in the system instructions.  */
    { 8, 4 },	/* CRm: in the system instructions.  */
    { 12, 4 },	/* CRn: in the system instructions.  */
    { 16, 3 },	/* op1: in the system instructions.  */
    { 19, 2 },	/* op0: in the system instructions.  */
    { 10, 3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12, 4 },	/* cond: condition flags as a source operand.  */
    { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12, 1 },	/* S: in load/store reg offset instructions.  */
    { 21, 2 },	/* hw: in move wide constant instructions.  */
    { 22, 2 },	/* opc: in load/store reg offset instructions.  */
    { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22, 2 },	/* type: floating point type field in fp data inst.  */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10, 6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 15, 6 },	/* imm6_2: in rmif instructions.  */
    { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 0, 4 },	/* imm4_2: in rmif instructions.  */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    { 5, 14 },	/* imm14: in test bit and branch instructions.  */
    { 5, 16 },	/* imm16: in exception instructions.  */
    { 0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22, 1 },	/* S: in LDRAA and LDRAB instructions.  */
    { 22, 1 },	/* N: in logical (immediate) instructions.  */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31, 1 },	/* sf: in integer data processing instructions.  */
    { 30, 1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31, 1 },	/* b5: in the test bit and branch instructions.  */
    { 19, 5 },	/* b40: in the test bit and branch instructions.  */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 4, 1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
    { 14, 1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
    { 16, 1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
    { 17, 1 },	/* SVE_N: SVE equivalent of N.  */
    { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    { 5, 5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16, 5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    { 0, 5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    { 5, 5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    { 5, 1 },	/* SVE_i1: single-bit immediate.  */
    { 22, 1 },	/* SVE_i3h: high bit of 3-bit immediate.  */
    { 16, 3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16, 4 },	/* SVE_imm4: 4-bit immediate field.  */
    { 5, 5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16, 5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16, 6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14, 7 },	/* SVE_imm7: 7-bit immediate field.  */
    { 5, 8 },	/* SVE_imm8: 8-bit immediate field.  */
    { 5, 9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11, 6 },	/* SVE_immr: SVE equivalent of immr.  */
    { 5, 6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    { 5, 5 },	/* SVE_pattern: vector pattern enumeration.  */
    { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16, 1 },	/* SVE_rot1: 1-bit rotation amount.  */
    { 10, 2 },	/* SVE_rot2: 2-bit rotation amount.  */
    { 22, 1 },	/* SVE_sz: 1-bit element size select.  */
    { 16, 4 },	/* SVE_tsz: triangular size select.  */
    { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    { 8, 2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19, 2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22, 1 },	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 11, 2 },	/* rotate1: FCMLA immediate rotate.  */
    { 13, 2 },	/* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12, 1 },	/* rotate3: FCADD immediate rotate.  */
    { 12, 2 },	/* SM3: Indexed element SM3 2 bits index immediate.  */
};
323
/* Return the operand class of operand type TYPE.  */
enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd type)
{
  return aarch64_operands[type].op_class;
}

/* Return the textual name of operand type TYPE.  */
const char *
aarch64_get_operand_name (enum aarch64_opnd type)
{
  return aarch64_operands[type].name;
}

/* Get operand description string.
   This is usually for the diagnosis purpose.  */
const char *
aarch64_get_operand_desc (enum aarch64_opnd type)
{
  return aarch64_operands[type].desc;
}
343
/* Table of all conditional affixes, indexed by the 4-bit condition code.
   Where an entry lists several names, the first is the canonical spelling
   and the rest are accepted aliases for the same encoding.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
364
/* Return the condition description for the 4-bit encoded condition VALUE.  */
const aarch64_cond *
get_cond_from_value (aarch64_insn value)
{
  assert (value < 16);
  return &aarch64_conds[(unsigned int) value];
}

/* Return the condition paired with COND by flipping the low bit of its
   encoding (e.g. eq <-> ne, cs <-> cc).  */
const aarch64_cond *
get_inverted_cond (const aarch64_cond *cond)
{
  return &aarch64_conds[cond->value ^ 0x1];
}
377
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   Note the shift operators are laid out so that LSL (value 0) is last,
   and the extend operators UXTB..SXTX follow with values 0..7; the
   modifier<->value mapping helpers below rely on this layout.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
403
/* Return the modifier kind for DESC, which must point into the
   aarch64_operand_modifiers table above.  */
enum aarch64_modifier_kind
aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
{
  return desc - aarch64_operand_modifiers;
}

/* Return the common encoding value for modifier KIND.  */
aarch64_insn
aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
{
  return aarch64_operand_modifiers[kind].value;
}

/* Map an encoded modifier VALUE back to a modifier kind.  EXTEND_P selects
   the extend operators (UXTB upward, values 0..7); otherwise the shift
   operators are used, which sit before LSL in reverse value order
   (LSL=0, LSR=1, ASR=2, ROR=3), hence the subtraction.  */
enum aarch64_modifier_kind
aarch64_get_operand_modifier_from_value (aarch64_insn value,
					 bfd_boolean extend_p)
{
  if (extend_p == TRUE)
    return AARCH64_MOD_UXTB + value;
  else
    return AARCH64_MOD_LSL - value;
}
425
426 bfd_boolean
427 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
428 {
429 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
430 ? TRUE : FALSE;
431 }
432
433 static inline bfd_boolean
434 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
435 {
436 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
437 ? TRUE : FALSE;
438 }
439
/* Barrier option names, indexed by the 4-bit encoded option value.
   Encodings without a named form keep their literal "#0xNN" spelling.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};

/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.  */
  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
  { "c",	HINT_OPD_C },		/* BTI C.  */
  { "j",	HINT_OPD_J },		/* BTI J.  */
  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
  { NULL,	HINT_OPD_NULL },
};
477
/* PRFM prefetch operation names, indexed by the 5-bit prfop encoding.
   Unnamed encodings (NULL) are printed as immediates by the callers.

   op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
518 \f
/* Utilities on value constraint.  */

/* Return 1 iff LOW <= VALUE <= HIGH (bounds inclusive), else 0.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  if (value > high)
    return 0;
  return 1;
}

/* Return non-zero if VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return (value % align) ? 0 : 1;
}
533
/* Return 1 iff VALUE fits in a signed (two's-complement) field of
   WIDTH bits.  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t limit = (int64_t) 1 << (width - 1);
    return (-limit <= value && value < limit) ? 1 : 0;
  }
}

/* Return 1 iff VALUE fits in an unsigned field of WIDTH bits.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t limit = (int64_t) 1 << width;
    return (0 <= value && value < limit) ? 1 : 0;
  }
}
561
/* Return 1 if OPERAND is SP or WSP, i.e. an integer-register operand that
   may name the stack pointer and encodes register number 31.  */
int
aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}

/* Return 1 if OPERAND is XZR or WZR, i.e. register number 31 in a
   position where it cannot be the stack pointer.  */
int
aarch64_zero_register_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
581
/* Return true if the operand *OPERAND that has the operand code
   OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
   qualified by the qualifier TARGET.

   The only cross-qualification allowed is between the W/X register
   qualifiers and their WSP/SP counterparts for register number 31.  */

static inline int
operand_also_qualified_p (const struct aarch64_opnd_info *operand,
			  aarch64_opnd_qualifier_t target)
{
  switch (operand->qualifier)
    {
    case AARCH64_OPND_QLF_W:
      /* W also matches WSP when the operand actually names WSP.  */
      if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_X:
      /* X also matches SP when the operand actually names SP.  */
      if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_WSP:
      if (target == AARCH64_OPND_QLF_W
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    case AARCH64_OPND_QLF_SP:
      if (target == AARCH64_OPND_QLF_X
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    default:
      /* All other qualifiers never cross-qualify.  */
      break;
    }

  return 0;
}
616
617 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
618 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
619
620 Return NIL if more than one expected qualifiers are found. */
621
622 aarch64_opnd_qualifier_t
623 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
624 int idx,
625 const aarch64_opnd_qualifier_t known_qlf,
626 int known_idx)
627 {
628 int i, saved_i;
629
630 /* Special case.
631
632 When the known qualifier is NIL, we have to assume that there is only
633 one qualifier sequence in the *QSEQ_LIST and return the corresponding
634 qualifier directly. One scenario is that for instruction
635 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
636 which has only one possible valid qualifier sequence
637 NIL, S_D
638 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
639 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
640
641 Because the qualifier NIL has dual roles in the qualifier sequence:
642 it can mean no qualifier for the operand, or the qualifer sequence is
643 not in use (when all qualifiers in the sequence are NILs), we have to
644 handle this special case here. */
645 if (known_qlf == AARCH64_OPND_NIL)
646 {
647 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
648 return qseq_list[0][idx];
649 }
650
651 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
652 {
653 if (qseq_list[i][known_idx] == known_qlf)
654 {
655 if (saved_i != -1)
656 /* More than one sequences are found to have KNOWN_QLF at
657 KNOWN_IDX. */
658 return AARCH64_OPND_NIL;
659 saved_i = i;
660 }
661 }
662
663 return qseq_list[saved_i][idx];
664 }
665
/* Kind of an operand qualifier; decides how the data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size / element count / standard
     encoding value; for OQK_VALUE_IN_RANGE they are lower bound / upper
     bound / unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
686
/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
  /* Scalar "4b": a single 4-byte-wide view, distinct from the vector
     "4b" entry below which is four 1-byte elements.  */
  {4, 1, 0x0, "4b", OQK_OPD_VARIANT},

  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.
     NOTE(review): these rows use a literal 0 kind (i.e. OQK_NIL) rather
     than OQK_MISC — confirm whether any caller distinguishes the two.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
744
/* Return TRUE if QUALIFIER selects a register/vector layout variant
   (an OQK_OPD_VARIANT entry in aarch64_opnd_qualifiers).  */
static inline bfd_boolean
operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
{
  return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
    ? TRUE : FALSE;
}

/* Return TRUE if QUALIFIER constrains an immediate to a value range
   (an OQK_VALUE_IN_RANGE entry in aarch64_opnd_qualifiers).  */
static inline bfd_boolean
qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
{
  return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
    ? TRUE : FALSE;
}

/* Return the printable name of QUALIFIER, e.g. "8b" or "imm_0_7".  */
const char*
aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].desc;
}
764
/* Given an operand qualifier, return the expected data element size
   (in bytes) of a qualified operand.  Only valid for variant
   qualifiers.  */
unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}

/* Return the number of data elements described by QUALIFIER.  Only valid
   for variant qualifiers.  */
unsigned char
aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}

/* Return the common encoding value associated with QUALIFIER.  Only valid
   for variant qualifiers.  */
aarch64_insn
aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data2;
}

/* Return the inclusive lower bound of range-constraint QUALIFIER.  */
static int
get_lower_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}

/* Return the inclusive upper bound of range-constraint QUALIFIER.  */
static int
get_upper_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}
801
#ifdef DEBUG_AARCH64
/* printf-style tracing helper; prints STR prefixed with "#### " and
   terminated with a newline.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}

/* Dump one qualifier sequence (AARCH64_MAX_OPND_NUM entries) on a single
   trace line.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}

/* Dump the qualifiers currently attached to the operands OPND against the
   candidate sequence QUALIFIER, for tracing qualifier matching.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
839
840 /* This function checks if the given instruction INSN is a destructive
841 instruction based on the usage of the registers. It does not recognize
842 unary destructive instructions. */
843 bfd_boolean
844 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
845 {
846 int i = 0;
847 const enum aarch64_opnd *opnds = opcode->operands;
848
849 if (opnds[0] == AARCH64_OPND_NIL)
850 return FALSE;
851
852 while (opnds[++i] != AARCH64_OPND_NIL)
853 if (opnds[i] == opnds[0])
854 return TRUE;
855
856 return FALSE;
857 }
858
859 /* TODO improve this, we can have an extra field at the runtime to
860 store the number of operands rather than calculating it every time. */
861
862 int
863 aarch64_num_of_operands (const aarch64_opcode *opcode)
864 {
865 int i = 0;
866 const enum aarch64_opnd *opnds = opcode->operands;
867 while (opnds[i++] != AARCH64_OPND_NIL)
868 ;
869 --i;
870 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
871 return i;
872 }
873
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers been established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   A couple of examples of the matching algorithm:

   X,W,NIL should match
   X,W,NIL

   NIL,NIL should match
   X  ,NIL

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.  */

int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      /* Nothing to match; trivially successful.  */
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  /* Normalize STOP_AT to the last operand index.  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  /* An all-NIL sequence only matches when it is the sole (first)
	     entry; otherwise we have run off the end of the list.  */
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched sequence up to STOP_AT; pad the rest with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
998
999 /* Operand qualifier matching and resolving.
1000
1001 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1002 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1003
   If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
1005 succeeds. */
1006
1007 static int
1008 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
1009 {
1010 int i, nops;
1011 aarch64_opnd_qualifier_seq_t qualifiers;
1012
1013 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1014 qualifiers))
1015 {
1016 DEBUG_TRACE ("matching FAIL");
1017 return 0;
1018 }
1019
1020 if (inst->opcode->flags & F_STRICT)
1021 {
1022 /* Require an exact qualifier match, even for NIL qualifiers. */
1023 nops = aarch64_num_of_operands (inst->opcode);
1024 for (i = 0; i < nops; ++i)
1025 if (inst->operands[i].qualifier != qualifiers[i])
1026 return FALSE;
1027 }
1028
1029 /* Update the qualifiers. */
1030 if (update_p == TRUE)
1031 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1032 {
1033 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1034 break;
1035 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1036 "update %s with %s for operand %d",
1037 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1038 aarch64_get_qualifier_name (qualifiers[i]), i);
1039 inst->operands[i].qualifier = qualifiers[i];
1040 }
1041
1042 DEBUG_TRACE ("matching SUCCESS");
1043 return 1;
1044 }
1045
1046 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1047 register by MOVZ.
1048
1049 IS32 indicates whether value is a 32-bit immediate or not.
1050 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1051 amount will be returned in *SHIFT_AMOUNT. */
1052
1053 bfd_boolean
1054 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1055 {
1056 int amount;
1057
1058 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1059
1060 if (is32)
1061 {
1062 /* Allow all zeros or all ones in top 32-bits, so that
1063 32-bit constant expressions like ~0x80000000 are
1064 permitted. */
1065 uint64_t ext = value;
1066 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1067 /* Immediate out of range. */
1068 return FALSE;
1069 value &= (int64_t) 0xffffffff;
1070 }
1071
1072 /* first, try movz then movn */
1073 amount = -1;
1074 if ((value & ((int64_t) 0xffff << 0)) == value)
1075 amount = 0;
1076 else if ((value & ((int64_t) 0xffff << 16)) == value)
1077 amount = 16;
1078 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1079 amount = 32;
1080 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1081 amount = 48;
1082
1083 if (amount == -1)
1084 {
1085 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1086 return FALSE;
1087 }
1088
1089 if (shift_amount != NULL)
1090 *shift_amount = amount;
1091
1092 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1093
1094 return TRUE;
1095 }
1096
1097 /* Build the accepted values for immediate logical SIMD instructions.
1098
1099 The standard encodings of the immediate value are:
1100 N imms immr SIMD size R S
1101 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1102 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1103 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1104 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1105 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1106 0 11110s 00000r 2 UInt(r) UInt(s)
1107 where all-ones value of S is reserved.
1108
1109 Let's call E the SIMD size.
1110
1111 The immediate value is: S+1 bits '1' rotated to the right by R.
1112
1113 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1114 (remember S != E - 1). */
1115
1116 #define TOTAL_IMM_NB 5334
1117
typedef struct
{
  uint64_t imm;		/* Replicated 64-bit immediate value.  */
  aarch64_insn encoding;	/* Standard (N:immr:imms) encoding of IMM.  */
} simd_imm_encoding;

/* Table of all valid logical immediates; filled lazily by
   build_immediate_table and sorted by IMM so it can be bsearch'd.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1125
1126 static int
1127 simd_imm_encoding_cmp(const void *i1, const void *i2)
1128 {
1129 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1130 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1131
1132 if (imm1->imm < imm2->imm)
1133 return -1;
1134 if (imm1->imm > imm2->imm)
1135 return +1;
1136 return 0;
1137 }
1138
1139 /* immediate bitfield standard encoding
1140 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1141 1 ssssss rrrrrr 64 rrrrrr ssssss
1142 0 0sssss 0rrrrr 32 rrrrr sssss
1143 0 10ssss 00rrrr 16 rrrr ssss
1144 0 110sss 000rrr 8 rrr sss
1145 0 1110ss 0000rr 4 rr ss
1146 0 11110s 00000r 2 r s */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  /* Pack the standard imm13 layout: bit 12 is N (IS64), bits 11:6 are
     immr (R) and bits 5:0 are imms (S).  */
  int encoding = s;
  encoding |= r << 6;
  encoding |= is64 << 12;
  return encoding;
}
1152
/* Enumerate every valid (element size, S, R) combination, compute the
   64-bit value each one denotes, and record it with its standard encoding
   in SIMD_IMMEDIATES.  Finally sort the table by immediate value so that
   aarch64_logical_immediate_p can bsearch it.  */

static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* Element sizes 2, 4, 8, 16, 32 and 64 bits.  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* S_MASK supplies the leading "size marker" bits of the imms
	     field for sub-64-bit element sizes:
	     log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* All-ones S (s == e - 1) is reserved, hence s < e - 1.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* Replicate the E-bit element to fill 64 bits; each case
	       doubles the pattern width and deliberately falls through
	       to the next.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  /* Sanity-check against the count documented above (5334 entries).  */
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1218
1219 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1220 be accepted by logical (immediate) instructions
1221 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1222
1223 ESIZE is the number of bytes in the decoded immediate value.
1224 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1225 VALUE will be returned in *ENCODING. */
1226
bfd_boolean
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  /* The lookup table is built lazily on first use.  */
  static bfd_boolean initialized = FALSE;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  if (!initialized)
    {
      build_immediate_table ();
      initialized = TRUE;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.
     UPPER is a mask of the bits above the ESIZE*8-bit value; the shift
     is split in two so that esize == 8 shifts by 32 twice (yielding 0)
     instead of shifting by 64, which would be undefined behaviour.  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return FALSE;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* Look the replicated value up in the sorted table of all valid
     logical immediates.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with FALSE");
      return FALSE;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with TRUE");
  return TRUE;
}
1270
1271 /* If 64-bit immediate IMM is in the format of
1272 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1273 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1274 of value "abcdefgh". Otherwise return -1. */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int ret = 0;
  int i;

  /* Collapse each byte of IMM into one result bit: 0xff -> 1, 0x00 -> 0;
     any other byte value means IMM is not in the expanded format.  */
  for (i = 0; i < 8; i++)
    {
      uint32_t byte = (imm >> (8 * i)) & 0xff;

      switch (byte)
	{
	case 0xff:
	  ret |= 1 << i;
	  break;
	case 0x00:
	  break;
	default:
	  return -1;
	}
    }
  return ret;
}
1292
1293 /* Utility inline functions for operand_general_constraint_met_p. */
1294
1295 static inline void
1296 set_error (aarch64_operand_error *mismatch_detail,
1297 enum aarch64_operand_error_kind kind, int idx,
1298 const char* error)
1299 {
1300 if (mismatch_detail == NULL)
1301 return;
1302 mismatch_detail->kind = kind;
1303 mismatch_detail->index = idx;
1304 mismatch_detail->error = error;
1305 }
1306
1307 static inline void
1308 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1309 const char* error)
1310 {
1311 if (mismatch_detail == NULL)
1312 return;
1313 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1314 }
1315
1316 static inline void
1317 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1318 int idx, int lower_bound, int upper_bound,
1319 const char* error)
1320 {
1321 if (mismatch_detail == NULL)
1322 return;
1323 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1324 mismatch_detail->data[0] = lower_bound;
1325 mismatch_detail->data[1] = upper_bound;
1326 }
1327
1328 static inline void
1329 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1330 int idx, int lower_bound, int upper_bound)
1331 {
1332 if (mismatch_detail == NULL)
1333 return;
1334 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1335 _("immediate value"));
1336 }
1337
1338 static inline void
1339 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1340 int idx, int lower_bound, int upper_bound)
1341 {
1342 if (mismatch_detail == NULL)
1343 return;
1344 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1345 _("immediate offset"));
1346 }
1347
1348 static inline void
1349 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1350 int idx, int lower_bound, int upper_bound)
1351 {
1352 if (mismatch_detail == NULL)
1353 return;
1354 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1355 _("register number"));
1356 }
1357
1358 static inline void
1359 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1360 int idx, int lower_bound, int upper_bound)
1361 {
1362 if (mismatch_detail == NULL)
1363 return;
1364 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1365 _("register element index"));
1366 }
1367
1368 static inline void
1369 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1370 int idx, int lower_bound, int upper_bound)
1371 {
1372 if (mismatch_detail == NULL)
1373 return;
1374 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1375 _("shift amount"));
1376 }
1377
1378 /* Report that the MUL modifier in operand IDX should be in the range
1379 [LOWER_BOUND, UPPER_BOUND]. */
1380 static inline void
1381 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1382 int idx, int lower_bound, int upper_bound)
1383 {
1384 if (mismatch_detail == NULL)
1385 return;
1386 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1387 _("multiplier"));
1388 }
1389
1390 static inline void
1391 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1392 int alignment)
1393 {
1394 if (mismatch_detail == NULL)
1395 return;
1396 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1397 mismatch_detail->data[0] = alignment;
1398 }
1399
1400 static inline void
1401 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1402 int expected_num)
1403 {
1404 if (mismatch_detail == NULL)
1405 return;
1406 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1407 mismatch_detail->data[0] = expected_num;
1408 }
1409
1410 static inline void
1411 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1412 const char* error)
1413 {
1414 if (mismatch_detail == NULL)
1415 return;
1416 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1417 }
1418
1419 /* General constraint checking based on operand code.
1420
1421 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1422 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1423
1424 This function has to be called after the qualifiers for all operands
1425 have been resolved.
1426
1427 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1428 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1429 of error message during the disassembling where error message is not
1430 wanted. We avoid the dynamic construction of strings of error messages
1431 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1432 use a combination of error code, static string and some integer data to
1433 represent an error. */
1434
1435 static int
1436 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1437 enum aarch64_opnd type,
1438 const aarch64_opcode *opcode,
1439 aarch64_operand_error *mismatch_detail)
1440 {
1441 unsigned num, modifiers, shift;
1442 unsigned char size;
1443 int64_t imm, min_value, max_value;
1444 uint64_t uvalue, mask;
1445 const aarch64_opnd_info *opnd = opnds + idx;
1446 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1447
1448 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1449
1450 switch (aarch64_operands[type].op_class)
1451 {
1452 case AARCH64_OPND_CLASS_INT_REG:
1453 /* Check pair reg constraints for cas* instructions. */
1454 if (type == AARCH64_OPND_PAIRREG)
1455 {
1456 assert (idx == 1 || idx == 3);
1457 if (opnds[idx - 1].reg.regno % 2 != 0)
1458 {
1459 set_syntax_error (mismatch_detail, idx - 1,
1460 _("reg pair must start from even reg"));
1461 return 0;
1462 }
1463 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1464 {
1465 set_syntax_error (mismatch_detail, idx,
1466 _("reg pair must be contiguous"));
1467 return 0;
1468 }
1469 break;
1470 }
1471
1472 /* <Xt> may be optional in some IC and TLBI instructions. */
1473 if (type == AARCH64_OPND_Rt_SYS)
1474 {
1475 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1476 == AARCH64_OPND_CLASS_SYSTEM));
1477 if (opnds[1].present
1478 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1479 {
1480 set_other_error (mismatch_detail, idx, _("extraneous register"));
1481 return 0;
1482 }
1483 if (!opnds[1].present
1484 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1485 {
1486 set_other_error (mismatch_detail, idx, _("missing register"));
1487 return 0;
1488 }
1489 }
1490 switch (qualifier)
1491 {
1492 case AARCH64_OPND_QLF_WSP:
1493 case AARCH64_OPND_QLF_SP:
1494 if (!aarch64_stack_pointer_p (opnd))
1495 {
1496 set_other_error (mismatch_detail, idx,
1497 _("stack pointer register expected"));
1498 return 0;
1499 }
1500 break;
1501 default:
1502 break;
1503 }
1504 break;
1505
1506 case AARCH64_OPND_CLASS_SVE_REG:
1507 switch (type)
1508 {
1509 case AARCH64_OPND_SVE_Zm3_INDEX:
1510 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1511 case AARCH64_OPND_SVE_Zm4_INDEX:
1512 size = get_operand_fields_width (get_operand_from_code (type));
1513 shift = get_operand_specific_data (&aarch64_operands[type]);
1514 mask = (1 << shift) - 1;
1515 if (opnd->reg.regno > mask)
1516 {
1517 assert (mask == 7 || mask == 15);
1518 set_other_error (mismatch_detail, idx,
1519 mask == 15
1520 ? _("z0-z15 expected")
1521 : _("z0-z7 expected"));
1522 return 0;
1523 }
1524 mask = (1 << (size - shift)) - 1;
1525 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1526 {
1527 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1528 return 0;
1529 }
1530 break;
1531
1532 case AARCH64_OPND_SVE_Zn_INDEX:
1533 size = aarch64_get_qualifier_esize (opnd->qualifier);
1534 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1535 {
1536 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1537 0, 64 / size - 1);
1538 return 0;
1539 }
1540 break;
1541
1542 case AARCH64_OPND_SVE_ZnxN:
1543 case AARCH64_OPND_SVE_ZtxN:
1544 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1545 {
1546 set_other_error (mismatch_detail, idx,
1547 _("invalid register list"));
1548 return 0;
1549 }
1550 break;
1551
1552 default:
1553 break;
1554 }
1555 break;
1556
1557 case AARCH64_OPND_CLASS_PRED_REG:
1558 if (opnd->reg.regno >= 8
1559 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1560 {
1561 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1562 return 0;
1563 }
1564 break;
1565
1566 case AARCH64_OPND_CLASS_COND:
1567 if (type == AARCH64_OPND_COND1
1568 && (opnds[idx].cond->value & 0xe) == 0xe)
1569 {
1570 /* Not allow AL or NV. */
1571 set_syntax_error (mismatch_detail, idx, NULL);
1572 }
1573 break;
1574
1575 case AARCH64_OPND_CLASS_ADDRESS:
1576 /* Check writeback. */
1577 switch (opcode->iclass)
1578 {
1579 case ldst_pos:
1580 case ldst_unscaled:
1581 case ldstnapair_offs:
1582 case ldstpair_off:
1583 case ldst_unpriv:
1584 if (opnd->addr.writeback == 1)
1585 {
1586 set_syntax_error (mismatch_detail, idx,
1587 _("unexpected address writeback"));
1588 return 0;
1589 }
1590 break;
1591 case ldst_imm10:
1592 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1593 {
1594 set_syntax_error (mismatch_detail, idx,
1595 _("unexpected address writeback"));
1596 return 0;
1597 }
1598 break;
1599 case ldst_imm9:
1600 case ldstpair_indexed:
1601 case asisdlsep:
1602 case asisdlsop:
1603 if (opnd->addr.writeback == 0)
1604 {
1605 set_syntax_error (mismatch_detail, idx,
1606 _("address writeback expected"));
1607 return 0;
1608 }
1609 break;
1610 default:
1611 assert (opnd->addr.writeback == 0);
1612 break;
1613 }
1614 switch (type)
1615 {
1616 case AARCH64_OPND_ADDR_SIMM7:
1617 /* Scaled signed 7 bits immediate offset. */
1618 /* Get the size of the data element that is accessed, which may be
1619 different from that of the source register size,
1620 e.g. in strb/ldrb. */
1621 size = aarch64_get_qualifier_esize (opnd->qualifier);
1622 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1623 {
1624 set_offset_out_of_range_error (mismatch_detail, idx,
1625 -64 * size, 63 * size);
1626 return 0;
1627 }
1628 if (!value_aligned_p (opnd->addr.offset.imm, size))
1629 {
1630 set_unaligned_error (mismatch_detail, idx, size);
1631 return 0;
1632 }
1633 break;
1634 case AARCH64_OPND_ADDR_OFFSET:
1635 case AARCH64_OPND_ADDR_SIMM9:
1636 /* Unscaled signed 9 bits immediate offset. */
1637 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1638 {
1639 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1640 return 0;
1641 }
1642 break;
1643
1644 case AARCH64_OPND_ADDR_SIMM9_2:
1645 /* Unscaled signed 9 bits immediate offset, which has to be negative
1646 or unaligned. */
1647 size = aarch64_get_qualifier_esize (qualifier);
1648 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1649 && !value_aligned_p (opnd->addr.offset.imm, size))
1650 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1651 return 1;
1652 set_other_error (mismatch_detail, idx,
1653 _("negative or unaligned offset expected"));
1654 return 0;
1655
1656 case AARCH64_OPND_ADDR_SIMM10:
1657 /* Scaled signed 10 bits immediate offset. */
1658 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1659 {
1660 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1661 return 0;
1662 }
1663 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1664 {
1665 set_unaligned_error (mismatch_detail, idx, 8);
1666 return 0;
1667 }
1668 break;
1669
1670 case AARCH64_OPND_SIMD_ADDR_POST:
1671 /* AdvSIMD load/store multiple structures, post-index. */
1672 assert (idx == 1);
1673 if (opnd->addr.offset.is_reg)
1674 {
1675 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1676 return 1;
1677 else
1678 {
1679 set_other_error (mismatch_detail, idx,
1680 _("invalid register offset"));
1681 return 0;
1682 }
1683 }
1684 else
1685 {
1686 const aarch64_opnd_info *prev = &opnds[idx-1];
1687 unsigned num_bytes; /* total number of bytes transferred. */
1688 /* The opcode dependent area stores the number of elements in
1689 each structure to be loaded/stored. */
1690 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1691 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1692 /* Special handling of loading single structure to all lane. */
1693 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1694 * aarch64_get_qualifier_esize (prev->qualifier);
1695 else
1696 num_bytes = prev->reglist.num_regs
1697 * aarch64_get_qualifier_esize (prev->qualifier)
1698 * aarch64_get_qualifier_nelem (prev->qualifier);
1699 if ((int) num_bytes != opnd->addr.offset.imm)
1700 {
1701 set_other_error (mismatch_detail, idx,
1702 _("invalid post-increment amount"));
1703 return 0;
1704 }
1705 }
1706 break;
1707
1708 case AARCH64_OPND_ADDR_REGOFF:
1709 /* Get the size of the data element that is accessed, which may be
1710 different from that of the source register size,
1711 e.g. in strb/ldrb. */
1712 size = aarch64_get_qualifier_esize (opnd->qualifier);
1713 /* It is either no shift or shift by the binary logarithm of SIZE. */
1714 if (opnd->shifter.amount != 0
1715 && opnd->shifter.amount != (int)get_logsz (size))
1716 {
1717 set_other_error (mismatch_detail, idx,
1718 _("invalid shift amount"));
1719 return 0;
1720 }
1721 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1722 operators. */
1723 switch (opnd->shifter.kind)
1724 {
1725 case AARCH64_MOD_UXTW:
1726 case AARCH64_MOD_LSL:
1727 case AARCH64_MOD_SXTW:
1728 case AARCH64_MOD_SXTX: break;
1729 default:
1730 set_other_error (mismatch_detail, idx,
1731 _("invalid extend/shift operator"));
1732 return 0;
1733 }
1734 break;
1735
1736 case AARCH64_OPND_ADDR_UIMM12:
1737 imm = opnd->addr.offset.imm;
1738 /* Get the size of the data element that is accessed, which may be
1739 different from that of the source register size,
1740 e.g. in strb/ldrb. */
1741 size = aarch64_get_qualifier_esize (qualifier);
1742 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1743 {
1744 set_offset_out_of_range_error (mismatch_detail, idx,
1745 0, 4095 * size);
1746 return 0;
1747 }
1748 if (!value_aligned_p (opnd->addr.offset.imm, size))
1749 {
1750 set_unaligned_error (mismatch_detail, idx, size);
1751 return 0;
1752 }
1753 break;
1754
1755 case AARCH64_OPND_ADDR_PCREL14:
1756 case AARCH64_OPND_ADDR_PCREL19:
1757 case AARCH64_OPND_ADDR_PCREL21:
1758 case AARCH64_OPND_ADDR_PCREL26:
1759 imm = opnd->imm.value;
1760 if (operand_need_shift_by_two (get_operand_from_code (type)))
1761 {
	      /* The offset value in a PC-relative branch instruction is always
1763 4-byte aligned and is encoded without the lowest 2 bits. */
1764 if (!value_aligned_p (imm, 4))
1765 {
1766 set_unaligned_error (mismatch_detail, idx, 4);
1767 return 0;
1768 }
1769 /* Right shift by 2 so that we can carry out the following check
1770 canonically. */
1771 imm >>= 2;
1772 }
1773 size = get_operand_fields_width (get_operand_from_code (type));
1774 if (!value_fit_signed_field_p (imm, size))
1775 {
1776 set_other_error (mismatch_detail, idx,
1777 _("immediate out of range"));
1778 return 0;
1779 }
1780 break;
1781
1782 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1783 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1784 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1785 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1786 min_value = -8;
1787 max_value = 7;
1788 sve_imm_offset_vl:
1789 assert (!opnd->addr.offset.is_reg);
1790 assert (opnd->addr.preind);
1791 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1792 min_value *= num;
1793 max_value *= num;
1794 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1795 || (opnd->shifter.operator_present
1796 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1797 {
1798 set_other_error (mismatch_detail, idx,
1799 _("invalid addressing mode"));
1800 return 0;
1801 }
1802 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1803 {
1804 set_offset_out_of_range_error (mismatch_detail, idx,
1805 min_value, max_value);
1806 return 0;
1807 }
1808 if (!value_aligned_p (opnd->addr.offset.imm, num))
1809 {
1810 set_unaligned_error (mismatch_detail, idx, num);
1811 return 0;
1812 }
1813 break;
1814
1815 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1816 min_value = -32;
1817 max_value = 31;
1818 goto sve_imm_offset_vl;
1819
1820 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1821 min_value = -256;
1822 max_value = 255;
1823 goto sve_imm_offset_vl;
1824
1825 case AARCH64_OPND_SVE_ADDR_RI_U6:
1826 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1827 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1828 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1829 min_value = 0;
1830 max_value = 63;
1831 sve_imm_offset:
1832 assert (!opnd->addr.offset.is_reg);
1833 assert (opnd->addr.preind);
1834 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1835 min_value *= num;
1836 max_value *= num;
1837 if (opnd->shifter.operator_present
1838 || opnd->shifter.amount_present)
1839 {
1840 set_other_error (mismatch_detail, idx,
1841 _("invalid addressing mode"));
1842 return 0;
1843 }
1844 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1845 {
1846 set_offset_out_of_range_error (mismatch_detail, idx,
1847 min_value, max_value);
1848 return 0;
1849 }
1850 if (!value_aligned_p (opnd->addr.offset.imm, num))
1851 {
1852 set_unaligned_error (mismatch_detail, idx, num);
1853 return 0;
1854 }
1855 break;
1856
1857 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1858 min_value = -8;
1859 max_value = 7;
1860 goto sve_imm_offset;
1861
1862 case AARCH64_OPND_SVE_ADDR_R:
1863 case AARCH64_OPND_SVE_ADDR_RR:
1864 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1865 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1866 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1867 case AARCH64_OPND_SVE_ADDR_RX:
1868 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1869 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1870 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1871 case AARCH64_OPND_SVE_ADDR_RZ:
1872 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1873 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1874 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1875 modifiers = 1 << AARCH64_MOD_LSL;
1876 sve_rr_operand:
1877 assert (opnd->addr.offset.is_reg);
1878 assert (opnd->addr.preind);
1879 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1880 && opnd->addr.offset.regno == 31)
1881 {
1882 set_other_error (mismatch_detail, idx,
1883 _("index register xzr is not allowed"));
1884 return 0;
1885 }
1886 if (((1 << opnd->shifter.kind) & modifiers) == 0
1887 || (opnd->shifter.amount
1888 != get_operand_specific_data (&aarch64_operands[type])))
1889 {
1890 set_other_error (mismatch_detail, idx,
1891 _("invalid addressing mode"));
1892 return 0;
1893 }
1894 break;
1895
1896 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1897 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1898 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1899 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1900 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1901 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1902 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1903 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1904 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1905 goto sve_rr_operand;
1906
1907 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1908 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1909 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1910 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1911 min_value = 0;
1912 max_value = 31;
1913 goto sve_imm_offset;
1914
1915 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1916 modifiers = 1 << AARCH64_MOD_LSL;
1917 sve_zz_operand:
1918 assert (opnd->addr.offset.is_reg);
1919 assert (opnd->addr.preind);
1920 if (((1 << opnd->shifter.kind) & modifiers) == 0
1921 || opnd->shifter.amount < 0
1922 || opnd->shifter.amount > 3)
1923 {
1924 set_other_error (mismatch_detail, idx,
1925 _("invalid addressing mode"));
1926 return 0;
1927 }
1928 break;
1929
1930 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1931 modifiers = (1 << AARCH64_MOD_SXTW);
1932 goto sve_zz_operand;
1933
1934 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1935 modifiers = 1 << AARCH64_MOD_UXTW;
1936 goto sve_zz_operand;
1937
1938 default:
1939 break;
1940 }
1941 break;
1942
1943 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1944 if (type == AARCH64_OPND_LEt)
1945 {
1946 /* Get the upper bound for the element index. */
1947 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1948 if (!value_in_range_p (opnd->reglist.index, 0, num))
1949 {
1950 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1951 return 0;
1952 }
1953 }
1954 /* The opcode dependent area stores the number of elements in
1955 each structure to be loaded/stored. */
1956 num = get_opcode_dependent_value (opcode);
1957 switch (type)
1958 {
1959 case AARCH64_OPND_LVt:
1960 assert (num >= 1 && num <= 4);
1961 /* Unless LD1/ST1, the number of registers should be equal to that
1962 of the structure elements. */
1963 if (num != 1 && opnd->reglist.num_regs != num)
1964 {
1965 set_reg_list_error (mismatch_detail, idx, num);
1966 return 0;
1967 }
1968 break;
1969 case AARCH64_OPND_LVt_AL:
1970 case AARCH64_OPND_LEt:
1971 assert (num >= 1 && num <= 4);
1972 /* The number of registers should be equal to that of the structure
1973 elements. */
1974 if (opnd->reglist.num_regs != num)
1975 {
1976 set_reg_list_error (mismatch_detail, idx, num);
1977 return 0;
1978 }
1979 break;
1980 default:
1981 break;
1982 }
1983 break;
1984
1985 case AARCH64_OPND_CLASS_IMMEDIATE:
1986 /* Constraint check on immediate operand. */
1987 imm = opnd->imm.value;
1988 /* E.g. imm_0_31 constrains value to be 0..31. */
1989 if (qualifier_value_in_range_constraint_p (qualifier)
1990 && !value_in_range_p (imm, get_lower_bound (qualifier),
1991 get_upper_bound (qualifier)))
1992 {
1993 set_imm_out_of_range_error (mismatch_detail, idx,
1994 get_lower_bound (qualifier),
1995 get_upper_bound (qualifier));
1996 return 0;
1997 }
1998
1999 switch (type)
2000 {
2001 case AARCH64_OPND_AIMM:
2002 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2003 {
2004 set_other_error (mismatch_detail, idx,
2005 _("invalid shift operator"));
2006 return 0;
2007 }
2008 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2009 {
2010 set_other_error (mismatch_detail, idx,
2011 _("shift amount must be 0 or 12"));
2012 return 0;
2013 }
2014 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2015 {
2016 set_other_error (mismatch_detail, idx,
2017 _("immediate out of range"));
2018 return 0;
2019 }
2020 break;
2021
2022 case AARCH64_OPND_HALF:
2023 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2024 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2025 {
2026 set_other_error (mismatch_detail, idx,
2027 _("invalid shift operator"));
2028 return 0;
2029 }
2030 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2031 if (!value_aligned_p (opnd->shifter.amount, 16))
2032 {
2033 set_other_error (mismatch_detail, idx,
2034 _("shift amount must be a multiple of 16"));
2035 return 0;
2036 }
2037 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2038 {
2039 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2040 0, size * 8 - 16);
2041 return 0;
2042 }
2043 if (opnd->imm.value < 0)
2044 {
2045 set_other_error (mismatch_detail, idx,
2046 _("negative immediate value not allowed"));
2047 return 0;
2048 }
2049 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2050 {
2051 set_other_error (mismatch_detail, idx,
2052 _("immediate out of range"));
2053 return 0;
2054 }
2055 break;
2056
2057 case AARCH64_OPND_IMM_MOV:
2058 {
2059 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2060 imm = opnd->imm.value;
2061 assert (idx == 1);
2062 switch (opcode->op)
2063 {
2064 case OP_MOV_IMM_WIDEN:
2065 imm = ~imm;
2066 /* Fall through. */
2067 case OP_MOV_IMM_WIDE:
2068 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2069 {
2070 set_other_error (mismatch_detail, idx,
2071 _("immediate out of range"));
2072 return 0;
2073 }
2074 break;
2075 case OP_MOV_IMM_LOG:
2076 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2077 {
2078 set_other_error (mismatch_detail, idx,
2079 _("immediate out of range"));
2080 return 0;
2081 }
2082 break;
2083 default:
2084 assert (0);
2085 return 0;
2086 }
2087 }
2088 break;
2089
2090 case AARCH64_OPND_NZCV:
2091 case AARCH64_OPND_CCMP_IMM:
2092 case AARCH64_OPND_EXCEPTION:
2093 case AARCH64_OPND_UIMM4:
2094 case AARCH64_OPND_UIMM7:
2095 case AARCH64_OPND_UIMM3_OP1:
2096 case AARCH64_OPND_UIMM3_OP2:
2097 case AARCH64_OPND_SVE_UIMM3:
2098 case AARCH64_OPND_SVE_UIMM7:
2099 case AARCH64_OPND_SVE_UIMM8:
2100 case AARCH64_OPND_SVE_UIMM8_53:
2101 size = get_operand_fields_width (get_operand_from_code (type));
2102 assert (size < 32);
2103 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2104 {
2105 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2106 (1 << size) - 1);
2107 return 0;
2108 }
2109 break;
2110
2111 case AARCH64_OPND_SIMM5:
2112 case AARCH64_OPND_SVE_SIMM5:
2113 case AARCH64_OPND_SVE_SIMM5B:
2114 case AARCH64_OPND_SVE_SIMM6:
2115 case AARCH64_OPND_SVE_SIMM8:
2116 size = get_operand_fields_width (get_operand_from_code (type));
2117 assert (size < 32);
2118 if (!value_fit_signed_field_p (opnd->imm.value, size))
2119 {
2120 set_imm_out_of_range_error (mismatch_detail, idx,
2121 -(1 << (size - 1)),
2122 (1 << (size - 1)) - 1);
2123 return 0;
2124 }
2125 break;
2126
2127 case AARCH64_OPND_WIDTH:
2128 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2129 && opnds[0].type == AARCH64_OPND_Rd);
2130 size = get_upper_bound (qualifier);
2131 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2132 /* lsb+width <= reg.size */
2133 {
2134 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2135 size - opnds[idx-1].imm.value);
2136 return 0;
2137 }
2138 break;
2139
2140 case AARCH64_OPND_LIMM:
2141 case AARCH64_OPND_SVE_LIMM:
2142 {
2143 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2144 uint64_t uimm = opnd->imm.value;
2145 if (opcode->op == OP_BIC)
2146 uimm = ~uimm;
2147 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2148 {
2149 set_other_error (mismatch_detail, idx,
2150 _("immediate out of range"));
2151 return 0;
2152 }
2153 }
2154 break;
2155
2156 case AARCH64_OPND_IMM0:
2157 case AARCH64_OPND_FPIMM0:
2158 if (opnd->imm.value != 0)
2159 {
2160 set_other_error (mismatch_detail, idx,
2161 _("immediate zero expected"));
2162 return 0;
2163 }
2164 break;
2165
2166 case AARCH64_OPND_IMM_ROT1:
2167 case AARCH64_OPND_IMM_ROT2:
2168 case AARCH64_OPND_SVE_IMM_ROT2:
2169 if (opnd->imm.value != 0
2170 && opnd->imm.value != 90
2171 && opnd->imm.value != 180
2172 && opnd->imm.value != 270)
2173 {
2174 set_other_error (mismatch_detail, idx,
2175 _("rotate expected to be 0, 90, 180 or 270"));
2176 return 0;
2177 }
2178 break;
2179
2180 case AARCH64_OPND_IMM_ROT3:
2181 case AARCH64_OPND_SVE_IMM_ROT1:
2182 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2183 {
2184 set_other_error (mismatch_detail, idx,
2185 _("rotate expected to be 90 or 270"));
2186 return 0;
2187 }
2188 break;
2189
2190 case AARCH64_OPND_SHLL_IMM:
2191 assert (idx == 2);
2192 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2193 if (opnd->imm.value != size)
2194 {
2195 set_other_error (mismatch_detail, idx,
2196 _("invalid shift amount"));
2197 return 0;
2198 }
2199 break;
2200
2201 case AARCH64_OPND_IMM_VLSL:
2202 size = aarch64_get_qualifier_esize (qualifier);
2203 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2204 {
2205 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2206 size * 8 - 1);
2207 return 0;
2208 }
2209 break;
2210
2211 case AARCH64_OPND_IMM_VLSR:
2212 size = aarch64_get_qualifier_esize (qualifier);
2213 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2214 {
2215 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2216 return 0;
2217 }
2218 break;
2219
2220 case AARCH64_OPND_SIMD_IMM:
2221 case AARCH64_OPND_SIMD_IMM_SFT:
2222 /* Qualifier check. */
2223 switch (qualifier)
2224 {
2225 case AARCH64_OPND_QLF_LSL:
2226 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2227 {
2228 set_other_error (mismatch_detail, idx,
2229 _("invalid shift operator"));
2230 return 0;
2231 }
2232 break;
2233 case AARCH64_OPND_QLF_MSL:
2234 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2235 {
2236 set_other_error (mismatch_detail, idx,
2237 _("invalid shift operator"));
2238 return 0;
2239 }
2240 break;
2241 case AARCH64_OPND_QLF_NIL:
2242 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2243 {
2244 set_other_error (mismatch_detail, idx,
2245 _("shift is not permitted"));
2246 return 0;
2247 }
2248 break;
2249 default:
2250 assert (0);
2251 return 0;
2252 }
2253 /* Is the immediate valid? */
2254 assert (idx == 1);
2255 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2256 {
2257 /* uimm8 or simm8 */
2258 if (!value_in_range_p (opnd->imm.value, -128, 255))
2259 {
2260 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2261 return 0;
2262 }
2263 }
2264 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2265 {
2266 /* uimm64 is not
2267 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2268 ffffffffgggggggghhhhhhhh'. */
2269 set_other_error (mismatch_detail, idx,
2270 _("invalid value for immediate"));
2271 return 0;
2272 }
2273 /* Is the shift amount valid? */
2274 switch (opnd->shifter.kind)
2275 {
2276 case AARCH64_MOD_LSL:
2277 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2278 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2279 {
2280 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2281 (size - 1) * 8);
2282 return 0;
2283 }
2284 if (!value_aligned_p (opnd->shifter.amount, 8))
2285 {
2286 set_unaligned_error (mismatch_detail, idx, 8);
2287 return 0;
2288 }
2289 break;
2290 case AARCH64_MOD_MSL:
2291 /* Only 8 and 16 are valid shift amount. */
2292 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2293 {
2294 set_other_error (mismatch_detail, idx,
2295 _("shift amount must be 0 or 16"));
2296 return 0;
2297 }
2298 break;
2299 default:
2300 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2301 {
2302 set_other_error (mismatch_detail, idx,
2303 _("invalid shift operator"));
2304 return 0;
2305 }
2306 break;
2307 }
2308 break;
2309
2310 case AARCH64_OPND_FPIMM:
2311 case AARCH64_OPND_SIMD_FPIMM:
2312 case AARCH64_OPND_SVE_FPIMM8:
2313 if (opnd->imm.is_fp == 0)
2314 {
2315 set_other_error (mismatch_detail, idx,
2316 _("floating-point immediate expected"));
2317 return 0;
2318 }
2319 /* The value is expected to be an 8-bit floating-point constant with
2320 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2321 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2322 instruction). */
2323 if (!value_in_range_p (opnd->imm.value, 0, 255))
2324 {
2325 set_other_error (mismatch_detail, idx,
2326 _("immediate out of range"));
2327 return 0;
2328 }
2329 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2330 {
2331 set_other_error (mismatch_detail, idx,
2332 _("invalid shift operator"));
2333 return 0;
2334 }
2335 break;
2336
2337 case AARCH64_OPND_SVE_AIMM:
2338 min_value = 0;
2339 sve_aimm:
2340 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2341 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2342 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2343 uvalue = opnd->imm.value;
2344 shift = opnd->shifter.amount;
2345 if (size == 1)
2346 {
2347 if (shift != 0)
2348 {
2349 set_other_error (mismatch_detail, idx,
2350 _("no shift amount allowed for"
2351 " 8-bit constants"));
2352 return 0;
2353 }
2354 }
2355 else
2356 {
2357 if (shift != 0 && shift != 8)
2358 {
2359 set_other_error (mismatch_detail, idx,
2360 _("shift amount must be 0 or 8"));
2361 return 0;
2362 }
2363 if (shift == 0 && (uvalue & 0xff) == 0)
2364 {
2365 shift = 8;
2366 uvalue = (int64_t) uvalue / 256;
2367 }
2368 }
2369 mask >>= shift;
2370 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2371 {
2372 set_other_error (mismatch_detail, idx,
2373 _("immediate too big for element size"));
2374 return 0;
2375 }
2376 uvalue = (uvalue - min_value) & mask;
2377 if (uvalue > 0xff)
2378 {
2379 set_other_error (mismatch_detail, idx,
2380 _("invalid arithmetic immediate"));
2381 return 0;
2382 }
2383 break;
2384
2385 case AARCH64_OPND_SVE_ASIMM:
2386 min_value = -128;
2387 goto sve_aimm;
2388
2389 case AARCH64_OPND_SVE_I1_HALF_ONE:
2390 assert (opnd->imm.is_fp);
2391 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2392 {
2393 set_other_error (mismatch_detail, idx,
2394 _("floating-point value must be 0.5 or 1.0"));
2395 return 0;
2396 }
2397 break;
2398
2399 case AARCH64_OPND_SVE_I1_HALF_TWO:
2400 assert (opnd->imm.is_fp);
2401 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2402 {
2403 set_other_error (mismatch_detail, idx,
2404 _("floating-point value must be 0.5 or 2.0"));
2405 return 0;
2406 }
2407 break;
2408
2409 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2410 assert (opnd->imm.is_fp);
2411 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2412 {
2413 set_other_error (mismatch_detail, idx,
2414 _("floating-point value must be 0.0 or 1.0"));
2415 return 0;
2416 }
2417 break;
2418
2419 case AARCH64_OPND_SVE_INV_LIMM:
2420 {
2421 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2422 uint64_t uimm = ~opnd->imm.value;
2423 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2424 {
2425 set_other_error (mismatch_detail, idx,
2426 _("immediate out of range"));
2427 return 0;
2428 }
2429 }
2430 break;
2431
2432 case AARCH64_OPND_SVE_LIMM_MOV:
2433 {
2434 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2435 uint64_t uimm = opnd->imm.value;
2436 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2437 {
2438 set_other_error (mismatch_detail, idx,
2439 _("immediate out of range"));
2440 return 0;
2441 }
2442 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2443 {
2444 set_other_error (mismatch_detail, idx,
2445 _("invalid replicated MOV immediate"));
2446 return 0;
2447 }
2448 }
2449 break;
2450
2451 case AARCH64_OPND_SVE_PATTERN_SCALED:
2452 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2453 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2454 {
2455 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2456 return 0;
2457 }
2458 break;
2459
2460 case AARCH64_OPND_SVE_SHLIMM_PRED:
2461 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2462 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2463 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2464 {
2465 set_imm_out_of_range_error (mismatch_detail, idx,
2466 0, 8 * size - 1);
2467 return 0;
2468 }
2469 break;
2470
2471 case AARCH64_OPND_SVE_SHRIMM_PRED:
2472 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2473 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2474 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2475 {
2476 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2477 return 0;
2478 }
2479 break;
2480
2481 default:
2482 break;
2483 }
2484 break;
2485
2486 case AARCH64_OPND_CLASS_SYSTEM:
2487 switch (type)
2488 {
2489 case AARCH64_OPND_PSTATEFIELD:
2490 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2491 /* MSR UAO, #uimm4
2492 MSR PAN, #uimm4
2493 MSR SSBS,#uimm4
2494 The immediate must be #0 or #1. */
2495 if ((opnd->pstatefield == 0x03 /* UAO. */
2496 || opnd->pstatefield == 0x04 /* PAN. */
2497 || opnd->pstatefield == 0x19 /* SSBS. */
2498 || opnd->pstatefield == 0x1a) /* DIT. */
2499 && opnds[1].imm.value > 1)
2500 {
2501 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2502 return 0;
2503 }
2504 /* MSR SPSel, #uimm4
2505 Uses uimm4 as a control value to select the stack pointer: if
2506 bit 0 is set it selects the current exception level's stack
2507 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2508 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2509 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2510 {
2511 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2512 return 0;
2513 }
2514 break;
2515 default:
2516 break;
2517 }
2518 break;
2519
2520 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2521 /* Get the upper bound for the element index. */
2522 if (opcode->op == OP_FCMLA_ELEM)
2523 /* FCMLA index range depends on the vector size of other operands
2524 and is halfed because complex numbers take two elements. */
2525 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2526 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2527 else
2528 num = 16;
2529 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2530 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2531
2532 /* Index out-of-range. */
2533 if (!value_in_range_p (opnd->reglane.index, 0, num))
2534 {
2535 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2536 return 0;
2537 }
2538 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2539 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2540 number is encoded in "size:M:Rm":
2541 size <Vm>
2542 00 RESERVED
2543 01 0:Rm
2544 10 M:Rm
2545 11 RESERVED */
2546 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2547 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2548 {
2549 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2550 return 0;
2551 }
2552 break;
2553
2554 case AARCH64_OPND_CLASS_MODIFIED_REG:
2555 assert (idx == 1 || idx == 2);
2556 switch (type)
2557 {
2558 case AARCH64_OPND_Rm_EXT:
2559 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2560 && opnd->shifter.kind != AARCH64_MOD_LSL)
2561 {
2562 set_other_error (mismatch_detail, idx,
2563 _("extend operator expected"));
2564 return 0;
2565 }
2566 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2567 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2568 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2569 case. */
2570 if (!aarch64_stack_pointer_p (opnds + 0)
2571 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2572 {
2573 if (!opnd->shifter.operator_present)
2574 {
2575 set_other_error (mismatch_detail, idx,
2576 _("missing extend operator"));
2577 return 0;
2578 }
2579 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2580 {
2581 set_other_error (mismatch_detail, idx,
2582 _("'LSL' operator not allowed"));
2583 return 0;
2584 }
2585 }
2586 assert (opnd->shifter.operator_present /* Default to LSL. */
2587 || opnd->shifter.kind == AARCH64_MOD_LSL);
2588 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2589 {
2590 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2591 return 0;
2592 }
2593 /* In the 64-bit form, the final register operand is written as Wm
2594 for all but the (possibly omitted) UXTX/LSL and SXTX
2595 operators.
2596 N.B. GAS allows X register to be used with any operator as a
2597 programming convenience. */
2598 if (qualifier == AARCH64_OPND_QLF_X
2599 && opnd->shifter.kind != AARCH64_MOD_LSL
2600 && opnd->shifter.kind != AARCH64_MOD_UXTX
2601 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2602 {
2603 set_other_error (mismatch_detail, idx, _("W register expected"));
2604 return 0;
2605 }
2606 break;
2607
2608 case AARCH64_OPND_Rm_SFT:
2609 /* ROR is not available to the shifted register operand in
2610 arithmetic instructions. */
2611 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2612 {
2613 set_other_error (mismatch_detail, idx,
2614 _("shift operator expected"));
2615 return 0;
2616 }
2617 if (opnd->shifter.kind == AARCH64_MOD_ROR
2618 && opcode->iclass != log_shift)
2619 {
2620 set_other_error (mismatch_detail, idx,
2621 _("'ROR' operator not allowed"));
2622 return 0;
2623 }
2624 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2625 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2626 {
2627 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2628 return 0;
2629 }
2630 break;
2631
2632 default:
2633 break;
2634 }
2635 break;
2636
2637 default:
2638 break;
2639 }
2640
2641 return 1;
2642 }
2643
2644 /* Main entrypoint for the operand constraint checking.
2645
2646 Return 1 if operands of *INST meet the constraint applied by the operand
2647 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2648 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2649 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2650 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2651 error kind when it is notified that an instruction does not pass the check).
2652
2653 Un-determined operand qualifiers may get established during the process. */
2654
2655 int
2656 aarch64_match_operands_constraint (aarch64_inst *inst,
2657 aarch64_operand_error *mismatch_detail)
2658 {
2659 int i;
2660
2661 DEBUG_TRACE ("enter");
2662
2663 /* Check for cases where a source register needs to be the same as the
2664 destination register. Do this before matching qualifiers since if
2665 an instruction has both invalid tying and invalid qualifiers,
2666 the error about qualifiers would suggest several alternative
2667 instructions that also have invalid tying. */
2668 i = inst->opcode->tied_operand;
2669 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2670 {
2671 if (mismatch_detail)
2672 {
2673 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2674 mismatch_detail->index = i;
2675 mismatch_detail->error = NULL;
2676 }
2677 return 0;
2678 }
2679
2680 /* Match operands' qualifier.
2681 *INST has already had qualifier establish for some, if not all, of
2682 its operands; we need to find out whether these established
2683 qualifiers match one of the qualifier sequence in
2684 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2685 with the corresponding qualifier in such a sequence.
2686 Only basic operand constraint checking is done here; the more thorough
2687 constraint checking will carried out by operand_general_constraint_met_p,
2688 which has be to called after this in order to get all of the operands'
2689 qualifiers established. */
2690 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2691 {
2692 DEBUG_TRACE ("FAIL on operand qualifier matching");
2693 if (mismatch_detail)
2694 {
2695 /* Return an error type to indicate that it is the qualifier
2696 matching failure; we don't care about which operand as there
2697 are enough information in the opcode table to reproduce it. */
2698 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2699 mismatch_detail->index = -1;
2700 mismatch_detail->error = NULL;
2701 }
2702 return 0;
2703 }
2704
2705 /* Match operands' constraint. */
2706 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2707 {
2708 enum aarch64_opnd type = inst->opcode->operands[i];
2709 if (type == AARCH64_OPND_NIL)
2710 break;
2711 if (inst->operands[i].skip)
2712 {
2713 DEBUG_TRACE ("skip the incomplete operand %d", i);
2714 continue;
2715 }
2716 if (operand_general_constraint_met_p (inst->operands, i, type,
2717 inst->opcode, mismatch_detail) == 0)
2718 {
2719 DEBUG_TRACE ("FAIL on operand %d", i);
2720 return 0;
2721 }
2722 }
2723
2724 DEBUG_TRACE ("PASS");
2725
2726 return 1;
2727 }
2728
2729 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2730 Also updates the TYPE of each INST->OPERANDS with the corresponding
2731 value of OPCODE->OPERANDS.
2732
2733 Note that some operand qualifiers may need to be manually cleared by
2734 the caller before it further calls the aarch64_opcode_encode; by
2735 doing this, it helps the qualifier matching facilities work
2736 properly. */
2737
2738 const aarch64_opcode*
2739 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2740 {
2741 int i;
2742 const aarch64_opcode *old = inst->opcode;
2743
2744 inst->opcode = opcode;
2745
2746 /* Update the operand types. */
2747 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2748 {
2749 inst->operands[i].type = opcode->operands[i];
2750 if (opcode->operands[i] == AARCH64_OPND_NIL)
2751 break;
2752 }
2753
2754 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2755
2756 return old;
2757 }
2758
2759 int
2760 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2761 {
2762 int i;
2763 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2764 if (operands[i] == operand)
2765 return i;
2766 else if (operands[i] == AARCH64_OPND_NIL)
2767 break;
2768 return -1;
2769 }
2770 \f
/* R0...R30, followed by FOR31.  Expands to a 32-entry initializer list
   naming one register bank; R is a macro producing the name for register
   number X, FOR31 supplies the special name for register 31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* General-purpose register names, indexed [has_zr][is_64][regno]:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2801
/* Return the integer register name.
   If SP_REG_P is not 0, R31 is an SP reg; otherwise R31 is the zero reg.  */
2804
2805 static inline const char *
2806 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2807 {
2808 const int has_zr = sp_reg_p ? 0 : 1;
2809 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2810 return int_reg[has_zr][is_64][regno];
2811 }
2812
2813 /* Like get_int_reg_name, but IS_64 is always 1. */
2814
2815 static inline const char *
2816 get_64bit_int_reg_name (int regno, int sp_reg_p)
2817 {
2818 const int has_zr = sp_reg_p ? 0 : 1;
2819 return int_reg[has_zr][1][regno];
2820 }
2821
2822 /* Get the name of the integer offset register in OPND, using the shift type
2823 to decide whether it's a word or doubleword. */
2824
2825 static inline const char *
2826 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2827 {
2828 switch (opnd->shifter.kind)
2829 {
2830 case AARCH64_MOD_UXTW:
2831 case AARCH64_MOD_SXTW:
2832 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2833
2834 case AARCH64_MOD_LSL:
2835 case AARCH64_MOD_SXTX:
2836 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2837
2838 default:
2839 abort ();
2840 }
2841 }
2842
2843 /* Get the name of the SVE vector offset register in OPND, using the operand
2844 qualifier to decide whether the suffix should be .S or .D. */
2845
2846 static inline const char *
2847 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2848 {
2849 assert (qualifier == AARCH64_OPND_QLF_S_S
2850 || qualifier == AARCH64_OPND_QLF_S_D);
2851 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2852 }
2853
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union allows the bit pattern produced by expand_fp_imm to be
   reinterpreted as the corresponding IEEE floating-point type.  */

typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision (see
   expand_fp_imm), so this deliberately uses 32-bit storage.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2873
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t result = 0;
  uint32_t sign, frac7, bit6, repl4;

  sign = (imm8 >> 7) & 0x01;			/* imm8<7>  */
  frac7 = imm8 & 0x7f;				/* imm8<6:0>  */
  bit6 = frac7 >> 6;				/* imm8<6>  */
  /* Replicate(imm8<6>, 4).  */
  repl4 = (bit6 << 3) | (bit6 << 2) | (bit6 << 1) | bit6;

  switch (size)
    {
    case 8:
      /* Assemble the top 32 bits of the double, then shift into place.  */
      result = (sign << (63 - 32))		/* imm8<7>  */
	| ((bit6 ^ 1) << (62 - 32))		/* NOT (imm8<6>)  */
	| (repl4 << (58 - 32)) | (bit6 << (57 - 32))
	| (bit6 << (56 - 32)) | (bit6 << (55 - 32))	/* Replicate(imm8<6>,7)  */
	| (frac7 << (48 - 32));			/* imm8<6>:imm8<5:0>  */
      result <<= 32;
      break;

    case 4:
    case 2:
      result = (sign << 31)			/* imm8<7>  */
	| ((bit6 ^ 1) << 30)			/* NOT (imm8<6>)  */
	| (repl4 << 26)				/* Replicate(imm8<6>,4)  */
	| (frac7 << 19);			/* imm8<6>:imm8<5:0>  */
      break;

    default:
      /* An unsupported size.  */
      assert (0);
    }

  return result;
}
2917
2918 /* Produce the string representation of the register list operand *OPND
2919 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2920 the register name that comes before the register number, such as "v". */
2921 static void
2922 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2923 const char *prefix)
2924 {
2925 const int num_regs = opnd->reglist.num_regs;
2926 const int first_reg = opnd->reglist.first_regno;
2927 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2928 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2929 char tb[8]; /* Temporary buffer. */
2930
2931 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2932 assert (num_regs >= 1 && num_regs <= 4);
2933
2934 /* Prepare the index if any. */
2935 if (opnd->reglist.has_index)
2936 /* PR 21096: The %100 is to silence a warning about possible truncation. */
2937 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
2938 else
2939 tb[0] = '\0';
2940
2941 /* The hyphenated form is preferred for disassembly if there are
2942 more than two registers in the list, and the register numbers
2943 are monotonically increasing in increments of one. */
2944 if (num_regs > 2 && last_reg > first_reg)
2945 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2946 prefix, last_reg, qlf_name, tb);
2947 else
2948 {
2949 const int reg0 = first_reg;
2950 const int reg1 = (first_reg + 1) & 0x1f;
2951 const int reg2 = (first_reg + 2) & 0x1f;
2952 const int reg3 = (first_reg + 3) & 0x1f;
2953
2954 switch (num_regs)
2955 {
2956 case 1:
2957 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2958 break;
2959 case 2:
2960 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2961 prefix, reg1, qlf_name, tb);
2962 break;
2963 case 3:
2964 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2965 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2966 prefix, reg2, qlf_name, tb);
2967 break;
2968 case 4:
2969 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2970 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2971 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2972 break;
2973 }
2974 }
2975 }
2976
2977 /* Print the register+immediate address in OPND to BUF, which has SIZE
2978 characters. BASE is the name of the base register. */
2979
2980 static void
2981 print_immediate_offset_address (char *buf, size_t size,
2982 const aarch64_opnd_info *opnd,
2983 const char *base)
2984 {
2985 if (opnd->addr.writeback)
2986 {
2987 if (opnd->addr.preind)
2988 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
2989 else
2990 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
2991 }
2992 else
2993 {
2994 if (opnd->shifter.operator_present)
2995 {
2996 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2997 snprintf (buf, size, "[%s, #%d, mul vl]",
2998 base, opnd->addr.offset.imm);
2999 }
3000 else if (opnd->addr.offset.imm)
3001 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
3002 else
3003 snprintf (buf, size, "[%s]", base);
3004 }
3005 }
3006
3007 /* Produce the string representation of the register offset address operand
3008 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3009 the names of the base and offset registers. */
3010 static void
3011 print_register_offset_address (char *buf, size_t size,
3012 const aarch64_opnd_info *opnd,
3013 const char *base, const char *offset)
3014 {
3015 char tb[16]; /* Temporary buffer. */
3016 bfd_boolean print_extend_p = TRUE;
3017 bfd_boolean print_amount_p = TRUE;
3018 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3019
3020 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3021 || !opnd->shifter.amount_present))
3022 {
3023 /* Not print the shift/extend amount when the amount is zero and
3024 when it is not the special case of 8-bit load/store instruction. */
3025 print_amount_p = FALSE;
3026 /* Likewise, no need to print the shift operator LSL in such a
3027 situation. */
3028 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3029 print_extend_p = FALSE;
3030 }
3031
3032 /* Prepare for the extend/shift. */
3033 if (print_extend_p)
3034 {
3035 if (print_amount_p)
3036 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3037 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3038 (opnd->shifter.amount % 100));
3039 else
3040 snprintf (tb, sizeof (tb), ", %s", shift_name);
3041 }
3042 else
3043 tb[0] = '\0';
3044
3045 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3046 }
3047
3048 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3049 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3050 PC, PCREL_P and ADDRESS are used to pass in and return information about
3051 the PC-relative address calculation, where the PC value is passed in
3052 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3053 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3054 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3055
3056 The function serves both the disassembler and the assembler diagnostics
3057 issuer, which is the reason why it lives in this file. */
3058
3059 void
3060 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3061 const aarch64_opcode *opcode,
3062 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3063 bfd_vma *address, char** notes)
3064 {
3065 unsigned int i, num_conds;
3066 const char *name = NULL;
3067 const aarch64_opnd_info *opnd = opnds + idx;
3068 enum aarch64_modifier_kind kind;
3069 uint64_t addr, enum_value;
3070
3071 buf[0] = '\0';
3072 if (pcrel_p)
3073 *pcrel_p = 0;
3074
3075 switch (opnd->type)
3076 {
3077 case AARCH64_OPND_Rd:
3078 case AARCH64_OPND_Rn:
3079 case AARCH64_OPND_Rm:
3080 case AARCH64_OPND_Rt:
3081 case AARCH64_OPND_Rt2:
3082 case AARCH64_OPND_Rs:
3083 case AARCH64_OPND_Ra:
3084 case AARCH64_OPND_Rt_SYS:
3085 case AARCH64_OPND_PAIRREG:
3086 case AARCH64_OPND_SVE_Rm:
3087 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3088 the <ic_op>, therefore we use opnd->present to override the
3089 generic optional-ness information. */
3090 if (opnd->type == AARCH64_OPND_Rt_SYS)
3091 {
3092 if (!opnd->present)
3093 break;
3094 }
3095 /* Omit the operand, e.g. RET. */
3096 else if (optional_operand_p (opcode, idx)
3097 && (opnd->reg.regno
3098 == get_optional_operand_default_value (opcode)))
3099 break;
3100 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3101 || opnd->qualifier == AARCH64_OPND_QLF_X);
3102 snprintf (buf, size, "%s",
3103 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3104 break;
3105
3106 case AARCH64_OPND_Rd_SP:
3107 case AARCH64_OPND_Rn_SP:
3108 case AARCH64_OPND_SVE_Rn_SP:
3109 case AARCH64_OPND_Rm_SP:
3110 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3111 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3112 || opnd->qualifier == AARCH64_OPND_QLF_X
3113 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3114 snprintf (buf, size, "%s",
3115 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3116 break;
3117
3118 case AARCH64_OPND_Rm_EXT:
3119 kind = opnd->shifter.kind;
3120 assert (idx == 1 || idx == 2);
3121 if ((aarch64_stack_pointer_p (opnds)
3122 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3123 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3124 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3125 && kind == AARCH64_MOD_UXTW)
3126 || (opnd->qualifier == AARCH64_OPND_QLF_X
3127 && kind == AARCH64_MOD_UXTX)))
3128 {
3129 /* 'LSL' is the preferred form in this case. */
3130 kind = AARCH64_MOD_LSL;
3131 if (opnd->shifter.amount == 0)
3132 {
3133 /* Shifter omitted. */
3134 snprintf (buf, size, "%s",
3135 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3136 break;
3137 }
3138 }
3139 if (opnd->shifter.amount)
3140 snprintf (buf, size, "%s, %s #%" PRIi64,
3141 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3142 aarch64_operand_modifiers[kind].name,
3143 opnd->shifter.amount);
3144 else
3145 snprintf (buf, size, "%s, %s",
3146 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3147 aarch64_operand_modifiers[kind].name);
3148 break;
3149
3150 case AARCH64_OPND_Rm_SFT:
3151 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3152 || opnd->qualifier == AARCH64_OPND_QLF_X);
3153 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3154 snprintf (buf, size, "%s",
3155 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3156 else
3157 snprintf (buf, size, "%s, %s #%" PRIi64,
3158 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3159 aarch64_operand_modifiers[opnd->shifter.kind].name,
3160 opnd->shifter.amount);
3161 break;
3162
3163 case AARCH64_OPND_Fd:
3164 case AARCH64_OPND_Fn:
3165 case AARCH64_OPND_Fm:
3166 case AARCH64_OPND_Fa:
3167 case AARCH64_OPND_Ft:
3168 case AARCH64_OPND_Ft2:
3169 case AARCH64_OPND_Sd:
3170 case AARCH64_OPND_Sn:
3171 case AARCH64_OPND_Sm:
3172 case AARCH64_OPND_SVE_VZn:
3173 case AARCH64_OPND_SVE_Vd:
3174 case AARCH64_OPND_SVE_Vm:
3175 case AARCH64_OPND_SVE_Vn:
3176 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3177 opnd->reg.regno);
3178 break;
3179
3180 case AARCH64_OPND_Va:
3181 case AARCH64_OPND_Vd:
3182 case AARCH64_OPND_Vn:
3183 case AARCH64_OPND_Vm:
3184 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3185 aarch64_get_qualifier_name (opnd->qualifier));
3186 break;
3187
3188 case AARCH64_OPND_Ed:
3189 case AARCH64_OPND_En:
3190 case AARCH64_OPND_Em:
3191 case AARCH64_OPND_Em16:
3192 case AARCH64_OPND_SM3_IMM2:
3193 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3194 aarch64_get_qualifier_name (opnd->qualifier),
3195 opnd->reglane.index);
3196 break;
3197
3198 case AARCH64_OPND_VdD1:
3199 case AARCH64_OPND_VnD1:
3200 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3201 break;
3202
3203 case AARCH64_OPND_LVn:
3204 case AARCH64_OPND_LVt:
3205 case AARCH64_OPND_LVt_AL:
3206 case AARCH64_OPND_LEt:
3207 print_register_list (buf, size, opnd, "v");
3208 break;
3209
3210 case AARCH64_OPND_SVE_Pd:
3211 case AARCH64_OPND_SVE_Pg3:
3212 case AARCH64_OPND_SVE_Pg4_5:
3213 case AARCH64_OPND_SVE_Pg4_10:
3214 case AARCH64_OPND_SVE_Pg4_16:
3215 case AARCH64_OPND_SVE_Pm:
3216 case AARCH64_OPND_SVE_Pn:
3217 case AARCH64_OPND_SVE_Pt:
3218 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3219 snprintf (buf, size, "p%d", opnd->reg.regno);
3220 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3221 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3222 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3223 aarch64_get_qualifier_name (opnd->qualifier));
3224 else
3225 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3226 aarch64_get_qualifier_name (opnd->qualifier));
3227 break;
3228
3229 case AARCH64_OPND_SVE_Za_5:
3230 case AARCH64_OPND_SVE_Za_16:
3231 case AARCH64_OPND_SVE_Zd:
3232 case AARCH64_OPND_SVE_Zm_5:
3233 case AARCH64_OPND_SVE_Zm_16:
3234 case AARCH64_OPND_SVE_Zn:
3235 case AARCH64_OPND_SVE_Zt:
3236 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3237 snprintf (buf, size, "z%d", opnd->reg.regno);
3238 else
3239 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3240 aarch64_get_qualifier_name (opnd->qualifier));
3241 break;
3242
3243 case AARCH64_OPND_SVE_ZnxN:
3244 case AARCH64_OPND_SVE_ZtxN:
3245 print_register_list (buf, size, opnd, "z");
3246 break;
3247
3248 case AARCH64_OPND_SVE_Zm3_INDEX:
3249 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3250 case AARCH64_OPND_SVE_Zm4_INDEX:
3251 case AARCH64_OPND_SVE_Zn_INDEX:
3252 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3253 aarch64_get_qualifier_name (opnd->qualifier),
3254 opnd->reglane.index);
3255 break;
3256
3257 case AARCH64_OPND_CRn:
3258 case AARCH64_OPND_CRm:
3259 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3260 break;
3261
3262 case AARCH64_OPND_IDX:
3263 case AARCH64_OPND_MASK:
3264 case AARCH64_OPND_IMM:
3265 case AARCH64_OPND_IMM_2:
3266 case AARCH64_OPND_WIDTH:
3267 case AARCH64_OPND_UIMM3_OP1:
3268 case AARCH64_OPND_UIMM3_OP2:
3269 case AARCH64_OPND_BIT_NUM:
3270 case AARCH64_OPND_IMM_VLSL:
3271 case AARCH64_OPND_IMM_VLSR:
3272 case AARCH64_OPND_SHLL_IMM:
3273 case AARCH64_OPND_IMM0:
3274 case AARCH64_OPND_IMMR:
3275 case AARCH64_OPND_IMMS:
3276 case AARCH64_OPND_FBITS:
3277 case AARCH64_OPND_SIMM5:
3278 case AARCH64_OPND_SVE_SHLIMM_PRED:
3279 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3280 case AARCH64_OPND_SVE_SHRIMM_PRED:
3281 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3282 case AARCH64_OPND_SVE_SIMM5:
3283 case AARCH64_OPND_SVE_SIMM5B:
3284 case AARCH64_OPND_SVE_SIMM6:
3285 case AARCH64_OPND_SVE_SIMM8:
3286 case AARCH64_OPND_SVE_UIMM3:
3287 case AARCH64_OPND_SVE_UIMM7:
3288 case AARCH64_OPND_SVE_UIMM8:
3289 case AARCH64_OPND_SVE_UIMM8_53:
3290 case AARCH64_OPND_IMM_ROT1:
3291 case AARCH64_OPND_IMM_ROT2:
3292 case AARCH64_OPND_IMM_ROT3:
3293 case AARCH64_OPND_SVE_IMM_ROT1:
3294 case AARCH64_OPND_SVE_IMM_ROT2:
3295 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3296 break;
3297
3298 case AARCH64_OPND_SVE_I1_HALF_ONE:
3299 case AARCH64_OPND_SVE_I1_HALF_TWO:
3300 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3301 {
3302 single_conv_t c;
3303 c.i = opnd->imm.value;
3304 snprintf (buf, size, "#%.1f", c.f);
3305 break;
3306 }
3307
3308 case AARCH64_OPND_SVE_PATTERN:
3309 if (optional_operand_p (opcode, idx)
3310 && opnd->imm.value == get_optional_operand_default_value (opcode))
3311 break;
3312 enum_value = opnd->imm.value;
3313 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3314 if (aarch64_sve_pattern_array[enum_value])
3315 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3316 else
3317 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3318 break;
3319
3320 case AARCH64_OPND_SVE_PATTERN_SCALED:
3321 if (optional_operand_p (opcode, idx)
3322 && !opnd->shifter.operator_present
3323 && opnd->imm.value == get_optional_operand_default_value (opcode))
3324 break;
3325 enum_value = opnd->imm.value;
3326 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3327 if (aarch64_sve_pattern_array[opnd->imm.value])
3328 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3329 else
3330 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3331 if (opnd->shifter.operator_present)
3332 {
3333 size_t len = strlen (buf);
3334 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3335 aarch64_operand_modifiers[opnd->shifter.kind].name,
3336 opnd->shifter.amount);
3337 }
3338 break;
3339
3340 case AARCH64_OPND_SVE_PRFOP:
3341 enum_value = opnd->imm.value;
3342 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3343 if (aarch64_sve_prfop_array[enum_value])
3344 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3345 else
3346 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3347 break;
3348
3349 case AARCH64_OPND_IMM_MOV:
3350 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3351 {
3352 case 4: /* e.g. MOV Wd, #<imm32>. */
3353 {
3354 int imm32 = opnd->imm.value;
3355 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3356 }
3357 break;
3358 case 8: /* e.g. MOV Xd, #<imm64>. */
3359 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3360 opnd->imm.value, opnd->imm.value);
3361 break;
3362 default: assert (0);
3363 }
3364 break;
3365
3366 case AARCH64_OPND_FPIMM0:
3367 snprintf (buf, size, "#0.0");
3368 break;
3369
3370 case AARCH64_OPND_LIMM:
3371 case AARCH64_OPND_AIMM:
3372 case AARCH64_OPND_HALF:
3373 case AARCH64_OPND_SVE_INV_LIMM:
3374 case AARCH64_OPND_SVE_LIMM:
3375 case AARCH64_OPND_SVE_LIMM_MOV:
3376 if (opnd->shifter.amount)
3377 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3378 opnd->shifter.amount);
3379 else
3380 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3381 break;
3382
3383 case AARCH64_OPND_SIMD_IMM:
3384 case AARCH64_OPND_SIMD_IMM_SFT:
3385 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3386 || opnd->shifter.kind == AARCH64_MOD_NONE)
3387 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3388 else
3389 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3390 aarch64_operand_modifiers[opnd->shifter.kind].name,
3391 opnd->shifter.amount);
3392 break;
3393
3394 case AARCH64_OPND_SVE_AIMM:
3395 case AARCH64_OPND_SVE_ASIMM:
3396 if (opnd->shifter.amount)
3397 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3398 opnd->shifter.amount);
3399 else
3400 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3401 break;
3402
3403 case AARCH64_OPND_FPIMM:
3404 case AARCH64_OPND_SIMD_FPIMM:
3405 case AARCH64_OPND_SVE_FPIMM8:
3406 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3407 {
3408 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3409 {
3410 half_conv_t c;
3411 c.i = expand_fp_imm (2, opnd->imm.value);
3412 snprintf (buf, size, "#%.18e", c.f);
3413 }
3414 break;
3415 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3416 {
3417 single_conv_t c;
3418 c.i = expand_fp_imm (4, opnd->imm.value);
3419 snprintf (buf, size, "#%.18e", c.f);
3420 }
3421 break;
3422 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3423 {
3424 double_conv_t c;
3425 c.i = expand_fp_imm (8, opnd->imm.value);
3426 snprintf (buf, size, "#%.18e", c.d);
3427 }
3428 break;
3429 default: assert (0);
3430 }
3431 break;
3432
3433 case AARCH64_OPND_CCMP_IMM:
3434 case AARCH64_OPND_NZCV:
3435 case AARCH64_OPND_EXCEPTION:
3436 case AARCH64_OPND_UIMM4:
3437 case AARCH64_OPND_UIMM7:
3438 if (optional_operand_p (opcode, idx) == TRUE
3439 && (opnd->imm.value ==
3440 (int64_t) get_optional_operand_default_value (opcode)))
3441 /* Omit the operand, e.g. DCPS1. */
3442 break;
3443 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3444 break;
3445
3446 case AARCH64_OPND_COND:
3447 case AARCH64_OPND_COND1:
3448 snprintf (buf, size, "%s", opnd->cond->names[0]);
3449 num_conds = ARRAY_SIZE (opnd->cond->names);
3450 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3451 {
3452 size_t len = strlen (buf);
3453 if (i == 1)
3454 snprintf (buf + len, size - len, " // %s = %s",
3455 opnd->cond->names[0], opnd->cond->names[i]);
3456 else
3457 snprintf (buf + len, size - len, ", %s",
3458 opnd->cond->names[i]);
3459 }
3460 break;
3461
3462 case AARCH64_OPND_ADDR_ADRP:
3463 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3464 + opnd->imm.value;
3465 if (pcrel_p)
3466 *pcrel_p = 1;
3467 if (address)
3468 *address = addr;
3469 /* This is not necessary during the disassembling, as print_address_func
3470 in the disassemble_info will take care of the printing. But some
3471 other callers may be still interested in getting the string in *STR,
3472 so here we do snprintf regardless. */
3473 snprintf (buf, size, "#0x%" PRIx64, addr);
3474 break;
3475
3476 case AARCH64_OPND_ADDR_PCREL14:
3477 case AARCH64_OPND_ADDR_PCREL19:
3478 case AARCH64_OPND_ADDR_PCREL21:
3479 case AARCH64_OPND_ADDR_PCREL26:
3480 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3481 if (pcrel_p)
3482 *pcrel_p = 1;
3483 if (address)
3484 *address = addr;
3485 /* This is not necessary during the disassembling, as print_address_func
3486 in the disassemble_info will take care of the printing. But some
3487 other callers may be still interested in getting the string in *STR,
3488 so here we do snprintf regardless. */
3489 snprintf (buf, size, "#0x%" PRIx64, addr);
3490 break;
3491
3492 case AARCH64_OPND_ADDR_SIMPLE:
3493 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3494 case AARCH64_OPND_SIMD_ADDR_POST:
3495 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3496 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3497 {
3498 if (opnd->addr.offset.is_reg)
3499 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3500 else
3501 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3502 }
3503 else
3504 snprintf (buf, size, "[%s]", name);
3505 break;
3506
3507 case AARCH64_OPND_ADDR_REGOFF:
3508 case AARCH64_OPND_SVE_ADDR_R:
3509 case AARCH64_OPND_SVE_ADDR_RR:
3510 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3511 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3512 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3513 case AARCH64_OPND_SVE_ADDR_RX:
3514 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3515 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3516 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3517 print_register_offset_address
3518 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3519 get_offset_int_reg_name (opnd));
3520 break;
3521
3522 case AARCH64_OPND_SVE_ADDR_RZ:
3523 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3524 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3525 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3526 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3527 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3528 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3529 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3530 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3531 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3532 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3533 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3534 print_register_offset_address
3535 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3536 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3537 break;
3538
3539 case AARCH64_OPND_ADDR_SIMM7:
3540 case AARCH64_OPND_ADDR_SIMM9:
3541 case AARCH64_OPND_ADDR_SIMM9_2:
3542 case AARCH64_OPND_ADDR_SIMM10:
3543 case AARCH64_OPND_ADDR_OFFSET:
3544 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3545 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3546 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3547 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3548 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3549 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3550 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3551 case AARCH64_OPND_SVE_ADDR_RI_U6:
3552 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3553 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3554 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3555 print_immediate_offset_address
3556 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3557 break;
3558
3559 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3560 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3561 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3562 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3563 print_immediate_offset_address
3564 (buf, size, opnd,
3565 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3566 break;
3567
3568 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3569 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3570 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3571 print_register_offset_address
3572 (buf, size, opnd,
3573 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3574 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3575 break;
3576
3577 case AARCH64_OPND_ADDR_UIMM12:
3578 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3579 if (opnd->addr.offset.imm)
3580 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3581 else
3582 snprintf (buf, size, "[%s]", name);
3583 break;
3584
3585 case AARCH64_OPND_SYSREG:
3586 for (i = 0; aarch64_sys_regs[i].name; ++i)
3587 {
3588 bfd_boolean exact_match
3589 = (aarch64_sys_regs[i].flags & opnd->sysreg.flags)
3590 == opnd->sysreg.flags;
3591
3592 /* Try and find an exact match, But if that fails, return the first
3593 partial match that was found. */
3594 if (aarch64_sys_regs[i].value == opnd->sysreg.value
3595 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i])
3596 && (name == NULL || exact_match))
3597 {
3598 name = aarch64_sys_regs[i].name;
3599 if (exact_match)
3600 {
3601 if (notes)
3602 *notes = NULL;
3603 break;
3604 }
3605
3606 /* If we didn't match exactly, that means the presense of a flag
3607 indicates what we didn't want for this instruction. e.g. If
3608 F_REG_READ is there, that means we were looking for a write
3609 register. See aarch64_ext_sysreg. */
3610 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
3611 *notes = _("reading from a write-only register");
3612 else if (aarch64_sys_regs[i].flags & F_REG_READ)
3613 *notes = _("writing to a read-only register");
3614 }
3615 }
3616
3617 if (name)
3618 snprintf (buf, size, "%s", name);
3619 else
3620 {
3621 /* Implementation defined system register. */
3622 unsigned int value = opnd->sysreg.value;
3623 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3624 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3625 value & 0x7);
3626 }
3627 break;
3628
3629 case AARCH64_OPND_PSTATEFIELD:
3630 for (i = 0; aarch64_pstatefields[i].name; ++i)
3631 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3632 break;
3633 assert (aarch64_pstatefields[i].name);
3634 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3635 break;
3636
3637 case AARCH64_OPND_SYSREG_AT:
3638 case AARCH64_OPND_SYSREG_DC:
3639 case AARCH64_OPND_SYSREG_IC:
3640 case AARCH64_OPND_SYSREG_TLBI:
3641 case AARCH64_OPND_SYSREG_SR:
3642 snprintf (buf, size, "%s", opnd->sysins_op->name);
3643 break;
3644
3645 case AARCH64_OPND_BARRIER:
3646 snprintf (buf, size, "%s", opnd->barrier->name);
3647 break;
3648
3649 case AARCH64_OPND_BARRIER_ISB:
3650 /* Operand can be omitted, e.g. in DCPS1. */
3651 if (! optional_operand_p (opcode, idx)
3652 || (opnd->barrier->value
3653 != get_optional_operand_default_value (opcode)))
3654 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3655 break;
3656
3657 case AARCH64_OPND_PRFOP:
3658 if (opnd->prfop->name != NULL)
3659 snprintf (buf, size, "%s", opnd->prfop->name);
3660 else
3661 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3662 break;
3663
3664 case AARCH64_OPND_BARRIER_PSB:
3665 case AARCH64_OPND_BTI_TARGET:
3666 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
3667 snprintf (buf, size, "%s", opnd->hint_option->name);
3668 break;
3669
3670 default:
3671 assert (0);
3672 }
3673 }
3674 \f
/* Encode a system register from its (op0, op1, CRn, CRm, op2) fields into
   the canonical value used throughout the aarch64_sys_regs table.  The
   fields are first packed into bit positions [20:19], [18:16], [15:12],
   [11:8] and [7:5] respectively, then the whole value is shifted right by
   5 so that op2 ends up in the low bits.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Readable shorthands for the CRn/CRm register numbers used as CPENC
   arguments in the tables below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15
3698
/* TODO: one issue still needs to be resolved:
   1. handle CPU-implementation-defined system registers.  */
3701 const aarch64_sys_reg aarch64_sys_regs [] =
3702 {
3703 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3704 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3705 { "elr_el1", CPEN_(0,C0,1), 0 },
3706 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3707 { "sp_el0", CPEN_(0,C1,0), 0 },
3708 { "spsel", CPEN_(0,C2,0), 0 },
3709 { "daif", CPEN_(3,C2,1), 0 },
3710 { "currentel", CPEN_(0,C2,2), F_REG_READ }, /* RO */
3711 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3712 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3713 { "nzcv", CPEN_(3,C2,0), 0 },
3714 { "ssbs", CPEN_(3,C2,6), F_ARCHEXT },
3715 { "fpcr", CPEN_(3,C4,0), 0 },
3716 { "fpsr", CPEN_(3,C4,1), 0 },
3717 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3718 { "dlr_el0", CPEN_(3,C5,1), 0 },
3719 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3720 { "elr_el2", CPEN_(4,C0,1), 0 },
3721 { "sp_el1", CPEN_(4,C1,0), 0 },
3722 { "spsr_irq", CPEN_(4,C3,0), 0 },
3723 { "spsr_abt", CPEN_(4,C3,1), 0 },
3724 { "spsr_und", CPEN_(4,C3,2), 0 },
3725 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3726 { "spsr_el3", CPEN_(6,C0,0), 0 },
3727 { "elr_el3", CPEN_(6,C0,1), 0 },
3728 { "sp_el2", CPEN_(6,C1,0), 0 },
3729 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3730 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3731 { "midr_el1", CPENC(3,0,C0,C0,0), F_REG_READ }, /* RO */
3732 { "ctr_el0", CPENC(3,3,C0,C0,1), F_REG_READ }, /* RO */
3733 { "mpidr_el1", CPENC(3,0,C0,C0,5), F_REG_READ }, /* RO */
3734 { "revidr_el1", CPENC(3,0,C0,C0,6), F_REG_READ }, /* RO */
3735 { "aidr_el1", CPENC(3,1,C0,C0,7), F_REG_READ }, /* RO */
3736 { "dczid_el0", CPENC(3,3,C0,C0,7), F_REG_READ }, /* RO */
3737 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), F_REG_READ }, /* RO */
3738 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), F_REG_READ }, /* RO */
3739 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), F_REG_READ }, /* RO */
3740 { "id_pfr2_el1", CPENC(3,0,C0,C3,4), F_ARCHEXT | F_REG_READ}, /* RO */
3741 { "id_afr0_el1", CPENC(3,0,C0,C1,3), F_REG_READ }, /* RO */
3742 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), F_REG_READ }, /* RO */
3743 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), F_REG_READ }, /* RO */
3744 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), F_REG_READ }, /* RO */
3745 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), F_REG_READ }, /* RO */
3746 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), F_REG_READ }, /* RO */
3747 { "id_isar0_el1", CPENC(3,0,C0,C2,0), F_REG_READ }, /* RO */
3748 { "id_isar1_el1", CPENC(3,0,C0,C2,1), F_REG_READ }, /* RO */
3749 { "id_isar2_el1", CPENC(3,0,C0,C2,2), F_REG_READ }, /* RO */
3750 { "id_isar3_el1", CPENC(3,0,C0,C2,3), F_REG_READ }, /* RO */
3751 { "id_isar4_el1", CPENC(3,0,C0,C2,4), F_REG_READ }, /* RO */
3752 { "id_isar5_el1", CPENC(3,0,C0,C2,5), F_REG_READ }, /* RO */
3753 { "mvfr0_el1", CPENC(3,0,C0,C3,0), F_REG_READ }, /* RO */
3754 { "mvfr1_el1", CPENC(3,0,C0,C3,1), F_REG_READ }, /* RO */
3755 { "mvfr2_el1", CPENC(3,0,C0,C3,2), F_REG_READ }, /* RO */
3756 { "ccsidr_el1", CPENC(3,1,C0,C0,0), F_REG_READ }, /* RO */
3757 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), F_REG_READ }, /* RO */
3758 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), F_REG_READ }, /* RO */
3759 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), F_REG_READ }, /* RO */
3760 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), F_REG_READ }, /* RO */
3761 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), F_REG_READ }, /* RO */
3762 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), F_REG_READ }, /* RO */
3763 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), F_REG_READ }, /* RO */
3764 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), F_REG_READ }, /* RO */
3765 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT | F_REG_READ }, /* RO */
3766 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), F_REG_READ }, /* RO */
3767 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), F_REG_READ }, /* RO */
3768 { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT | F_REG_READ }, /* RO */
3769 { "clidr_el1", CPENC(3,1,C0,C0,1), F_REG_READ }, /* RO */
3770 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 },
3771 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3772 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3773 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3774 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3775 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3776 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3777 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3778 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3779 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3780 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3781 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3782 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3783 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3784 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3785 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3786 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3787 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3788 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3789 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3790 { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3791 { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3792 { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3793 { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3794 { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3795 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3796 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3797 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3798 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3799 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3800 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3801 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3802 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3803 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3804 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3805 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3806 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3807 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3808 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3809 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3810 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3811 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3812 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3813 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3814 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3815 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3816 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3817 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3818 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3819 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3820 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3821 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3822 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3823 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3824 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3825 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3826 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3827 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3828 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3829 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3830 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT },
3831 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3832 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3833 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3834 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3835 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3836 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3837 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3838 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3839 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3840 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3841 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3842 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3843 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3844 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3845 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3846 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3847 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3848 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3849 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3850 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3851 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3852 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3853 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3854 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3855 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3856 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3857 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3858 { "rvbar_el1", CPENC(3,0,C12,C0,1), F_REG_READ }, /* RO */
3859 { "rvbar_el2", CPENC(3,4,C12,C0,1), F_REG_READ }, /* RO */
3860 { "rvbar_el3", CPENC(3,6,C12,C0,1), F_REG_READ }, /* RO */
3861 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3862 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3863 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3864 { "isr_el1", CPENC(3,0,C12,C1,0), F_REG_READ }, /* RO */
3865 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3866 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3867 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3868 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3869 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3870 { "rndr", CPENC(3,3,C2,C4,0), F_ARCHEXT | F_REG_READ }, /* RO */
3871 { "rndrrs", CPENC(3,3,C2,C4,1), F_ARCHEXT | F_REG_READ }, /* RO */
3872 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3873 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RW */
3874 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3875 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3876 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3877 { "scxtnum_el0", CPENC(3,3,C13,C0,7), F_ARCHEXT },
3878 { "scxtnum_el1", CPENC(3,0,C13,C0,7), F_ARCHEXT },
3879 { "scxtnum_el2", CPENC(3,4,C13,C0,7), F_ARCHEXT },
3880 { "scxtnum_el12", CPENC(3,5,C13,C0,7), F_ARCHEXT },
3881 { "scxtnum_el3", CPENC(3,6,C13,C0,7), F_ARCHEXT },
3882 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3883 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RW */
3884 { "cntpct_el0", CPENC(3,3,C14,C0,1), F_REG_READ }, /* RO */
3885 { "cntvct_el0", CPENC(3,3,C14,C0,2), F_REG_READ }, /* RO */
3886 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3887 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3888 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3889 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3890 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3891 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3892 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3893 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3894 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3895 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3896 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3897 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3898 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3899 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3900 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3901 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3902 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3903 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3904 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3905 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3906 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3907 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3908 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3909 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3910 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3911 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3912 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3913 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3914 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3915 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3916 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), F_REG_READ }, /* r */
3917 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3918 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3919 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), F_REG_READ }, /* r */
3920 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), F_REG_WRITE }, /* w */
3921 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 },
3922 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 },
3923 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3924 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3925 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3926 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3927 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3928 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3929 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3930 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3931 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3932 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3933 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3934 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3935 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3936 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3937 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3938 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3939 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3940 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3941 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3942 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3943 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3944 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3945 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3946 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3947 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3948 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3949 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3950 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3951 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3952 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3953 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3954 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3955 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3956 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3957 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3958 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3959 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3960 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3961 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3962 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3963 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3964 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3965 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3966 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3967 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3968 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3969 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3970 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3971 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3972 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3973 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3974 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3975 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3976 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3977 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3978 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3979 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3980 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3981 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3982 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3983 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3984 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3985 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3986 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3987 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3988 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3989 { "mdrar_el1", CPENC(2,0,C1, C0, 0), F_REG_READ }, /* r */
3990 { "oslar_el1", CPENC(2,0,C1, C0, 4), F_REG_WRITE }, /* w */
3991 { "oslsr_el1", CPENC(2,0,C1, C1, 4), F_REG_READ }, /* r */
3992 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3993 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3994 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3995 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3996 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), F_REG_READ }, /* r */
3997 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3998 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3999 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
4000 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT | F_REG_READ }, /* ro */
4001 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
4002 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
4003 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
4004 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
4005 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
4006 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
4007 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* rw */
4008 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
4009 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
4010 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
4011 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
4012 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
4013 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
4014 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), F_REG_WRITE }, /* w */
4015 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
4016 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), F_REG_READ }, /* r */
4017 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), F_REG_READ }, /* r */
4018 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
4019 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
4020 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
4021 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
4022 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
4023 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
4024 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
4025 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
4026 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
4027 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
4028 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
4029 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
4030 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
4031 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
4032 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
4033 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
4034 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
4035 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
4036 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
4037 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
4038 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
4039 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
4040 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
4041 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
4042 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
4043 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
4044 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
4045 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
4046 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
4047 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
4048 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
4049 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
4050 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
4051 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
4052 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
4053 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
4054 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
4055 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
4056 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
4057 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
4058 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
4059 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
4060 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
4061 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
4062 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
4063 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
4064 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
4065 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
4066 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
4067 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
4068 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
4069 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
4070 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
4071 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
4072 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
4073 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
4074 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
4075 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
4076 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
4077 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
4078 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
4079 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
4080 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
4081 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
4082 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
4083 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
4084 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
4085 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
4086 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
4087 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
4088
4089 { "dit", CPEN_ (3, C2, 5), F_ARCHEXT },
4090 { "vstcr_el2", CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
4091 { "vsttbr_el2", CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
4092 { "cnthvs_tval_el2", CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
4093 { "cnthvs_cval_el2", CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
4094 { "cnthvs_ctl_el2", CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
4095 { "cnthps_tval_el2", CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
4096 { "cnthps_cval_el2", CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
4097 { "cnthps_ctl_el2", CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
4098 { "sder32_el2", CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
4099 { "vncr_el2", CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
4100 { 0, CPENC(0,0,0,0,0), 0 },
4101 };
4102
4103 bfd_boolean
4104 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4105 {
4106 return (reg->flags & F_DEPRECATED) != 0;
4107 }
4108
/* Return TRUE if the system register REG is usable under the enabled feature
   set FEATURES.  Registers without F_ARCHEXT belong to the base architecture
   and are always available; for the rest, the required architecture
   extension is identified by the register's encoded value (the values below
   mirror the aarch64_sys_regs table above).  */
bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  /* Registers without an architecture-extension flag need no check.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* SCXTNUM_ELx registers.  */
  if ((reg->value == CPENC (3, 3, C13, C0, 7)
       || reg->value == CPENC (3, 0, C13, C0, 7)
       || reg->value == CPENC (3, 4, C13, C0, 7)
       || reg->value == CPENC (3, 6, C13, C0, 7)
       || reg->value == CPENC (3, 5, C13, C0, 7))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SCXTNUM))
    return FALSE;

  /* ID_PFR2_EL1 register.  */
  if (reg->value == CPENC(3, 0, C0, C3, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_ID_PFR2))
    return FALSE;

  /* SSBS.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(3,C2,6)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
     ERXMISC0_EL1 and ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  /* ARMv8.3 Pointer authentication keys.  */
  if ((reg->value == CPENC (3, 0, C2, C1, 0)
       || reg->value == CPENC (3, 0, C2, C1, 1)
       || reg->value == CPENC (3, 0, C2, C1, 2)
       || reg->value == CPENC (3, 0, C2, C1, 3)
       || reg->value == CPENC (3, 0, C2, C2, 0)
       || reg->value == CPENC (3, 0, C2, C2, 1)
       || reg->value == CPENC (3, 0, C2, C2, 2)
       || reg->value == CPENC (3, 0, C2, C2, 3)
       || reg->value == CPENC (3, 0, C2, C3, 0)
       || reg->value == CPENC (3, 0, C2, C3, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
    return FALSE;

  /* SVE.  */
  if ((reg->value == CPENC (3, 0, C0, C4, 4)
       || reg->value == CPENC (3, 0, C1, C2, 0)
       || reg->value == CPENC (3, 4, C1, C2, 0)
       || reg->value == CPENC (3, 6, C1, C2, 0)
       || reg->value == CPENC (3, 5, C1, C2, 0)
       || reg->value == CPENC (3, 0, C0, C0, 7))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
    return FALSE;

  /* ARMv8.4 features.  */

  /* PSTATE.DIT.  */
  if (reg->value == CPEN_ (3, C2, 5)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* Virtualization extensions.  */
  if ((reg->value == CPENC(3, 4, C2, C6, 2)
       || reg->value == CPENC(3, 4, C2, C6, 0)
       || reg->value == CPENC(3, 4, C14, C4, 0)
       || reg->value == CPENC(3, 4, C14, C4, 2)
       || reg->value == CPENC(3, 4, C14, C4, 1)
       || reg->value == CPENC(3, 4, C14, C5, 0)
       || reg->value == CPENC(3, 4, C14, C5, 2)
       || reg->value == CPENC(3, 4, C14, C5, 1)
       || reg->value == CPENC(3, 4, C1, C3, 1)
       || reg->value == CPENC(3, 4, C2, C2, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* ARMv8.4 TLB instructions.  */
  if ((reg->value == CPENS (0, C8, C1, 0)
       || reg->value == CPENS (0, C8, C1, 1)
       || reg->value == CPENS (0, C8, C1, 2)
       || reg->value == CPENS (0, C8, C1, 3)
       || reg->value == CPENS (0, C8, C1, 5)
       || reg->value == CPENS (0, C8, C1, 7)
       || reg->value == CPENS (4, C8, C4, 0)
       || reg->value == CPENS (4, C8, C4, 4)
       || reg->value == CPENS (4, C8, C1, 1)
       || reg->value == CPENS (4, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 6)
       || reg->value == CPENS (6, C8, C1, 1)
       || reg->value == CPENS (6, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 0)
       || reg->value == CPENS (4, C8, C1, 4)
       || reg->value == CPENS (6, C8, C1, 0)
       || reg->value == CPENS (0, C8, C6, 1)
       || reg->value == CPENS (0, C8, C6, 3)
       || reg->value == CPENS (0, C8, C6, 5)
       || reg->value == CPENS (0, C8, C6, 7)
       || reg->value == CPENS (0, C8, C2, 1)
       || reg->value == CPENS (0, C8, C2, 3)
       || reg->value == CPENS (0, C8, C2, 5)
       || reg->value == CPENS (0, C8, C2, 7)
       || reg->value == CPENS (0, C8, C5, 1)
       || reg->value == CPENS (0, C8, C5, 3)
       || reg->value == CPENS (0, C8, C5, 5)
       || reg->value == CPENS (0, C8, C5, 7)
       || reg->value == CPENS (4, C8, C0, 2)
       || reg->value == CPENS (4, C8, C0, 6)
       || reg->value == CPENS (4, C8, C4, 2)
       || reg->value == CPENS (4, C8, C4, 6)
       || reg->value == CPENS (4, C8, C4, 3)
       || reg->value == CPENS (4, C8, C4, 7)
       || reg->value == CPENS (4, C8, C6, 1)
       || reg->value == CPENS (4, C8, C6, 5)
       || reg->value == CPENS (4, C8, C2, 1)
       || reg->value == CPENS (4, C8, C2, 5)
       || reg->value == CPENS (4, C8, C5, 1)
       || reg->value == CPENS (4, C8, C5, 5)
       || reg->value == CPENS (6, C8, C6, 1)
       || reg->value == CPENS (6, C8, C6, 5)
       || reg->value == CPENS (6, C8, C2, 1)
       || reg->value == CPENS (6, C8, C2, 5)
       || reg->value == CPENS (6, C8, C5, 1)
       || reg->value == CPENS (6, C8, C5, 5))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* Random Number Instructions.  For now they are available
     (and optional) only with ARMv8.5-A.  */
  if ((reg->value == CPENC (3, 3, C2, C4, 0)
       || reg->value == CPENC (3, 3, C2, C4, 1))
      && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RNG)
	   && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_5)))
    return FALSE;

  return TRUE;
}
4337
/* The CPENC below is fairly misleading, the fields
   here are not in CPENC form.  They are in op2op1 form.  The fields are encoded
   by ins_pstatefield, which just shifts the value by the width of the fields
   in a loop.  So if you CPENC them only the first value will be set, the rest
   are masked out to 0.  As an example, op2 = 3, op1 = 2: CPENC would produce a
   value of 0b110000000001000000 (0x30040) while what you want is
   0b011010 (0x1a).  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",            0x05,   0 },
  { "daifset",          0x1e,   0 },
  { "daifclr",          0x1f,   0 },
  /* The F_ARCHEXT entries below are gated on architecture extensions by
     aarch64_pstatefield_supported_p.  */
  { "pan",              0x04,   F_ARCHEXT },
  { "uao",              0x03,   F_ARCHEXT },
  { "ssbs",             0x19,   F_ARCHEXT },
  { "dit",              0x1a,   F_ARCHEXT },
  { 0,          CPENC(0,0,0,0,0), 0 },
};
4356
4357 bfd_boolean
4358 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4359 const aarch64_sys_reg *reg)
4360 {
4361 if (!(reg->flags & F_ARCHEXT))
4362 return TRUE;
4363
4364 /* PAN. Values are from aarch64_pstatefields. */
4365 if (reg->value == 0x04
4366 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4367 return FALSE;
4368
4369 /* UAO. Values are from aarch64_pstatefields. */
4370 if (reg->value == 0x03
4371 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4372 return FALSE;
4373
4374 /* SSBS. Values are from aarch64_pstatefields. */
4375 if (reg->value == 0x19
4376 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
4377 return FALSE;
4378
4379 /* DIT. Values are from aarch64_pstatefields. */
4380 if (reg->value == 0x1a
4381 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4382 return FALSE;
4383
4384 return TRUE;
4385 }
4386
/* Operand values accepted by the IC system instruction.  F_HASXT marks
   operands that additionally take an Xt register.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
4394
/* Operand values accepted by the DC system instruction.  The F_ARCHEXT
   entries (cvap, cvadp) are gated on architecture extensions by
   aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",        CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",        CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",        CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "cvadp",      CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
4409
/* Operand values accepted by the AT system instruction.  The F_ARCHEXT
   entries (s1e1rp, s1e1wp) are gated on ARMv8.2 by
   aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
4428
/* Operand values accepted by the TLBI system instruction.  The F_ARCHEXT
   entries below correspond to the "ARMv8.4 TLB instructions" values checked
   in aarch64_sys_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },

    /* The *os variants (F_ARCHEXT).  */
    { "vmalle1os",    CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os",     CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os",      CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os",      CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os",      CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* The r* variants (F_ARCHEXT); all take an Xt operand.  */
    { "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { 0,       CPENS(0,0,0,0), 0 }
};
4514
/* Operand values accepted by the speculation-restriction (CFP/DVP/CPP)
   system instructions.  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

    { 0,       CPENS(0,0,0,0), 0 }
};
4525
4526 bfd_boolean
4527 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4528 {
4529 return (sys_ins_reg->flags & F_HASXT) != 0;
4530 }
4531
4532 extern bfd_boolean
4533 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4534 const aarch64_sys_ins_reg *reg)
4535 {
4536 if (!(reg->flags & F_ARCHEXT))
4537 return TRUE;
4538
4539 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4540 if (reg->value == CPENS (3, C7, C12, 1)
4541 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4542 return FALSE;
4543
4544 /* DC CVADP. Values are from aarch64_sys_regs_dc. */
4545 if (reg->value == CPENS (3, C7, C13, 1)
4546 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
4547 return FALSE;
4548
4549 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4550 if ((reg->value == CPENS (0, C7, C9, 0)
4551 || reg->value == CPENS (0, C7, C9, 1))
4552 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4553 return FALSE;
4554
4555 /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
4556 if (reg->value == CPENS (3, C7, C3, 0)
4557 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
4558 return FALSE;
4559
4560 return TRUE;
4561 }
4562
/* The Cn register-number macros are only needed by the system register
   tables above; retire them here so they cannot leak into later code.  */
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* BIT(INSN,BT) extracts bit BT of INSN; BITS(INSN,HI,LO) extracts the
   inclusive bit field [HI:LO].  */
#define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4582
4583 static enum err_type
4584 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
4585 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
4586 bfd_boolean encoding ATTRIBUTE_UNUSED,
4587 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4588 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4589 {
4590 int t = BITS (insn, 4, 0);
4591 int n = BITS (insn, 9, 5);
4592 int t2 = BITS (insn, 14, 10);
4593
4594 if (BIT (insn, 23))
4595 {
4596 /* Write back enabled. */
4597 if ((t == n || t2 == n) && n != 31)
4598 return ERR_UND;
4599 }
4600
4601 if (BIT (insn, 22))
4602 {
4603 /* Load */
4604 if (t == t2)
4605 return ERR_UND;
4606 }
4607
4608 return ERR_OK;
4609 }
4610
/* Initialize an instruction sequence insn_sequence with the instruction INST.
   If INST is NULL the given insn_sequence is cleared and the sequence is left
   uninitialized.  */

void
init_insn_sequence (const struct aarch64_inst *inst,
		    aarch64_instr_sequence *insn_sequence)
{
  int num_req_entries = 0;
  insn_sequence->next_insn = 0;
  /* NOTE(review): num_req_entries is still 0 at this point, so num_insns
     always starts at 0 regardless of INST; the C_SCAN_MOVPRFX computation
     below only affects the current_insns allocation.  */
  insn_sequence->num_insns = num_req_entries;
  /* Release the instruction retained from a previous sequence, if any.  */
  if (insn_sequence->instr)
    XDELETE (insn_sequence->instr);
  insn_sequence->instr = NULL;

  if (inst)
    {
      /* Keep a private copy of INST so the sequence does not depend on the
	 caller's storage staying alive.  */
      insn_sequence->instr = XNEW (aarch64_inst);
      memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
    }

  /* Handle all the cases here.  May need to think of something smarter than
     a giant if/else chain if this grows.  At that time, a lookup table may be
     best.  */
  if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
    num_req_entries = 1;

  /* Discard any partially-collected instructions from the old sequence.  */
  if (insn_sequence->current_insns)
    XDELETEVEC (insn_sequence->current_insns);
  insn_sequence->current_insns = NULL;

  if (num_req_entries != 0)
    {
      /* NOTE(review): this allocates num_req_entries aarch64_inst objects
	 but the buffer is used as an array of aarch64_inst pointers, so it
	 over-allocates (sizeof (aarch64_inst) per slot instead of a pointer).
	 Harmless, but worth confirming against how current_insns is filled
	 elsewhere before changing.  */
      size_t size = num_req_entries * sizeof (aarch64_inst);
      insn_sequence->current_insns
	= (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
      memset (insn_sequence->current_insns, 0, size);
    }
}
4650
4651
4652 /* This function verifies that the instruction INST adheres to its specified
4653 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
4654 returned and MISMATCH_DETAIL contains the reason why verification failed.
4655
4656 The function is called both during assembly and disassembly. If assembling
   then ENCODING will be TRUE, else FALSE.  If disassembling, PC will be set
   and will contain the PC of the current instruction w.r.t. the section.
4659
4660 If ENCODING and PC=0 then you are at a start of a section. The constraints
4661 are verified against the given state insn_sequence which is updated as it
4662 transitions through the verification. */
4663
4664 enum err_type
4665 verify_constraints (const struct aarch64_inst *inst,
4666 const aarch64_insn insn ATTRIBUTE_UNUSED,
4667 bfd_vma pc,
4668 bfd_boolean encoding,
4669 aarch64_operand_error *mismatch_detail,
4670 aarch64_instr_sequence *insn_sequence)
4671 {
4672 assert (inst);
4673 assert (inst->opcode);
4674
4675 const struct aarch64_opcode *opcode = inst->opcode;
4676 if (!opcode->constraints && !insn_sequence->instr)
4677 return ERR_OK;
4678
4679 assert (insn_sequence);
4680
4681 enum err_type res = ERR_OK;
4682
4683 /* This instruction puts a constraint on the insn_sequence. */
4684 if (opcode->flags & F_SCAN)
4685 {
4686 if (insn_sequence->instr)
4687 {
4688 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4689 mismatch_detail->error = _("instruction opens new dependency "
4690 "sequence without ending previous one");
4691 mismatch_detail->index = -1;
4692 mismatch_detail->non_fatal = TRUE;
4693 res = ERR_VFI;
4694 }
4695
4696 init_insn_sequence (inst, insn_sequence);
4697 return res;
4698 }
4699
4700 /* Verify constraints on an existing sequence. */
4701 if (insn_sequence->instr)
4702 {
4703 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
4704 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
4705 closed a previous one that we should have. */
4706 if (!encoding && pc == 0)
4707 {
4708 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4709 mismatch_detail->error = _("previous `movprfx' sequence not closed");
4710 mismatch_detail->index = -1;
4711 mismatch_detail->non_fatal = TRUE;
4712 res = ERR_VFI;
4713 /* Reset the sequence. */
4714 init_insn_sequence (NULL, insn_sequence);
4715 return res;
4716 }
4717
4718 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
4719 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
4720 {
4721 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4722 instruction for better error messages. */
4723 if (!opcode->avariant || !(*opcode->avariant & AARCH64_FEATURE_SVE))
4724 {
4725 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4726 mismatch_detail->error = _("SVE instruction expected after "
4727 "`movprfx'");
4728 mismatch_detail->index = -1;
4729 mismatch_detail->non_fatal = TRUE;
4730 res = ERR_VFI;
4731 goto done;
4732 }
4733
4734 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4735 instruction that is allowed to be used with a MOVPRFX. */
4736 if (!(opcode->constraints & C_SCAN_MOVPRFX))
4737 {
4738 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4739 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
4740 "expected");
4741 mismatch_detail->index = -1;
4742 mismatch_detail->non_fatal = TRUE;
4743 res = ERR_VFI;
4744 goto done;
4745 }
4746
4747 /* Next check for usage of the predicate register. */
4748 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
4749 aarch64_opnd_info blk_pred, inst_pred;
4750 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
4751 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
4752 bfd_boolean predicated = FALSE;
4753 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
4754
4755 /* Determine if the movprfx instruction used is predicated or not. */
4756 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
4757 {
4758 predicated = TRUE;
4759 blk_pred = insn_sequence->instr->operands[1];
4760 }
4761
4762 unsigned char max_elem_size = 0;
4763 unsigned char current_elem_size;
4764 int num_op_used = 0, last_op_usage = 0;
4765 int i, inst_pred_idx = -1;
4766 int num_ops = aarch64_num_of_operands (opcode);
4767 for (i = 0; i < num_ops; i++)
4768 {
4769 aarch64_opnd_info inst_op = inst->operands[i];
4770 switch (inst_op.type)
4771 {
4772 case AARCH64_OPND_SVE_Zd:
4773 case AARCH64_OPND_SVE_Zm_5:
4774 case AARCH64_OPND_SVE_Zm_16:
4775 case AARCH64_OPND_SVE_Zn:
4776 case AARCH64_OPND_SVE_Zt:
4777 case AARCH64_OPND_SVE_Vm:
4778 case AARCH64_OPND_SVE_Vn:
4779 case AARCH64_OPND_Va:
4780 case AARCH64_OPND_Vn:
4781 case AARCH64_OPND_Vm:
4782 case AARCH64_OPND_Sn:
4783 case AARCH64_OPND_Sm:
4784 case AARCH64_OPND_Rn:
4785 case AARCH64_OPND_Rm:
4786 case AARCH64_OPND_Rn_SP:
4787 case AARCH64_OPND_Rm_SP:
4788 if (inst_op.reg.regno == blk_dest.reg.regno)
4789 {
4790 num_op_used++;
4791 last_op_usage = i;
4792 }
4793 current_elem_size
4794 = aarch64_get_qualifier_esize (inst_op.qualifier);
4795 if (current_elem_size > max_elem_size)
4796 max_elem_size = current_elem_size;
4797 break;
4798 case AARCH64_OPND_SVE_Pd:
4799 case AARCH64_OPND_SVE_Pg3:
4800 case AARCH64_OPND_SVE_Pg4_5:
4801 case AARCH64_OPND_SVE_Pg4_10:
4802 case AARCH64_OPND_SVE_Pg4_16:
4803 case AARCH64_OPND_SVE_Pm:
4804 case AARCH64_OPND_SVE_Pn:
4805 case AARCH64_OPND_SVE_Pt:
4806 inst_pred = inst_op;
4807 inst_pred_idx = i;
4808 break;
4809 default:
4810 break;
4811 }
4812 }
4813
4814 assert (max_elem_size != 0);
4815 aarch64_opnd_info inst_dest = inst->operands[0];
4816 /* Determine the size that should be used to compare against the
4817 movprfx size. */
4818 current_elem_size
4819 = opcode->constraints & C_MAX_ELEM
4820 ? max_elem_size
4821 : aarch64_get_qualifier_esize (inst_dest.qualifier);
4822
4823 /* If movprfx is predicated do some extra checks. */
4824 if (predicated)
4825 {
4826 /* The instruction must be predicated. */
4827 if (inst_pred_idx < 0)
4828 {
4829 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4830 mismatch_detail->error = _("predicated instruction expected "
4831 "after `movprfx'");
4832 mismatch_detail->index = -1;
4833 mismatch_detail->non_fatal = TRUE;
4834 res = ERR_VFI;
4835 goto done;
4836 }
4837
4838 /* The instruction must have a merging predicate. */
4839 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
4840 {
4841 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4842 mismatch_detail->error = _("merging predicate expected due "
4843 "to preceding `movprfx'");
4844 mismatch_detail->index = inst_pred_idx;
4845 mismatch_detail->non_fatal = TRUE;
4846 res = ERR_VFI;
4847 goto done;
4848 }
4849
4850 /* The same register must be used in instruction. */
4851 if (blk_pred.reg.regno != inst_pred.reg.regno)
4852 {
4853 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4854 mismatch_detail->error = _("predicate register differs "
4855 "from that in preceding "
4856 "`movprfx'");
4857 mismatch_detail->index = inst_pred_idx;
4858 mismatch_detail->non_fatal = TRUE;
4859 res = ERR_VFI;
4860 goto done;
4861 }
4862 }
4863
4864 /* Destructive operations by definition must allow one usage of the
4865 same register. */
4866 int allowed_usage
4867 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
4868
4869 /* Operand is not used at all. */
4870 if (num_op_used == 0)
4871 {
4872 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4873 mismatch_detail->error = _("output register of preceding "
4874 "`movprfx' not used in current "
4875 "instruction");
4876 mismatch_detail->index = 0;
4877 mismatch_detail->non_fatal = TRUE;
4878 res = ERR_VFI;
4879 goto done;
4880 }
4881
4882 /* We now know it's used, now determine exactly where it's used. */
4883 if (blk_dest.reg.regno != inst_dest.reg.regno)
4884 {
4885 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4886 mismatch_detail->error = _("output register of preceding "
4887 "`movprfx' expected as output");
4888 mismatch_detail->index = 0;
4889 mismatch_detail->non_fatal = TRUE;
4890 res = ERR_VFI;
4891 goto done;
4892 }
4893
4894 /* Operand used more than allowed for the specific opcode type. */
4895 if (num_op_used > allowed_usage)
4896 {
4897 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4898 mismatch_detail->error = _("output register of preceding "
4899 "`movprfx' used as input");
4900 mismatch_detail->index = last_op_usage;
4901 mismatch_detail->non_fatal = TRUE;
4902 res = ERR_VFI;
4903 goto done;
4904 }
4905
4906 /* Now the only thing left is the qualifiers checks. The register
4907 must have the same maximum element size. */
4908 if (inst_dest.qualifier
4909 && blk_dest.qualifier
4910 && current_elem_size
4911 != aarch64_get_qualifier_esize (blk_dest.qualifier))
4912 {
4913 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4914 mismatch_detail->error = _("register size not compatible with "
4915 "previous `movprfx'");
4916 mismatch_detail->index = 0;
4917 mismatch_detail->non_fatal = TRUE;
4918 res = ERR_VFI;
4919 goto done;
4920 }
4921 }
4922
4923 done:
4924 /* Add the new instruction to the sequence. */
4925 memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
4926 inst, sizeof (aarch64_inst));
4927
4928 /* Check if sequence is now full. */
4929 if (insn_sequence->next_insn >= insn_sequence->num_insns)
4930 {
4931 /* Sequence is full, but we don't have anything special to do for now,
4932 so clear and reset it. */
4933 init_insn_sequence (NULL, insn_sequence);
4934 }
4935 }
4936
4937 return res;
4938 }
4939
4940
4941 /* Return true if VALUE cannot be moved into an SVE register using DUP
4942 (with any element size, not just ESIZE) and if using DUPM would
4943 therefore be OK. ESIZE is the number of bytes in the immediate. */
4944
4945 bfd_boolean
4946 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
4947 {
4948 int64_t svalue = uvalue;
4949 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
4950
4951 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
4952 return FALSE;
4953 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
4954 {
4955 svalue = (int32_t) uvalue;
4956 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
4957 {
4958 svalue = (int16_t) uvalue;
4959 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
4960 return FALSE;
4961 }
4962 }
4963 if ((svalue & 0xff) == 0)
4964 svalue /= 256;
4965 return svalue < -128 || svalue >= 128;
4966 }
4967
4968 /* Include the opcode description table as well as the operand description
4969 table. */
4970 #define VERIFIER(x) verify_##x
4971 #include "aarch64-tbl.h"