[BINUTILS, AArch64] Enable Transactional Memory Extension
[deliverable/binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include "bfd_stdint.h"
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* When non-zero, the DEBUG_AARCH64 helpers in this file (e.g.
   dump_match_qualifiers in aarch64_find_best_match) emit diagnostics.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  Indexed by the encoded 5-bit value; a null entry
   indicates a reserved meaning.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  Indexed by the encoded 4-bit value; a null entry
   indicates a reserved meaning.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Classification of an AdvSIMD instruction's operand-qualifier sequence;
   used to select which operand encodes the size:Q fields (see
   significant_operand_index below).  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};
129
/* Indexed by enum data_pattern; gives the index of the operand whose
   qualifier determines the size:Q encoding.  Must be kept in sync with
   the enumerators above.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
201 \f
/* Description of each instruction bit-field as { lsb, width }, indexed
   by enum aarch64_field_kind.  The trailing comment names the field and
   the instruction class(es) that use it.  */
const aarch64_field fields[] =
{
    { 0,  0 },	/* NIL.  */
    { 0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    { 0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 5, 19 },	/* imm19: e.g. in CBZ.  */
    { 5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29, 2 },	/* immlo: e.g. in ADRP.  */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
    { 0,  5 },	/* Rt: in load/store instructions.  */
    { 0,  5 },	/* Rd: in many integer instructions.  */
    { 5,  5 },	/* Rn: in many integer instructions.  */
    { 10, 5 },	/* Rt2: in load/store pair instructions.  */
    { 10, 5 },	/* Ra: in fp instructions.  */
    { 5,  3 },	/* op2: in the system instructions.  */
    { 8,  4 },	/* CRm: in the system instructions.  */
    { 12, 4 },	/* CRn: in the system instructions.  */
    { 16, 3 },	/* op1: in the system instructions.  */
    { 19, 2 },	/* op0: in the system instructions.  */
    { 10, 3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12, 4 },	/* cond: condition flags as a source operand.  */
    { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12, 1 },	/* S: in load/store reg offset instructions.  */
    { 21, 2 },	/* hw: in move wide constant instructions.  */
    { 22, 2 },	/* opc: in load/store reg offset instructions.  */
    { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22, 2 },	/* type: floating point type field in fp data inst.  */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10, 6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 15, 6 },	/* imm6_2: in rmif instructions.  */
    { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 0,  4 },	/* imm4_2: in rmif instructions.  */
    { 10, 4 },	/* imm4_3: in adddg/subg instructions.  */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    { 5, 14 },	/* imm14: in test bit and branch instructions.  */
    { 5, 16 },	/* imm16: in exception instructions.  */
    { 0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22, 1 },	/* S: in LDRAA and LDRAB instructions.  */
    { 22, 1 },	/* N: in logical (immediate) instructions.  */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31, 1 },	/* sf: in integer data processing instructions.  */
    { 30, 1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31, 1 },	/* b5: in the test bit and branch instructions.  */
    { 19, 5 },	/* b40: in the test bit and branch instructions.  */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 4, 1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
    { 14, 1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
    { 16, 1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
    { 17, 1 },	/* SVE_N: SVE equivalent of N.  */
    { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    { 5, 5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16, 5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    { 0, 5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    { 5, 5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    { 5, 1 },	/* SVE_i1: single-bit immediate.  */
    { 22, 1 },	/* SVE_i3h: high bit of 3-bit immediate.  */
    { 16, 3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16, 4 },	/* SVE_imm4: 4-bit immediate field.  */
    { 5, 5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16, 5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16, 6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14, 7 },	/* SVE_imm7: 7-bit immediate field.  */
    { 5, 8 },	/* SVE_imm8: 8-bit immediate field.  */
    { 5, 9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11, 6 },	/* SVE_immr: SVE equivalent of immr.  */
    { 5, 6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    { 5, 5 },	/* SVE_pattern: vector pattern enumeration.  */
    { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16, 1 },	/* SVE_rot1: 1-bit rotation amount.  */
    { 10, 2 },	/* SVE_rot2: 2-bit rotation amount.  */
    { 22, 1 },	/* SVE_sz: 1-bit element size select.  */
    { 16, 4 },	/* SVE_tsz: triangular size select.  */
    { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    { 8, 2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19, 2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22, 1 },	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 11, 2 },	/* rotate1: FCMLA immediate rotate.  */
    { 13, 2 },	/* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12, 1 },	/* rotate3: FCADD immediate rotate.  */
    { 12, 2 },	/* SM3: Indexed element SM3 2 bits index immediate.  */
    { 22, 1 },	/* sz: 1-bit element size select.  */
};
325
/* Return the operand class (register, immediate, address, ...) recorded
   for operand code TYPE in the aarch64_operands table.  */
enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd type)
{
  return aarch64_operands[type].op_class;
}
331
/* Return the short name recorded for operand code TYPE in the
   aarch64_operands table.  */
const char *
aarch64_get_operand_name (enum aarch64_opnd type)
{
  return aarch64_operands[type].name;
}
337
/* Get operand description string.
   This is usually for the diagnosis purpose.  */
const char *
aarch64_get_operand_desc (enum aarch64_opnd type)
{
  return aarch64_operands[type].desc;
}
345
/* Table of all conditional affixes.  Each entry lists the spellings
   accepted for one condition together with its 4-bit encoding; the
   encoding equals the entry's index.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
366
/* Return the condition table entry for the 4-bit encoding VALUE.
   VALUE must be below 16.  */
const aarch64_cond *
get_cond_from_value (aarch64_insn value)
{
  assert (value < 16);
  return &aarch64_conds[(unsigned int) value];
}
373
/* Return the condition whose encoding differs from COND's in the lowest
   bit, i.e. the inverse condition (e.g. EQ <-> NE).  */
const aarch64_cond *
get_inverted_cond (const aarch64_cond *cond)
{
  return &aarch64_conds[cond->value ^ 0x1];
}
379
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.  The entry order must match that enum:
   aarch64_get_operand_modifier recovers the kind by pointer subtraction,
   and aarch64_get_operand_modifier_from_value walks relative to the
   UXTB and LSL entries.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
405
/* Return the modifier kind for DESC, which must point at an entry of
   aarch64_operand_modifiers; the kind is simply the entry's index.  */
enum aarch64_modifier_kind
aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
{
  return desc - aarch64_operand_modifiers;
}
411
/* Return the common encoding value recorded for modifier KIND.  */
aarch64_insn
aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
{
  return aarch64_operand_modifiers[kind].value;
}
417
418 enum aarch64_modifier_kind
419 aarch64_get_operand_modifier_from_value (aarch64_insn value,
420 bfd_boolean extend_p)
421 {
422 if (extend_p == TRUE)
423 return AARCH64_MOD_UXTB + value;
424 else
425 return AARCH64_MOD_LSL - value;
426 }
427
428 bfd_boolean
429 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
430 {
431 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
432 ? TRUE : FALSE;
433 }
434
435 static inline bfd_boolean
436 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
437 {
438 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
439 ? TRUE : FALSE;
440 }
441
/* Names of the DSB/DMB barrier options, indexed by their 4-bit CRm
   encoding.  Reserved encodings are spelt as their literal immediate
   value (e.g. "#0x00").  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
461
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.  */
  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
  { "c",	HINT_OPD_C },		/* BTI C.  */
  { "j",	HINT_OPD_J },		/* BTI J.  */
  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
  { NULL,	HINT_OPD_NULL },
};
479
/* Names of the PRFM prefetch operations, indexed by their 5-bit
   encoding, which is built from three sub-fields:
   op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1
   NULL entries are reserved encodings (printed as raw immediates by
   the callers of this table).  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
520 \f
521 /* Utilities on value constraint. */
522
/* Return non-zero if VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  /* The relational expression already yields 0 or 1; the former
     `? 1 : 0' ternary was redundant.  */
  return value >= low && value <= high;
}
528
/* Return true if VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  if (value % align != 0)
    return 0;
  return 1;
}
535
/* Return non-zero if the signed VALUE is representable in a two's
   complement field WIDTH bits wide, i.e. lies in
   [-2^(WIDTH-1), 2^(WIDTH-1)).  WIDTH must be below 32.  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;

  {
    int64_t limit = (int64_t) 1 << (width - 1);
    return -limit <= value && value < limit;
  }
}
549
/* Return non-zero if VALUE is representable in an unsigned field WIDTH
   bits wide, i.e. lies in [0, 2^WIDTH).  WIDTH must be below 32.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;

  return value >= 0 && value < ((int64_t) 1 << width);
}
563
/* Return 1 if OPERAND is SP or WSP, i.e. an integer register operand
   numbered 31 whose operand code allows the stack pointer.  */
int
aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
573
/* Return 1 if OPERAND is XZR or WZR, i.e. an integer register operand
   numbered 31 whose operand code does NOT allow the stack pointer.
   (The original comment's "WZP" was a typo for WZR.)  */
int
aarch64_zero_register_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
583
/* Return true if the operand *OPERAND that has the operand code
   OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
   qualified by the qualifier TARGET.

   The only cross-qualification allowed is between the W/X register
   qualifiers and their stack-pointer counterparts WSP/SP, for operand
   codes that may name the stack pointer.  */

static inline int
operand_also_qualified_p (const struct aarch64_opnd_info *operand,
			  aarch64_opnd_qualifier_t target)
{
  switch (operand->qualifier)
    {
    case AARCH64_OPND_QLF_W:
      /* W register 31 is WSP when the operand code permits it.  */
      if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_X:
      /* X register 31 is SP when the operand code permits it.  */
      if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_WSP:
      if (target == AARCH64_OPND_QLF_W
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    case AARCH64_OPND_QLF_SP:
      if (target == AARCH64_OPND_QLF_X
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    default:
      break;
    }

  return 0;
}
618
619 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
620 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
621
622 Return NIL if more than one expected qualifiers are found. */
623
624 aarch64_opnd_qualifier_t
625 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
626 int idx,
627 const aarch64_opnd_qualifier_t known_qlf,
628 int known_idx)
629 {
630 int i, saved_i;
631
632 /* Special case.
633
634 When the known qualifier is NIL, we have to assume that there is only
635 one qualifier sequence in the *QSEQ_LIST and return the corresponding
636 qualifier directly. One scenario is that for instruction
637 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
638 which has only one possible valid qualifier sequence
639 NIL, S_D
640 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
641 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
642
643 Because the qualifier NIL has dual roles in the qualifier sequence:
644 it can mean no qualifier for the operand, or the qualifer sequence is
645 not in use (when all qualifiers in the sequence are NILs), we have to
646 handle this special case here. */
647 if (known_qlf == AARCH64_OPND_NIL)
648 {
649 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
650 return qseq_list[0][idx];
651 }
652
653 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
654 {
655 if (qseq_list[i][known_idx] == known_qlf)
656 {
657 if (saved_i != -1)
658 /* More than one sequences are found to have KNOWN_QLF at
659 KNOWN_IDX. */
660 return AARCH64_OPND_NIL;
661 saved_i = i;
662 }
663 }
664
665 return qseq_list[saved_i][idx];
666 }
667
/* Broad categories of operand qualifiers; determines how the three data
   fields of struct operand_qualifier_data below are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};
675
/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size / element count / encoding
     value; for OQK_VALUE_IN_RANGE they are lower bound / upper bound /
     unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
688
/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
  /* Scalar "4b" (e.g. dot-product element operand) and the vector "4b"
     arrangement below deliberately share a printed name.  */
  {4, 1, 0x0, "4b", OQK_OPD_VARIANT},

  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc).  */
  {16, 0, 0, "tag", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
749
750 static inline bfd_boolean
751 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
752 {
753 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
754 ? TRUE : FALSE;
755 }
756
757 static inline bfd_boolean
758 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
759 {
760 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
761 ? TRUE : FALSE;
762 }
763
/* Return the printable name of QUALIFIER (e.g. "8b", "imm_0_31").  */
const char*
aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].desc;
}
769
/* Given an operand qualifier, return the expected data element size
   (in bytes) of a qualified operand.  Only valid for operand-variant
   qualifiers (asserted).  */
unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}
778
/* Return the number of data elements implied by QUALIFIER.  Only valid
   for operand-variant qualifiers (asserted).  */
unsigned char
aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}
785
/* Return the common encoding value associated with QUALIFIER, used by
   the table-driven size:Q encoding/decoding.  Only valid for
   operand-variant qualifiers (asserted).  */
aarch64_insn
aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data2;
}
792
/* Return the inclusive lower bound of a value-range qualifier
   (asserted to be one).  */
static int
get_lower_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}
799
/* Return the inclusive upper bound of a value-range qualifier
   (asserted to be one).  */
static int
get_upper_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}
806
807 #ifdef DEBUG_AARCH64
/* printf-style debug helper: writes "#### " followed by the formatted
   message and a trailing newline to stdout.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list args;

  va_start (args, str);
  fputs ("#### ", stdout);
  vprintf (str, args);
  putchar ('\n');
  va_end (args);
}
818
/* Debug helper: print the names of a full qualifier sequence (one entry
   per operand slot) on a single "#### "-prefixed line.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}
828
/* Debug helper: print the qualifiers currently attached to the operands
   in OPND alongside the candidate sequence QUALIFIER they are being
   matched against (see aarch64_find_best_match).  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
843 #endif /* DEBUG_AARCH64 */
844
845 /* This function checks if the given instruction INSN is a destructive
846 instruction based on the usage of the registers. It does not recognize
847 unary destructive instructions. */
848 bfd_boolean
849 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
850 {
851 int i = 0;
852 const enum aarch64_opnd *opnds = opcode->operands;
853
854 if (opnds[0] == AARCH64_OPND_NIL)
855 return FALSE;
856
857 while (opnds[++i] != AARCH64_OPND_NIL)
858 if (opnds[i] == opnds[0])
859 return TRUE;
860
861 return FALSE;
862 }
863
864 /* TODO improve this, we can have an extra field at the runtime to
865 store the number of operands rather than calculating it every time. */
866
867 int
868 aarch64_num_of_operands (const aarch64_opcode *opcode)
869 {
870 int i = 0;
871 const enum aarch64_opnd *opnds = opcode->operands;
872 while (opnds[i++] != AARCH64_OPND_NIL)
873 ;
874 --i;
875 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
876 return i;
877 }
878
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers been established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   A couple examples of the matching algorithm:

   X,W,NIL should match
   X,W,NIL

   NIL,NIL should match
   X  ,NIL

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.  */

int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      /* NOTE(review): in this early-success case *RET is left untouched;
	 callers appear to rely on the return value only — confirm before
	 depending on RET here.  */
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  /* Clamp STOP_AT (including the -1 "match everything" sentinel) to the
     last operand index.  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each candidate qualifier sequence (pattern).  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes have far fewer patterns than AARCH64_MAX_QLF_SEQ_NUM;
	 the first all-NIL sequence marks the end of the list.  Matching it
	 only counts as success when it is the very first pattern (i == 0),
	 i.e. the opcode takes no qualifiers at all.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET; QUALIFIERS_LIST still points at the
	 matched sequence because the loop above exited via break.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched qualifiers, then pad the remainder with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
1003
1004 /* Operand qualifier matching and resolving.
1005
1006 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1007 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1008
1009 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
1010 succeeds. */
1011
1012 static int
1013 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
1014 {
1015 int i, nops;
1016 aarch64_opnd_qualifier_seq_t qualifiers;
1017
1018 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1019 qualifiers))
1020 {
1021 DEBUG_TRACE ("matching FAIL");
1022 return 0;
1023 }
1024
1025 if (inst->opcode->flags & F_STRICT)
1026 {
1027 /* Require an exact qualifier match, even for NIL qualifiers. */
1028 nops = aarch64_num_of_operands (inst->opcode);
1029 for (i = 0; i < nops; ++i)
1030 if (inst->operands[i].qualifier != qualifiers[i])
1031 return FALSE;
1032 }
1033
1034 /* Update the qualifiers. */
1035 if (update_p == TRUE)
1036 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1037 {
1038 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1039 break;
1040 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1041 "update %s with %s for operand %d",
1042 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1043 aarch64_get_qualifier_name (qualifiers[i]), i);
1044 inst->operands[i].qualifier = qualifiers[i];
1045 }
1046
1047 DEBUG_TRACE ("matching SUCCESS");
1048 return 1;
1049 }
1050
1051 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1052 register by MOVZ.
1053
1054 IS32 indicates whether value is a 32-bit immediate or not.
1055 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1056 amount will be returned in *SHIFT_AMOUNT. */
1057
1058 bfd_boolean
1059 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1060 {
1061 int amount;
1062
1063 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1064
1065 if (is32)
1066 {
1067 /* Allow all zeros or all ones in top 32-bits, so that
1068 32-bit constant expressions like ~0x80000000 are
1069 permitted. */
1070 uint64_t ext = value;
1071 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1072 /* Immediate out of range. */
1073 return FALSE;
1074 value &= (int64_t) 0xffffffff;
1075 }
1076
1077 /* first, try movz then movn */
1078 amount = -1;
1079 if ((value & ((int64_t) 0xffff << 0)) == value)
1080 amount = 0;
1081 else if ((value & ((int64_t) 0xffff << 16)) == value)
1082 amount = 16;
1083 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1084 amount = 32;
1085 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1086 amount = 48;
1087
1088 if (amount == -1)
1089 {
1090 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1091 return FALSE;
1092 }
1093
1094 if (shift_amount != NULL)
1095 *shift_amount = amount;
1096
1097 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1098
1099 return TRUE;
1100 }
1101
1102 /* Build the accepted values for immediate logical SIMD instructions.
1103
1104 The standard encodings of the immediate value are:
1105 N imms immr SIMD size R S
1106 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1107 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1108 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1109 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1110 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1111 0 11110s 00000r 2 UInt(r) UInt(s)
1112 where all-ones value of S is reserved.
1113
1114 Let's call E the SIMD size.
1115
1116 The immediate value is: S+1 bits '1' rotated to the right by R.
1117
1118 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1119 (remember S != E - 1). */
1120
/* Total number of distinct legal bitmask immediates (see the table
   comment above: 64*63 + 32*31 + ... + 2*1 = 5334).  */
#define TOTAL_IMM_NB 5334

/* One lookup-table entry: the replicated 64-bit immediate value and
   its 13-bit N:immr:imms standard encoding.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all valid logical immediates, filled on first use by
   build_immediate_table and kept sorted by IMM so that
   aarch64_logical_immediate_p can bsearch it.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1130
1131 static int
1132 simd_imm_encoding_cmp(const void *i1, const void *i2)
1133 {
1134 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1135 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1136
1137 if (imm1->imm < imm2->imm)
1138 return -1;
1139 if (imm1->imm > imm2->imm)
1140 return +1;
1141 return 0;
1142 }
1143
1144 /* immediate bitfield standard encoding
1145 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1146 1 ssssss rrrrrr 64 rrrrrr ssssss
1147 0 0sssss 0rrrrr 32 rrrrr sssss
1148 0 10ssss 00rrrr 16 rrrr ssss
1149 0 110sss 000rrr 8 rrr sss
1150 0 1110ss 0000rr 4 rr ss
1151 0 11110s 00000r 2 r s */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  /* Pack the three fields into the imm13 layout shown above:
     imm13<12> = N (is64), imm13<11:6> = immr (r), imm13<5:0> = imms (s).  */
  int n_field = is64 << 12;
  int r_field = r << 6;

  return n_field | r_field | s;
}
1157
/* Fill simd_immediates[] with every valid logical (bitmask) immediate
   and its standard encoding, then sort the table by value so that
   aarch64_logical_immediate_p can look entries up with bsearch.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* Iterate over the element sizes e = 2, 4, 8, 16, 32, 64.  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* S_MASK supplies the leading "size" bits of the imms field
	     for the sub-64-bit element sizes:
	     log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* All-ones (s == e - 1) is reserved, hence s < e - 1.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size;
	       each case doubles the pattern, falling through so an
	       e-bit element is replicated across all 64 bits.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1223
1224 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1225 be accepted by logical (immediate) instructions
1226 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1227
1228 ESIZE is the number of bytes in the decoded immediate value.
1229 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1230 VALUE will be returned in *ENCODING. */
1231
bfd_boolean
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  /* NOTE(review): lazy one-shot table build with no locking; presumably
     libopcodes is used single-threaded here -- confirm before calling
     this from multiple threads.  */
  static bfd_boolean initialized = FALSE;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  if (!initialized)
    {
      build_immediate_table ();
      initialized = TRUE;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.
     Shift in two steps of esize*4 each, so that esize == 8 shifts by
     32+32 instead of a single (undefined) shift by 64.  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return FALSE;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* The table stores replicated 64-bit values; binary-search it.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with FALSE");
      return FALSE;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with TRUE");
  return TRUE;
}
1275
1276 /* If 64-bit immediate IMM is in the format of
1277 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1278 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1279 of value "abcdefgh". Otherwise return -1. */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  /* Collapse IMM -- every byte of which must be 0x00 or 0xff -- into
     an 8-bit value taking one bit per byte (bit i set iff byte i is
     0xff).  Return -1 if any byte is neither 0x00 nor 0xff.  */
  int bit;
  int result = 0;

  for (bit = 0; bit < 8; bit++)
    {
      uint32_t byte = (imm >> (8 * bit)) & 0xff;

      if (byte == 0xff)
	result |= 1 << bit;
      else if (byte != 0x00)
	return -1;
    }

  return result;
}
1297
1298 /* Utility inline functions for operand_general_constraint_met_p. */
1299
1300 static inline void
1301 set_error (aarch64_operand_error *mismatch_detail,
1302 enum aarch64_operand_error_kind kind, int idx,
1303 const char* error)
1304 {
1305 if (mismatch_detail == NULL)
1306 return;
1307 mismatch_detail->kind = kind;
1308 mismatch_detail->index = idx;
1309 mismatch_detail->error = error;
1310 }
1311
1312 static inline void
1313 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1314 const char* error)
1315 {
1316 if (mismatch_detail == NULL)
1317 return;
1318 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1319 }
1320
1321 static inline void
1322 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1323 int idx, int lower_bound, int upper_bound,
1324 const char* error)
1325 {
1326 if (mismatch_detail == NULL)
1327 return;
1328 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1329 mismatch_detail->data[0] = lower_bound;
1330 mismatch_detail->data[1] = upper_bound;
1331 }
1332
1333 static inline void
1334 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1335 int idx, int lower_bound, int upper_bound)
1336 {
1337 if (mismatch_detail == NULL)
1338 return;
1339 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1340 _("immediate value"));
1341 }
1342
1343 static inline void
1344 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1345 int idx, int lower_bound, int upper_bound)
1346 {
1347 if (mismatch_detail == NULL)
1348 return;
1349 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1350 _("immediate offset"));
1351 }
1352
1353 static inline void
1354 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1355 int idx, int lower_bound, int upper_bound)
1356 {
1357 if (mismatch_detail == NULL)
1358 return;
1359 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1360 _("register number"));
1361 }
1362
1363 static inline void
1364 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1365 int idx, int lower_bound, int upper_bound)
1366 {
1367 if (mismatch_detail == NULL)
1368 return;
1369 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1370 _("register element index"));
1371 }
1372
1373 static inline void
1374 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1375 int idx, int lower_bound, int upper_bound)
1376 {
1377 if (mismatch_detail == NULL)
1378 return;
1379 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1380 _("shift amount"));
1381 }
1382
1383 /* Report that the MUL modifier in operand IDX should be in the range
1384 [LOWER_BOUND, UPPER_BOUND]. */
1385 static inline void
1386 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1387 int idx, int lower_bound, int upper_bound)
1388 {
1389 if (mismatch_detail == NULL)
1390 return;
1391 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1392 _("multiplier"));
1393 }
1394
1395 static inline void
1396 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1397 int alignment)
1398 {
1399 if (mismatch_detail == NULL)
1400 return;
1401 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1402 mismatch_detail->data[0] = alignment;
1403 }
1404
1405 static inline void
1406 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1407 int expected_num)
1408 {
1409 if (mismatch_detail == NULL)
1410 return;
1411 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1412 mismatch_detail->data[0] = expected_num;
1413 }
1414
1415 static inline void
1416 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1417 const char* error)
1418 {
1419 if (mismatch_detail == NULL)
1420 return;
1421 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1422 }
1423
1424 /* General constraint checking based on operand code.
1425
1426 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1427 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1428
1429 This function has to be called after the qualifiers for all operands
1430 have been resolved.
1431
1432 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1433 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1434 of error message during the disassembling where error message is not
1435 wanted. We avoid the dynamic construction of strings of error messages
1436 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1437 use a combination of error code, static string and some integer data to
1438 represent an error. */
1439
1440 static int
1441 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1442 enum aarch64_opnd type,
1443 const aarch64_opcode *opcode,
1444 aarch64_operand_error *mismatch_detail)
1445 {
1446 unsigned num, modifiers, shift;
1447 unsigned char size;
1448 int64_t imm, min_value, max_value;
1449 uint64_t uvalue, mask;
1450 const aarch64_opnd_info *opnd = opnds + idx;
1451 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1452
1453 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1454
1455 switch (aarch64_operands[type].op_class)
1456 {
1457 case AARCH64_OPND_CLASS_INT_REG:
1458 /* Check pair reg constraints for cas* instructions. */
1459 if (type == AARCH64_OPND_PAIRREG)
1460 {
1461 assert (idx == 1 || idx == 3);
1462 if (opnds[idx - 1].reg.regno % 2 != 0)
1463 {
1464 set_syntax_error (mismatch_detail, idx - 1,
1465 _("reg pair must start from even reg"));
1466 return 0;
1467 }
1468 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1469 {
1470 set_syntax_error (mismatch_detail, idx,
1471 _("reg pair must be contiguous"));
1472 return 0;
1473 }
1474 break;
1475 }
1476
1477 /* <Xt> may be optional in some IC and TLBI instructions. */
1478 if (type == AARCH64_OPND_Rt_SYS)
1479 {
1480 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1481 == AARCH64_OPND_CLASS_SYSTEM));
1482 if (opnds[1].present
1483 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1484 {
1485 set_other_error (mismatch_detail, idx, _("extraneous register"));
1486 return 0;
1487 }
1488 if (!opnds[1].present
1489 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1490 {
1491 set_other_error (mismatch_detail, idx, _("missing register"));
1492 return 0;
1493 }
1494 }
1495 switch (qualifier)
1496 {
1497 case AARCH64_OPND_QLF_WSP:
1498 case AARCH64_OPND_QLF_SP:
1499 if (!aarch64_stack_pointer_p (opnd))
1500 {
1501 set_other_error (mismatch_detail, idx,
1502 _("stack pointer register expected"));
1503 return 0;
1504 }
1505 break;
1506 default:
1507 break;
1508 }
1509 break;
1510
1511 case AARCH64_OPND_CLASS_SVE_REG:
1512 switch (type)
1513 {
1514 case AARCH64_OPND_SVE_Zm3_INDEX:
1515 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1516 case AARCH64_OPND_SVE_Zm4_INDEX:
1517 size = get_operand_fields_width (get_operand_from_code (type));
1518 shift = get_operand_specific_data (&aarch64_operands[type]);
1519 mask = (1 << shift) - 1;
1520 if (opnd->reg.regno > mask)
1521 {
1522 assert (mask == 7 || mask == 15);
1523 set_other_error (mismatch_detail, idx,
1524 mask == 15
1525 ? _("z0-z15 expected")
1526 : _("z0-z7 expected"));
1527 return 0;
1528 }
1529 mask = (1 << (size - shift)) - 1;
1530 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1531 {
1532 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1533 return 0;
1534 }
1535 break;
1536
1537 case AARCH64_OPND_SVE_Zn_INDEX:
1538 size = aarch64_get_qualifier_esize (opnd->qualifier);
1539 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1540 {
1541 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1542 0, 64 / size - 1);
1543 return 0;
1544 }
1545 break;
1546
1547 case AARCH64_OPND_SVE_ZnxN:
1548 case AARCH64_OPND_SVE_ZtxN:
1549 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1550 {
1551 set_other_error (mismatch_detail, idx,
1552 _("invalid register list"));
1553 return 0;
1554 }
1555 break;
1556
1557 default:
1558 break;
1559 }
1560 break;
1561
1562 case AARCH64_OPND_CLASS_PRED_REG:
1563 if (opnd->reg.regno >= 8
1564 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1565 {
1566 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1567 return 0;
1568 }
1569 break;
1570
1571 case AARCH64_OPND_CLASS_COND:
1572 if (type == AARCH64_OPND_COND1
1573 && (opnds[idx].cond->value & 0xe) == 0xe)
1574 {
1575 /* Not allow AL or NV. */
1576 set_syntax_error (mismatch_detail, idx, NULL);
1577 }
1578 break;
1579
1580 case AARCH64_OPND_CLASS_ADDRESS:
1581 /* Check writeback. */
1582 switch (opcode->iclass)
1583 {
1584 case ldst_pos:
1585 case ldst_unscaled:
1586 case ldstnapair_offs:
1587 case ldstpair_off:
1588 case ldst_unpriv:
1589 if (opnd->addr.writeback == 1)
1590 {
1591 set_syntax_error (mismatch_detail, idx,
1592 _("unexpected address writeback"));
1593 return 0;
1594 }
1595 break;
1596 case ldst_imm10:
1597 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1598 {
1599 set_syntax_error (mismatch_detail, idx,
1600 _("unexpected address writeback"));
1601 return 0;
1602 }
1603 break;
1604 case ldst_imm9:
1605 case ldstpair_indexed:
1606 case asisdlsep:
1607 case asisdlsop:
1608 if (opnd->addr.writeback == 0)
1609 {
1610 set_syntax_error (mismatch_detail, idx,
1611 _("address writeback expected"));
1612 return 0;
1613 }
1614 break;
1615 default:
1616 assert (opnd->addr.writeback == 0);
1617 break;
1618 }
1619 switch (type)
1620 {
1621 case AARCH64_OPND_ADDR_SIMM7:
1622 /* Scaled signed 7 bits immediate offset. */
1623 /* Get the size of the data element that is accessed, which may be
1624 different from that of the source register size,
1625 e.g. in strb/ldrb. */
1626 size = aarch64_get_qualifier_esize (opnd->qualifier);
1627 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1628 {
1629 set_offset_out_of_range_error (mismatch_detail, idx,
1630 -64 * size, 63 * size);
1631 return 0;
1632 }
1633 if (!value_aligned_p (opnd->addr.offset.imm, size))
1634 {
1635 set_unaligned_error (mismatch_detail, idx, size);
1636 return 0;
1637 }
1638 break;
1639 case AARCH64_OPND_ADDR_OFFSET:
1640 case AARCH64_OPND_ADDR_SIMM9:
1641 /* Unscaled signed 9 bits immediate offset. */
1642 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1643 {
1644 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1645 return 0;
1646 }
1647 break;
1648
1649 case AARCH64_OPND_ADDR_SIMM9_2:
1650 /* Unscaled signed 9 bits immediate offset, which has to be negative
1651 or unaligned. */
1652 size = aarch64_get_qualifier_esize (qualifier);
1653 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1654 && !value_aligned_p (opnd->addr.offset.imm, size))
1655 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1656 return 1;
1657 set_other_error (mismatch_detail, idx,
1658 _("negative or unaligned offset expected"));
1659 return 0;
1660
1661 case AARCH64_OPND_ADDR_SIMM10:
1662 /* Scaled signed 10 bits immediate offset. */
1663 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1664 {
1665 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1666 return 0;
1667 }
1668 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1669 {
1670 set_unaligned_error (mismatch_detail, idx, 8);
1671 return 0;
1672 }
1673 break;
1674
1675 case AARCH64_OPND_ADDR_SIMM11:
1676 /* Signed 11 bits immediate offset (multiple of 16). */
1677 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1678 {
1679 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1680 return 0;
1681 }
1682
1683 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1684 {
1685 set_unaligned_error (mismatch_detail, idx, 16);
1686 return 0;
1687 }
1688 break;
1689
1690 case AARCH64_OPND_ADDR_SIMM13:
1691 /* Signed 13 bits immediate offset (multiple of 16). */
1692 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1693 {
1694 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1695 return 0;
1696 }
1697
1698 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1699 {
1700 set_unaligned_error (mismatch_detail, idx, 16);
1701 return 0;
1702 }
1703 break;
1704
1705 case AARCH64_OPND_SIMD_ADDR_POST:
1706 /* AdvSIMD load/store multiple structures, post-index. */
1707 assert (idx == 1);
1708 if (opnd->addr.offset.is_reg)
1709 {
1710 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1711 return 1;
1712 else
1713 {
1714 set_other_error (mismatch_detail, idx,
1715 _("invalid register offset"));
1716 return 0;
1717 }
1718 }
1719 else
1720 {
1721 const aarch64_opnd_info *prev = &opnds[idx-1];
1722 unsigned num_bytes; /* total number of bytes transferred. */
1723 /* The opcode dependent area stores the number of elements in
1724 each structure to be loaded/stored. */
1725 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1726 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1727 /* Special handling of loading single structure to all lane. */
1728 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1729 * aarch64_get_qualifier_esize (prev->qualifier);
1730 else
1731 num_bytes = prev->reglist.num_regs
1732 * aarch64_get_qualifier_esize (prev->qualifier)
1733 * aarch64_get_qualifier_nelem (prev->qualifier);
1734 if ((int) num_bytes != opnd->addr.offset.imm)
1735 {
1736 set_other_error (mismatch_detail, idx,
1737 _("invalid post-increment amount"));
1738 return 0;
1739 }
1740 }
1741 break;
1742
1743 case AARCH64_OPND_ADDR_REGOFF:
1744 /* Get the size of the data element that is accessed, which may be
1745 different from that of the source register size,
1746 e.g. in strb/ldrb. */
1747 size = aarch64_get_qualifier_esize (opnd->qualifier);
1748 /* It is either no shift or shift by the binary logarithm of SIZE. */
1749 if (opnd->shifter.amount != 0
1750 && opnd->shifter.amount != (int)get_logsz (size))
1751 {
1752 set_other_error (mismatch_detail, idx,
1753 _("invalid shift amount"));
1754 return 0;
1755 }
1756 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1757 operators. */
1758 switch (opnd->shifter.kind)
1759 {
1760 case AARCH64_MOD_UXTW:
1761 case AARCH64_MOD_LSL:
1762 case AARCH64_MOD_SXTW:
1763 case AARCH64_MOD_SXTX: break;
1764 default:
1765 set_other_error (mismatch_detail, idx,
1766 _("invalid extend/shift operator"));
1767 return 0;
1768 }
1769 break;
1770
1771 case AARCH64_OPND_ADDR_UIMM12:
1772 imm = opnd->addr.offset.imm;
1773 /* Get the size of the data element that is accessed, which may be
1774 different from that of the source register size,
1775 e.g. in strb/ldrb. */
1776 size = aarch64_get_qualifier_esize (qualifier);
1777 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1778 {
1779 set_offset_out_of_range_error (mismatch_detail, idx,
1780 0, 4095 * size);
1781 return 0;
1782 }
1783 if (!value_aligned_p (opnd->addr.offset.imm, size))
1784 {
1785 set_unaligned_error (mismatch_detail, idx, size);
1786 return 0;
1787 }
1788 break;
1789
1790 case AARCH64_OPND_ADDR_PCREL14:
1791 case AARCH64_OPND_ADDR_PCREL19:
1792 case AARCH64_OPND_ADDR_PCREL21:
1793 case AARCH64_OPND_ADDR_PCREL26:
1794 imm = opnd->imm.value;
1795 if (operand_need_shift_by_two (get_operand_from_code (type)))
1796 {
1797 /* The offset value in a PC-relative branch instruction is alway
1798 4-byte aligned and is encoded without the lowest 2 bits. */
1799 if (!value_aligned_p (imm, 4))
1800 {
1801 set_unaligned_error (mismatch_detail, idx, 4);
1802 return 0;
1803 }
1804 /* Right shift by 2 so that we can carry out the following check
1805 canonically. */
1806 imm >>= 2;
1807 }
1808 size = get_operand_fields_width (get_operand_from_code (type));
1809 if (!value_fit_signed_field_p (imm, size))
1810 {
1811 set_other_error (mismatch_detail, idx,
1812 _("immediate out of range"));
1813 return 0;
1814 }
1815 break;
1816
1817 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1818 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1819 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1820 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1821 min_value = -8;
1822 max_value = 7;
1823 sve_imm_offset_vl:
1824 assert (!opnd->addr.offset.is_reg);
1825 assert (opnd->addr.preind);
1826 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1827 min_value *= num;
1828 max_value *= num;
1829 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1830 || (opnd->shifter.operator_present
1831 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1832 {
1833 set_other_error (mismatch_detail, idx,
1834 _("invalid addressing mode"));
1835 return 0;
1836 }
1837 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1838 {
1839 set_offset_out_of_range_error (mismatch_detail, idx,
1840 min_value, max_value);
1841 return 0;
1842 }
1843 if (!value_aligned_p (opnd->addr.offset.imm, num))
1844 {
1845 set_unaligned_error (mismatch_detail, idx, num);
1846 return 0;
1847 }
1848 break;
1849
1850 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1851 min_value = -32;
1852 max_value = 31;
1853 goto sve_imm_offset_vl;
1854
1855 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1856 min_value = -256;
1857 max_value = 255;
1858 goto sve_imm_offset_vl;
1859
1860 case AARCH64_OPND_SVE_ADDR_RI_U6:
1861 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1862 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1863 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1864 min_value = 0;
1865 max_value = 63;
1866 sve_imm_offset:
1867 assert (!opnd->addr.offset.is_reg);
1868 assert (opnd->addr.preind);
1869 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1870 min_value *= num;
1871 max_value *= num;
1872 if (opnd->shifter.operator_present
1873 || opnd->shifter.amount_present)
1874 {
1875 set_other_error (mismatch_detail, idx,
1876 _("invalid addressing mode"));
1877 return 0;
1878 }
1879 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1880 {
1881 set_offset_out_of_range_error (mismatch_detail, idx,
1882 min_value, max_value);
1883 return 0;
1884 }
1885 if (!value_aligned_p (opnd->addr.offset.imm, num))
1886 {
1887 set_unaligned_error (mismatch_detail, idx, num);
1888 return 0;
1889 }
1890 break;
1891
1892 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1893 min_value = -8;
1894 max_value = 7;
1895 goto sve_imm_offset;
1896
1897 case AARCH64_OPND_SVE_ADDR_R:
1898 case AARCH64_OPND_SVE_ADDR_RR:
1899 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1900 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1901 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1902 case AARCH64_OPND_SVE_ADDR_RX:
1903 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1904 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1905 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1906 case AARCH64_OPND_SVE_ADDR_RZ:
1907 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1908 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1909 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1910 modifiers = 1 << AARCH64_MOD_LSL;
1911 sve_rr_operand:
1912 assert (opnd->addr.offset.is_reg);
1913 assert (opnd->addr.preind);
1914 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1915 && opnd->addr.offset.regno == 31)
1916 {
1917 set_other_error (mismatch_detail, idx,
1918 _("index register xzr is not allowed"));
1919 return 0;
1920 }
1921 if (((1 << opnd->shifter.kind) & modifiers) == 0
1922 || (opnd->shifter.amount
1923 != get_operand_specific_data (&aarch64_operands[type])))
1924 {
1925 set_other_error (mismatch_detail, idx,
1926 _("invalid addressing mode"));
1927 return 0;
1928 }
1929 break;
1930
1931 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1932 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1933 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1934 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1935 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1936 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1937 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1938 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1939 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1940 goto sve_rr_operand;
1941
1942 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1943 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1944 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1945 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1946 min_value = 0;
1947 max_value = 31;
1948 goto sve_imm_offset;
1949
1950 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1951 modifiers = 1 << AARCH64_MOD_LSL;
1952 sve_zz_operand:
1953 assert (opnd->addr.offset.is_reg);
1954 assert (opnd->addr.preind);
1955 if (((1 << opnd->shifter.kind) & modifiers) == 0
1956 || opnd->shifter.amount < 0
1957 || opnd->shifter.amount > 3)
1958 {
1959 set_other_error (mismatch_detail, idx,
1960 _("invalid addressing mode"));
1961 return 0;
1962 }
1963 break;
1964
1965 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1966 modifiers = (1 << AARCH64_MOD_SXTW);
1967 goto sve_zz_operand;
1968
1969 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1970 modifiers = 1 << AARCH64_MOD_UXTW;
1971 goto sve_zz_operand;
1972
1973 default:
1974 break;
1975 }
1976 break;
1977
1978 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1979 if (type == AARCH64_OPND_LEt)
1980 {
1981 /* Get the upper bound for the element index. */
1982 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1983 if (!value_in_range_p (opnd->reglist.index, 0, num))
1984 {
1985 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1986 return 0;
1987 }
1988 }
1989 /* The opcode dependent area stores the number of elements in
1990 each structure to be loaded/stored. */
1991 num = get_opcode_dependent_value (opcode);
1992 switch (type)
1993 {
1994 case AARCH64_OPND_LVt:
1995 assert (num >= 1 && num <= 4);
1996 /* Unless LD1/ST1, the number of registers should be equal to that
1997 of the structure elements. */
1998 if (num != 1 && opnd->reglist.num_regs != num)
1999 {
2000 set_reg_list_error (mismatch_detail, idx, num);
2001 return 0;
2002 }
2003 break;
2004 case AARCH64_OPND_LVt_AL:
2005 case AARCH64_OPND_LEt:
2006 assert (num >= 1 && num <= 4);
2007 /* The number of registers should be equal to that of the structure
2008 elements. */
2009 if (opnd->reglist.num_regs != num)
2010 {
2011 set_reg_list_error (mismatch_detail, idx, num);
2012 return 0;
2013 }
2014 break;
2015 default:
2016 break;
2017 }
2018 break;
2019
2020 case AARCH64_OPND_CLASS_IMMEDIATE:
2021 /* Constraint check on immediate operand. */
2022 imm = opnd->imm.value;
2023 /* E.g. imm_0_31 constrains value to be 0..31. */
2024 if (qualifier_value_in_range_constraint_p (qualifier)
2025 && !value_in_range_p (imm, get_lower_bound (qualifier),
2026 get_upper_bound (qualifier)))
2027 {
2028 set_imm_out_of_range_error (mismatch_detail, idx,
2029 get_lower_bound (qualifier),
2030 get_upper_bound (qualifier));
2031 return 0;
2032 }
2033
2034 switch (type)
2035 {
2036 case AARCH64_OPND_AIMM:
2037 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2038 {
2039 set_other_error (mismatch_detail, idx,
2040 _("invalid shift operator"));
2041 return 0;
2042 }
2043 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2044 {
2045 set_other_error (mismatch_detail, idx,
2046 _("shift amount must be 0 or 12"));
2047 return 0;
2048 }
2049 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2050 {
2051 set_other_error (mismatch_detail, idx,
2052 _("immediate out of range"));
2053 return 0;
2054 }
2055 break;
2056
2057 case AARCH64_OPND_HALF:
2058 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2059 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2060 {
2061 set_other_error (mismatch_detail, idx,
2062 _("invalid shift operator"));
2063 return 0;
2064 }
2065 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2066 if (!value_aligned_p (opnd->shifter.amount, 16))
2067 {
2068 set_other_error (mismatch_detail, idx,
2069 _("shift amount must be a multiple of 16"));
2070 return 0;
2071 }
2072 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2073 {
2074 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2075 0, size * 8 - 16);
2076 return 0;
2077 }
2078 if (opnd->imm.value < 0)
2079 {
2080 set_other_error (mismatch_detail, idx,
2081 _("negative immediate value not allowed"));
2082 return 0;
2083 }
2084 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2085 {
2086 set_other_error (mismatch_detail, idx,
2087 _("immediate out of range"));
2088 return 0;
2089 }
2090 break;
2091
2092 case AARCH64_OPND_IMM_MOV:
2093 {
2094 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2095 imm = opnd->imm.value;
2096 assert (idx == 1);
2097 switch (opcode->op)
2098 {
2099 case OP_MOV_IMM_WIDEN:
2100 imm = ~imm;
2101 /* Fall through. */
2102 case OP_MOV_IMM_WIDE:
2103 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2104 {
2105 set_other_error (mismatch_detail, idx,
2106 _("immediate out of range"));
2107 return 0;
2108 }
2109 break;
2110 case OP_MOV_IMM_LOG:
2111 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2112 {
2113 set_other_error (mismatch_detail, idx,
2114 _("immediate out of range"));
2115 return 0;
2116 }
2117 break;
2118 default:
2119 assert (0);
2120 return 0;
2121 }
2122 }
2123 break;
2124
2125 case AARCH64_OPND_NZCV:
2126 case AARCH64_OPND_CCMP_IMM:
2127 case AARCH64_OPND_EXCEPTION:
2128 case AARCH64_OPND_TME_UIMM16:
2129 case AARCH64_OPND_UIMM4:
2130 case AARCH64_OPND_UIMM4_ADDG:
2131 case AARCH64_OPND_UIMM7:
2132 case AARCH64_OPND_UIMM3_OP1:
2133 case AARCH64_OPND_UIMM3_OP2:
2134 case AARCH64_OPND_SVE_UIMM3:
2135 case AARCH64_OPND_SVE_UIMM7:
2136 case AARCH64_OPND_SVE_UIMM8:
2137 case AARCH64_OPND_SVE_UIMM8_53:
2138 size = get_operand_fields_width (get_operand_from_code (type));
2139 assert (size < 32);
2140 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2141 {
2142 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2143 (1 << size) - 1);
2144 return 0;
2145 }
2146 break;
2147
2148 case AARCH64_OPND_UIMM10:
2149 /* Scaled unsigned 10 bits immediate offset. */
2150 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2151 {
2152 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2153 return 0;
2154 }
2155
2156 if (!value_aligned_p (opnd->imm.value, 16))
2157 {
2158 set_unaligned_error (mismatch_detail, idx, 16);
2159 return 0;
2160 }
2161 break;
2162
2163 case AARCH64_OPND_SIMM5:
2164 case AARCH64_OPND_SVE_SIMM5:
2165 case AARCH64_OPND_SVE_SIMM5B:
2166 case AARCH64_OPND_SVE_SIMM6:
2167 case AARCH64_OPND_SVE_SIMM8:
2168 size = get_operand_fields_width (get_operand_from_code (type));
2169 assert (size < 32);
2170 if (!value_fit_signed_field_p (opnd->imm.value, size))
2171 {
2172 set_imm_out_of_range_error (mismatch_detail, idx,
2173 -(1 << (size - 1)),
2174 (1 << (size - 1)) - 1);
2175 return 0;
2176 }
2177 break;
2178
2179 case AARCH64_OPND_WIDTH:
2180 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2181 && opnds[0].type == AARCH64_OPND_Rd);
2182 size = get_upper_bound (qualifier);
2183 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2184 /* lsb+width <= reg.size */
2185 {
2186 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2187 size - opnds[idx-1].imm.value);
2188 return 0;
2189 }
2190 break;
2191
2192 case AARCH64_OPND_LIMM:
2193 case AARCH64_OPND_SVE_LIMM:
2194 {
2195 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2196 uint64_t uimm = opnd->imm.value;
2197 if (opcode->op == OP_BIC)
2198 uimm = ~uimm;
2199 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2200 {
2201 set_other_error (mismatch_detail, idx,
2202 _("immediate out of range"));
2203 return 0;
2204 }
2205 }
2206 break;
2207
2208 case AARCH64_OPND_IMM0:
2209 case AARCH64_OPND_FPIMM0:
2210 if (opnd->imm.value != 0)
2211 {
2212 set_other_error (mismatch_detail, idx,
2213 _("immediate zero expected"));
2214 return 0;
2215 }
2216 break;
2217
2218 case AARCH64_OPND_IMM_ROT1:
2219 case AARCH64_OPND_IMM_ROT2:
2220 case AARCH64_OPND_SVE_IMM_ROT2:
2221 if (opnd->imm.value != 0
2222 && opnd->imm.value != 90
2223 && opnd->imm.value != 180
2224 && opnd->imm.value != 270)
2225 {
2226 set_other_error (mismatch_detail, idx,
2227 _("rotate expected to be 0, 90, 180 or 270"));
2228 return 0;
2229 }
2230 break;
2231
2232 case AARCH64_OPND_IMM_ROT3:
2233 case AARCH64_OPND_SVE_IMM_ROT1:
2234 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2235 {
2236 set_other_error (mismatch_detail, idx,
2237 _("rotate expected to be 90 or 270"));
2238 return 0;
2239 }
2240 break;
2241
2242 case AARCH64_OPND_SHLL_IMM:
2243 assert (idx == 2);
2244 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2245 if (opnd->imm.value != size)
2246 {
2247 set_other_error (mismatch_detail, idx,
2248 _("invalid shift amount"));
2249 return 0;
2250 }
2251 break;
2252
2253 case AARCH64_OPND_IMM_VLSL:
2254 size = aarch64_get_qualifier_esize (qualifier);
2255 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2256 {
2257 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2258 size * 8 - 1);
2259 return 0;
2260 }
2261 break;
2262
2263 case AARCH64_OPND_IMM_VLSR:
2264 size = aarch64_get_qualifier_esize (qualifier);
2265 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2266 {
2267 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2268 return 0;
2269 }
2270 break;
2271
2272 case AARCH64_OPND_SIMD_IMM:
2273 case AARCH64_OPND_SIMD_IMM_SFT:
2274 /* Qualifier check. */
2275 switch (qualifier)
2276 {
2277 case AARCH64_OPND_QLF_LSL:
2278 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2279 {
2280 set_other_error (mismatch_detail, idx,
2281 _("invalid shift operator"));
2282 return 0;
2283 }
2284 break;
2285 case AARCH64_OPND_QLF_MSL:
2286 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2287 {
2288 set_other_error (mismatch_detail, idx,
2289 _("invalid shift operator"));
2290 return 0;
2291 }
2292 break;
2293 case AARCH64_OPND_QLF_NIL:
2294 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2295 {
2296 set_other_error (mismatch_detail, idx,
2297 _("shift is not permitted"));
2298 return 0;
2299 }
2300 break;
2301 default:
2302 assert (0);
2303 return 0;
2304 }
2305 /* Is the immediate valid? */
2306 assert (idx == 1);
2307 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2308 {
2309 /* uimm8 or simm8 */
2310 if (!value_in_range_p (opnd->imm.value, -128, 255))
2311 {
2312 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2313 return 0;
2314 }
2315 }
2316 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2317 {
2318 /* uimm64 is not
2319 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2320 ffffffffgggggggghhhhhhhh'. */
2321 set_other_error (mismatch_detail, idx,
2322 _("invalid value for immediate"));
2323 return 0;
2324 }
2325 /* Is the shift amount valid? */
2326 switch (opnd->shifter.kind)
2327 {
2328 case AARCH64_MOD_LSL:
2329 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2330 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2331 {
2332 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2333 (size - 1) * 8);
2334 return 0;
2335 }
2336 if (!value_aligned_p (opnd->shifter.amount, 8))
2337 {
2338 set_unaligned_error (mismatch_detail, idx, 8);
2339 return 0;
2340 }
2341 break;
2342 case AARCH64_MOD_MSL:
2343 /* Only 8 and 16 are valid shift amount. */
2344 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2345 {
2346 set_other_error (mismatch_detail, idx,
2347 _("shift amount must be 0 or 16"));
2348 return 0;
2349 }
2350 break;
2351 default:
2352 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2353 {
2354 set_other_error (mismatch_detail, idx,
2355 _("invalid shift operator"));
2356 return 0;
2357 }
2358 break;
2359 }
2360 break;
2361
2362 case AARCH64_OPND_FPIMM:
2363 case AARCH64_OPND_SIMD_FPIMM:
2364 case AARCH64_OPND_SVE_FPIMM8:
2365 if (opnd->imm.is_fp == 0)
2366 {
2367 set_other_error (mismatch_detail, idx,
2368 _("floating-point immediate expected"));
2369 return 0;
2370 }
2371 /* The value is expected to be an 8-bit floating-point constant with
2372 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2373 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2374 instruction). */
2375 if (!value_in_range_p (opnd->imm.value, 0, 255))
2376 {
2377 set_other_error (mismatch_detail, idx,
2378 _("immediate out of range"));
2379 return 0;
2380 }
2381 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2382 {
2383 set_other_error (mismatch_detail, idx,
2384 _("invalid shift operator"));
2385 return 0;
2386 }
2387 break;
2388
2389 case AARCH64_OPND_SVE_AIMM:
2390 min_value = 0;
2391 sve_aimm:
2392 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2393 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2394 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2395 uvalue = opnd->imm.value;
2396 shift = opnd->shifter.amount;
2397 if (size == 1)
2398 {
2399 if (shift != 0)
2400 {
2401 set_other_error (mismatch_detail, idx,
2402 _("no shift amount allowed for"
2403 " 8-bit constants"));
2404 return 0;
2405 }
2406 }
2407 else
2408 {
2409 if (shift != 0 && shift != 8)
2410 {
2411 set_other_error (mismatch_detail, idx,
2412 _("shift amount must be 0 or 8"));
2413 return 0;
2414 }
2415 if (shift == 0 && (uvalue & 0xff) == 0)
2416 {
2417 shift = 8;
2418 uvalue = (int64_t) uvalue / 256;
2419 }
2420 }
2421 mask >>= shift;
2422 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2423 {
2424 set_other_error (mismatch_detail, idx,
2425 _("immediate too big for element size"));
2426 return 0;
2427 }
2428 uvalue = (uvalue - min_value) & mask;
2429 if (uvalue > 0xff)
2430 {
2431 set_other_error (mismatch_detail, idx,
2432 _("invalid arithmetic immediate"));
2433 return 0;
2434 }
2435 break;
2436
2437 case AARCH64_OPND_SVE_ASIMM:
2438 min_value = -128;
2439 goto sve_aimm;
2440
2441 case AARCH64_OPND_SVE_I1_HALF_ONE:
2442 assert (opnd->imm.is_fp);
2443 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2444 {
2445 set_other_error (mismatch_detail, idx,
2446 _("floating-point value must be 0.5 or 1.0"));
2447 return 0;
2448 }
2449 break;
2450
2451 case AARCH64_OPND_SVE_I1_HALF_TWO:
2452 assert (opnd->imm.is_fp);
2453 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2454 {
2455 set_other_error (mismatch_detail, idx,
2456 _("floating-point value must be 0.5 or 2.0"));
2457 return 0;
2458 }
2459 break;
2460
2461 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2462 assert (opnd->imm.is_fp);
2463 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2464 {
2465 set_other_error (mismatch_detail, idx,
2466 _("floating-point value must be 0.0 or 1.0"));
2467 return 0;
2468 }
2469 break;
2470
2471 case AARCH64_OPND_SVE_INV_LIMM:
2472 {
2473 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2474 uint64_t uimm = ~opnd->imm.value;
2475 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2476 {
2477 set_other_error (mismatch_detail, idx,
2478 _("immediate out of range"));
2479 return 0;
2480 }
2481 }
2482 break;
2483
2484 case AARCH64_OPND_SVE_LIMM_MOV:
2485 {
2486 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2487 uint64_t uimm = opnd->imm.value;
2488 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2489 {
2490 set_other_error (mismatch_detail, idx,
2491 _("immediate out of range"));
2492 return 0;
2493 }
2494 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2495 {
2496 set_other_error (mismatch_detail, idx,
2497 _("invalid replicated MOV immediate"));
2498 return 0;
2499 }
2500 }
2501 break;
2502
2503 case AARCH64_OPND_SVE_PATTERN_SCALED:
2504 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2505 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2506 {
2507 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2508 return 0;
2509 }
2510 break;
2511
2512 case AARCH64_OPND_SVE_SHLIMM_PRED:
2513 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2514 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2515 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2516 {
2517 set_imm_out_of_range_error (mismatch_detail, idx,
2518 0, 8 * size - 1);
2519 return 0;
2520 }
2521 break;
2522
2523 case AARCH64_OPND_SVE_SHRIMM_PRED:
2524 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2525 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2526 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2527 {
2528 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2529 return 0;
2530 }
2531 break;
2532
2533 default:
2534 break;
2535 }
2536 break;
2537
2538 case AARCH64_OPND_CLASS_SYSTEM:
2539 switch (type)
2540 {
2541 case AARCH64_OPND_PSTATEFIELD:
2542 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2543 /* MSR UAO, #uimm4
2544 MSR PAN, #uimm4
2545 MSR SSBS,#uimm4
2546 The immediate must be #0 or #1. */
2547 if ((opnd->pstatefield == 0x03 /* UAO. */
2548 || opnd->pstatefield == 0x04 /* PAN. */
2549 || opnd->pstatefield == 0x19 /* SSBS. */
2550 || opnd->pstatefield == 0x1a) /* DIT. */
2551 && opnds[1].imm.value > 1)
2552 {
2553 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2554 return 0;
2555 }
2556 /* MSR SPSel, #uimm4
2557 Uses uimm4 as a control value to select the stack pointer: if
2558 bit 0 is set it selects the current exception level's stack
2559 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2560 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2561 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2562 {
2563 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2564 return 0;
2565 }
2566 break;
2567 default:
2568 break;
2569 }
2570 break;
2571
2572 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2573 /* Get the upper bound for the element index. */
2574 if (opcode->op == OP_FCMLA_ELEM)
2575 /* FCMLA index range depends on the vector size of other operands
2576 and is halfed because complex numbers take two elements. */
2577 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2578 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2579 else
2580 num = 16;
2581 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2582 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2583
2584 /* Index out-of-range. */
2585 if (!value_in_range_p (opnd->reglane.index, 0, num))
2586 {
2587 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2588 return 0;
2589 }
2590 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2591 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2592 number is encoded in "size:M:Rm":
2593 size <Vm>
2594 00 RESERVED
2595 01 0:Rm
2596 10 M:Rm
2597 11 RESERVED */
2598 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2599 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2600 {
2601 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2602 return 0;
2603 }
2604 break;
2605
2606 case AARCH64_OPND_CLASS_MODIFIED_REG:
2607 assert (idx == 1 || idx == 2);
2608 switch (type)
2609 {
2610 case AARCH64_OPND_Rm_EXT:
2611 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2612 && opnd->shifter.kind != AARCH64_MOD_LSL)
2613 {
2614 set_other_error (mismatch_detail, idx,
2615 _("extend operator expected"));
2616 return 0;
2617 }
2618 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2619 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2620 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2621 case. */
2622 if (!aarch64_stack_pointer_p (opnds + 0)
2623 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2624 {
2625 if (!opnd->shifter.operator_present)
2626 {
2627 set_other_error (mismatch_detail, idx,
2628 _("missing extend operator"));
2629 return 0;
2630 }
2631 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2632 {
2633 set_other_error (mismatch_detail, idx,
2634 _("'LSL' operator not allowed"));
2635 return 0;
2636 }
2637 }
2638 assert (opnd->shifter.operator_present /* Default to LSL. */
2639 || opnd->shifter.kind == AARCH64_MOD_LSL);
2640 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2641 {
2642 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2643 return 0;
2644 }
2645 /* In the 64-bit form, the final register operand is written as Wm
2646 for all but the (possibly omitted) UXTX/LSL and SXTX
2647 operators.
2648 N.B. GAS allows X register to be used with any operator as a
2649 programming convenience. */
2650 if (qualifier == AARCH64_OPND_QLF_X
2651 && opnd->shifter.kind != AARCH64_MOD_LSL
2652 && opnd->shifter.kind != AARCH64_MOD_UXTX
2653 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2654 {
2655 set_other_error (mismatch_detail, idx, _("W register expected"));
2656 return 0;
2657 }
2658 break;
2659
2660 case AARCH64_OPND_Rm_SFT:
2661 /* ROR is not available to the shifted register operand in
2662 arithmetic instructions. */
2663 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2664 {
2665 set_other_error (mismatch_detail, idx,
2666 _("shift operator expected"));
2667 return 0;
2668 }
2669 if (opnd->shifter.kind == AARCH64_MOD_ROR
2670 && opcode->iclass != log_shift)
2671 {
2672 set_other_error (mismatch_detail, idx,
2673 _("'ROR' operator not allowed"));
2674 return 0;
2675 }
2676 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2677 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2678 {
2679 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2680 return 0;
2681 }
2682 break;
2683
2684 default:
2685 break;
2686 }
2687 break;
2688
2689 default:
2690 break;
2691 }
2692
2693 return 1;
2694 }
2695
2696 /* Main entrypoint for the operand constraint checking.
2697
2698 Return 1 if operands of *INST meet the constraint applied by the operand
2699 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2700 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2701 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2702 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2703 error kind when it is notified that an instruction does not pass the check).
2704
2705 Un-determined operand qualifiers may get established during the process. */
2706
2707 int
2708 aarch64_match_operands_constraint (aarch64_inst *inst,
2709 aarch64_operand_error *mismatch_detail)
2710 {
2711 int i;
2712
2713 DEBUG_TRACE ("enter");
2714
2715 /* Check for cases where a source register needs to be the same as the
2716 destination register. Do this before matching qualifiers since if
2717 an instruction has both invalid tying and invalid qualifiers,
2718 the error about qualifiers would suggest several alternative
2719 instructions that also have invalid tying. */
2720 i = inst->opcode->tied_operand;
2721 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2722 {
2723 if (mismatch_detail)
2724 {
2725 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2726 mismatch_detail->index = i;
2727 mismatch_detail->error = NULL;
2728 }
2729 return 0;
2730 }
2731
2732 /* Match operands' qualifier.
2733 *INST has already had qualifier establish for some, if not all, of
2734 its operands; we need to find out whether these established
2735 qualifiers match one of the qualifier sequence in
2736 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2737 with the corresponding qualifier in such a sequence.
2738 Only basic operand constraint checking is done here; the more thorough
2739 constraint checking will carried out by operand_general_constraint_met_p,
2740 which has be to called after this in order to get all of the operands'
2741 qualifiers established. */
2742 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2743 {
2744 DEBUG_TRACE ("FAIL on operand qualifier matching");
2745 if (mismatch_detail)
2746 {
2747 /* Return an error type to indicate that it is the qualifier
2748 matching failure; we don't care about which operand as there
2749 are enough information in the opcode table to reproduce it. */
2750 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2751 mismatch_detail->index = -1;
2752 mismatch_detail->error = NULL;
2753 }
2754 return 0;
2755 }
2756
2757 /* Match operands' constraint. */
2758 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2759 {
2760 enum aarch64_opnd type = inst->opcode->operands[i];
2761 if (type == AARCH64_OPND_NIL)
2762 break;
2763 if (inst->operands[i].skip)
2764 {
2765 DEBUG_TRACE ("skip the incomplete operand %d", i);
2766 continue;
2767 }
2768 if (operand_general_constraint_met_p (inst->operands, i, type,
2769 inst->opcode, mismatch_detail) == 0)
2770 {
2771 DEBUG_TRACE ("FAIL on operand %d", i);
2772 return 0;
2773 }
2774 }
2775
2776 DEBUG_TRACE ("PASS");
2777
2778 return 1;
2779 }
2780
2781 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2782 Also updates the TYPE of each INST->OPERANDS with the corresponding
2783 value of OPCODE->OPERANDS.
2784
2785 Note that some operand qualifiers may need to be manually cleared by
2786 the caller before it further calls the aarch64_opcode_encode; by
2787 doing this, it helps the qualifier matching facilities work
2788 properly. */
2789
2790 const aarch64_opcode*
2791 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2792 {
2793 int i;
2794 const aarch64_opcode *old = inst->opcode;
2795
2796 inst->opcode = opcode;
2797
2798 /* Update the operand types. */
2799 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2800 {
2801 inst->operands[i].type = opcode->operands[i];
2802 if (opcode->operands[i] == AARCH64_OPND_NIL)
2803 break;
2804 }
2805
2806 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2807
2808 return old;
2809 }
2810
2811 int
2812 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2813 {
2814 int i;
2815 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2816 if (operands[i] == operand)
2817 return i;
2818 else if (operands[i] == AARCH64_OPND_NIL)
2819 break;
2820 return -1;
2821 }
2822 \f
/* R0...R30, followed by FOR31.  Expands to a 32-entry initializer where
   entries 0-30 come from applying macro R and entry 31 is FOR31 (the
   register number 31 names either SP or the zero register).  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* General-purpose register names, indexed [has_zr][is_64][regno]:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  Indexed [is_d][regno].  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2853
2854 /* Return the integer register name.
2855 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2856
2857 static inline const char *
2858 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2859 {
2860 const int has_zr = sp_reg_p ? 0 : 1;
2861 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2862 return int_reg[has_zr][is_64][regno];
2863 }
2864
2865 /* Like get_int_reg_name, but IS_64 is always 1. */
2866
2867 static inline const char *
2868 get_64bit_int_reg_name (int regno, int sp_reg_p)
2869 {
2870 const int has_zr = sp_reg_p ? 0 : 1;
2871 return int_reg[has_zr][1][regno];
2872 }
2873
2874 /* Get the name of the integer offset register in OPND, using the shift type
2875 to decide whether it's a word or doubleword. */
2876
2877 static inline const char *
2878 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2879 {
2880 switch (opnd->shifter.kind)
2881 {
2882 case AARCH64_MOD_UXTW:
2883 case AARCH64_MOD_SXTW:
2884 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2885
2886 case AARCH64_MOD_LSL:
2887 case AARCH64_MOD_SXTX:
2888 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2889
2890 default:
2891 abort ();
2892 }
2893 }
2894
2895 /* Get the name of the SVE vector offset register in OPND, using the operand
2896 qualifier to decide whether the suffix should be .S or .D. */
2897
2898 static inline const char *
2899 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2900 {
2901 assert (qualifier == AARCH64_OPND_QLF_S_S
2902 || qualifier == AARCH64_OPND_QLF_S_D);
2903 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2904 }
2905
/* Types for expanding an encoded 8-bit value to a floating-point value.  */

/* Overlay of a 64-bit pattern and a double-precision value.  */
typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

/* Overlay of a 32-bit pattern and a single-precision value.  */
typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision (see
   expand_fp_imm), so this deliberately mirrors single_conv_t.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2925
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t result = 0;
  const uint32_t sign = (imm8 >> 7) & 0x01;	/* imm8<7>.  */
  const uint32_t frac = imm8 & 0x7f;		/* imm8<6:0>.  */
  const uint32_t exp_bit = frac >> 6;		/* imm8<6>.  */
  /* Replicate(imm8<6>, 4).  */
  const uint32_t exp_rep4 = exp_bit ? 0xf : 0x0;

  if (size == 8)
    {
      /* Build the top 32 bits of the double, then shift into place.  */
      result = ((uint64_t) sign << (63 - 32))		/* imm8<7>.  */
	| ((uint64_t) (exp_bit ^ 1) << (62 - 32))	/* NOT(imm8<6>).  */
	| ((uint64_t) exp_rep4 << (58 - 32))
	| ((uint64_t) exp_bit << (57 - 32))
	| ((uint64_t) exp_bit << (56 - 32))
	| ((uint64_t) exp_bit << (55 - 32))	/* Replicate(imm8<6>,7).  */
	| ((uint64_t) frac << (48 - 32));	/* imm8<6>:imm8<5:0>.  */
      result <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      result = ((uint64_t) sign << 31)		/* imm8<7>.  */
	| ((uint64_t) (exp_bit ^ 1) << 30)	/* NOT(imm8<6>).  */
	| ((uint64_t) exp_rep4 << 26)		/* Replicate(imm8<6>,4).  */
	| ((uint64_t) frac << 19);		/* imm8<6>:imm8<5:0>.  */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return result;
}
2969
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix)
{
  const int num_regs = opnd->reglist.num_regs;
  const int first_reg = opnd->reglist.first_regno;
  /* Register numbers wrap around modulo 32 ("& 0x1f").  */
  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[8];	/* Temporary buffer.  */

  /* LEt operands always carry an element index.  */
  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
    snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (num_regs > 2 && last_reg > first_reg)
    snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
	      prefix, last_reg, qlf_name, tb);
  else
    {
      /* Otherwise list each register individually; the list may wrap
	 around V31 back to V0.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + 1) & 0x1f;
      const int reg2 = (first_reg + 2) & 0x1f;
      const int reg3 = (first_reg + 3) & 0x1f;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
		    prefix, reg1, qlf_name, tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
	  break;
	}
    }
}
3028
3029 /* Print the register+immediate address in OPND to BUF, which has SIZE
3030 characters. BASE is the name of the base register. */
3031
3032 static void
3033 print_immediate_offset_address (char *buf, size_t size,
3034 const aarch64_opnd_info *opnd,
3035 const char *base)
3036 {
3037 if (opnd->addr.writeback)
3038 {
3039 if (opnd->addr.preind)
3040 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
3041 else
3042 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
3043 }
3044 else
3045 {
3046 if (opnd->shifter.operator_present)
3047 {
3048 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3049 snprintf (buf, size, "[%s, #%d, mul vl]",
3050 base, opnd->addr.offset.imm);
3051 }
3052 else if (opnd->addr.offset.imm)
3053 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
3054 else
3055 snprintf (buf, size, "[%s]", base);
3056 }
3057 }
3058
/* Produce the string representation of the register offset address operand
   *OPND in the buffer pointed by BUF of size SIZE.  BASE and OFFSET are
   the names of the base and offset registers.  */
static void
print_register_offset_address (char *buf, size_t size,
			       const aarch64_opnd_info *opnd,
			       const char *base, const char *offset)
{
  char tb[16];			/* Temporary buffer.  */
  bfd_boolean print_extend_p = TRUE;
  bfd_boolean print_amount_p = TRUE;
  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;

  if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
				|| !opnd->shifter.amount_present))
    {
      /* Do not print the shift/extend amount when the amount is zero and
	 it is not the special case of an 8-bit load/store instruction.  */
      print_amount_p = FALSE;
      /* Likewise, no need to print the shift operator LSL in such a
	 situation.  */
      if (opnd->shifter.kind == AARCH64_MOD_LSL)
	print_extend_p = FALSE;
    }

  /* Prepare for the extend/shift.  */
  if (print_extend_p)
    {
      if (print_amount_p)
	snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
  /* PR 21096: The %100 is to silence a warning about possible truncation.  */
		  (opnd->shifter.amount % 100));
      else
	snprintf (tb, sizeof (tb), ", %s", shift_name);
    }
  else
    tb[0] = '\0';

  snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
}
3099
3100 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3101 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3102 PC, PCREL_P and ADDRESS are used to pass in and return information about
3103 the PC-relative address calculation, where the PC value is passed in
3104 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3105 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3106 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3107
3108 The function serves both the disassembler and the assembler diagnostics
3109 issuer, which is the reason why it lives in this file. */
3110
3111 void
3112 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3113 const aarch64_opcode *opcode,
3114 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3115 bfd_vma *address, char** notes)
3116 {
3117 unsigned int i, num_conds;
3118 const char *name = NULL;
3119 const aarch64_opnd_info *opnd = opnds + idx;
3120 enum aarch64_modifier_kind kind;
3121 uint64_t addr, enum_value;
3122
3123 buf[0] = '\0';
3124 if (pcrel_p)
3125 *pcrel_p = 0;
3126
3127 switch (opnd->type)
3128 {
3129 case AARCH64_OPND_Rd:
3130 case AARCH64_OPND_Rn:
3131 case AARCH64_OPND_Rm:
3132 case AARCH64_OPND_Rt:
3133 case AARCH64_OPND_Rt2:
3134 case AARCH64_OPND_Rs:
3135 case AARCH64_OPND_Ra:
3136 case AARCH64_OPND_Rt_SYS:
3137 case AARCH64_OPND_PAIRREG:
3138 case AARCH64_OPND_SVE_Rm:
3139 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3140 the <ic_op>, therefore we use opnd->present to override the
3141 generic optional-ness information. */
3142 if (opnd->type == AARCH64_OPND_Rt_SYS)
3143 {
3144 if (!opnd->present)
3145 break;
3146 }
3147 /* Omit the operand, e.g. RET. */
3148 else if (optional_operand_p (opcode, idx)
3149 && (opnd->reg.regno
3150 == get_optional_operand_default_value (opcode)))
3151 break;
3152 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3153 || opnd->qualifier == AARCH64_OPND_QLF_X);
3154 snprintf (buf, size, "%s",
3155 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3156 break;
3157
3158 case AARCH64_OPND_Rd_SP:
3159 case AARCH64_OPND_Rn_SP:
3160 case AARCH64_OPND_Rt_SP:
3161 case AARCH64_OPND_SVE_Rn_SP:
3162 case AARCH64_OPND_Rm_SP:
3163 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3164 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3165 || opnd->qualifier == AARCH64_OPND_QLF_X
3166 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3167 snprintf (buf, size, "%s",
3168 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3169 break;
3170
3171 case AARCH64_OPND_Rm_EXT:
3172 kind = opnd->shifter.kind;
3173 assert (idx == 1 || idx == 2);
3174 if ((aarch64_stack_pointer_p (opnds)
3175 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3176 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3177 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3178 && kind == AARCH64_MOD_UXTW)
3179 || (opnd->qualifier == AARCH64_OPND_QLF_X
3180 && kind == AARCH64_MOD_UXTX)))
3181 {
3182 /* 'LSL' is the preferred form in this case. */
3183 kind = AARCH64_MOD_LSL;
3184 if (opnd->shifter.amount == 0)
3185 {
3186 /* Shifter omitted. */
3187 snprintf (buf, size, "%s",
3188 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3189 break;
3190 }
3191 }
3192 if (opnd->shifter.amount)
3193 snprintf (buf, size, "%s, %s #%" PRIi64,
3194 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3195 aarch64_operand_modifiers[kind].name,
3196 opnd->shifter.amount);
3197 else
3198 snprintf (buf, size, "%s, %s",
3199 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3200 aarch64_operand_modifiers[kind].name);
3201 break;
3202
3203 case AARCH64_OPND_Rm_SFT:
3204 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3205 || opnd->qualifier == AARCH64_OPND_QLF_X);
3206 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3207 snprintf (buf, size, "%s",
3208 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3209 else
3210 snprintf (buf, size, "%s, %s #%" PRIi64,
3211 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3212 aarch64_operand_modifiers[opnd->shifter.kind].name,
3213 opnd->shifter.amount);
3214 break;
3215
3216 case AARCH64_OPND_Fd:
3217 case AARCH64_OPND_Fn:
3218 case AARCH64_OPND_Fm:
3219 case AARCH64_OPND_Fa:
3220 case AARCH64_OPND_Ft:
3221 case AARCH64_OPND_Ft2:
3222 case AARCH64_OPND_Sd:
3223 case AARCH64_OPND_Sn:
3224 case AARCH64_OPND_Sm:
3225 case AARCH64_OPND_SVE_VZn:
3226 case AARCH64_OPND_SVE_Vd:
3227 case AARCH64_OPND_SVE_Vm:
3228 case AARCH64_OPND_SVE_Vn:
3229 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3230 opnd->reg.regno);
3231 break;
3232
3233 case AARCH64_OPND_Va:
3234 case AARCH64_OPND_Vd:
3235 case AARCH64_OPND_Vn:
3236 case AARCH64_OPND_Vm:
3237 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3238 aarch64_get_qualifier_name (opnd->qualifier));
3239 break;
3240
3241 case AARCH64_OPND_Ed:
3242 case AARCH64_OPND_En:
3243 case AARCH64_OPND_Em:
3244 case AARCH64_OPND_Em16:
3245 case AARCH64_OPND_SM3_IMM2:
3246 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3247 aarch64_get_qualifier_name (opnd->qualifier),
3248 opnd->reglane.index);
3249 break;
3250
3251 case AARCH64_OPND_VdD1:
3252 case AARCH64_OPND_VnD1:
3253 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3254 break;
3255
3256 case AARCH64_OPND_LVn:
3257 case AARCH64_OPND_LVt:
3258 case AARCH64_OPND_LVt_AL:
3259 case AARCH64_OPND_LEt:
3260 print_register_list (buf, size, opnd, "v");
3261 break;
3262
3263 case AARCH64_OPND_SVE_Pd:
3264 case AARCH64_OPND_SVE_Pg3:
3265 case AARCH64_OPND_SVE_Pg4_5:
3266 case AARCH64_OPND_SVE_Pg4_10:
3267 case AARCH64_OPND_SVE_Pg4_16:
3268 case AARCH64_OPND_SVE_Pm:
3269 case AARCH64_OPND_SVE_Pn:
3270 case AARCH64_OPND_SVE_Pt:
3271 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3272 snprintf (buf, size, "p%d", opnd->reg.regno);
3273 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3274 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3275 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3276 aarch64_get_qualifier_name (opnd->qualifier));
3277 else
3278 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3279 aarch64_get_qualifier_name (opnd->qualifier));
3280 break;
3281
3282 case AARCH64_OPND_SVE_Za_5:
3283 case AARCH64_OPND_SVE_Za_16:
3284 case AARCH64_OPND_SVE_Zd:
3285 case AARCH64_OPND_SVE_Zm_5:
3286 case AARCH64_OPND_SVE_Zm_16:
3287 case AARCH64_OPND_SVE_Zn:
3288 case AARCH64_OPND_SVE_Zt:
3289 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3290 snprintf (buf, size, "z%d", opnd->reg.regno);
3291 else
3292 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3293 aarch64_get_qualifier_name (opnd->qualifier));
3294 break;
3295
3296 case AARCH64_OPND_SVE_ZnxN:
3297 case AARCH64_OPND_SVE_ZtxN:
3298 print_register_list (buf, size, opnd, "z");
3299 break;
3300
3301 case AARCH64_OPND_SVE_Zm3_INDEX:
3302 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3303 case AARCH64_OPND_SVE_Zm4_INDEX:
3304 case AARCH64_OPND_SVE_Zn_INDEX:
3305 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3306 aarch64_get_qualifier_name (opnd->qualifier),
3307 opnd->reglane.index);
3308 break;
3309
3310 case AARCH64_OPND_CRn:
3311 case AARCH64_OPND_CRm:
3312 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3313 break;
3314
3315 case AARCH64_OPND_IDX:
3316 case AARCH64_OPND_MASK:
3317 case AARCH64_OPND_IMM:
3318 case AARCH64_OPND_IMM_2:
3319 case AARCH64_OPND_WIDTH:
3320 case AARCH64_OPND_UIMM3_OP1:
3321 case AARCH64_OPND_UIMM3_OP2:
3322 case AARCH64_OPND_BIT_NUM:
3323 case AARCH64_OPND_IMM_VLSL:
3324 case AARCH64_OPND_IMM_VLSR:
3325 case AARCH64_OPND_SHLL_IMM:
3326 case AARCH64_OPND_IMM0:
3327 case AARCH64_OPND_IMMR:
3328 case AARCH64_OPND_IMMS:
3329 case AARCH64_OPND_FBITS:
3330 case AARCH64_OPND_TME_UIMM16:
3331 case AARCH64_OPND_SIMM5:
3332 case AARCH64_OPND_SVE_SHLIMM_PRED:
3333 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3334 case AARCH64_OPND_SVE_SHRIMM_PRED:
3335 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3336 case AARCH64_OPND_SVE_SIMM5:
3337 case AARCH64_OPND_SVE_SIMM5B:
3338 case AARCH64_OPND_SVE_SIMM6:
3339 case AARCH64_OPND_SVE_SIMM8:
3340 case AARCH64_OPND_SVE_UIMM3:
3341 case AARCH64_OPND_SVE_UIMM7:
3342 case AARCH64_OPND_SVE_UIMM8:
3343 case AARCH64_OPND_SVE_UIMM8_53:
3344 case AARCH64_OPND_IMM_ROT1:
3345 case AARCH64_OPND_IMM_ROT2:
3346 case AARCH64_OPND_IMM_ROT3:
3347 case AARCH64_OPND_SVE_IMM_ROT1:
3348 case AARCH64_OPND_SVE_IMM_ROT2:
3349 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3350 break;
3351
3352 case AARCH64_OPND_SVE_I1_HALF_ONE:
3353 case AARCH64_OPND_SVE_I1_HALF_TWO:
3354 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3355 {
3356 single_conv_t c;
3357 c.i = opnd->imm.value;
3358 snprintf (buf, size, "#%.1f", c.f);
3359 break;
3360 }
3361
3362 case AARCH64_OPND_SVE_PATTERN:
3363 if (optional_operand_p (opcode, idx)
3364 && opnd->imm.value == get_optional_operand_default_value (opcode))
3365 break;
3366 enum_value = opnd->imm.value;
3367 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3368 if (aarch64_sve_pattern_array[enum_value])
3369 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3370 else
3371 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3372 break;
3373
3374 case AARCH64_OPND_SVE_PATTERN_SCALED:
3375 if (optional_operand_p (opcode, idx)
3376 && !opnd->shifter.operator_present
3377 && opnd->imm.value == get_optional_operand_default_value (opcode))
3378 break;
3379 enum_value = opnd->imm.value;
3380 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3381 if (aarch64_sve_pattern_array[opnd->imm.value])
3382 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3383 else
3384 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3385 if (opnd->shifter.operator_present)
3386 {
3387 size_t len = strlen (buf);
3388 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3389 aarch64_operand_modifiers[opnd->shifter.kind].name,
3390 opnd->shifter.amount);
3391 }
3392 break;
3393
3394 case AARCH64_OPND_SVE_PRFOP:
3395 enum_value = opnd->imm.value;
3396 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3397 if (aarch64_sve_prfop_array[enum_value])
3398 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3399 else
3400 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3401 break;
3402
3403 case AARCH64_OPND_IMM_MOV:
3404 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3405 {
3406 case 4: /* e.g. MOV Wd, #<imm32>. */
3407 {
3408 int imm32 = opnd->imm.value;
3409 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3410 }
3411 break;
3412 case 8: /* e.g. MOV Xd, #<imm64>. */
3413 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3414 opnd->imm.value, opnd->imm.value);
3415 break;
3416 default: assert (0);
3417 }
3418 break;
3419
3420 case AARCH64_OPND_FPIMM0:
3421 snprintf (buf, size, "#0.0");
3422 break;
3423
3424 case AARCH64_OPND_LIMM:
3425 case AARCH64_OPND_AIMM:
3426 case AARCH64_OPND_HALF:
3427 case AARCH64_OPND_SVE_INV_LIMM:
3428 case AARCH64_OPND_SVE_LIMM:
3429 case AARCH64_OPND_SVE_LIMM_MOV:
3430 if (opnd->shifter.amount)
3431 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3432 opnd->shifter.amount);
3433 else
3434 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3435 break;
3436
3437 case AARCH64_OPND_SIMD_IMM:
3438 case AARCH64_OPND_SIMD_IMM_SFT:
3439 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3440 || opnd->shifter.kind == AARCH64_MOD_NONE)
3441 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3442 else
3443 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3444 aarch64_operand_modifiers[opnd->shifter.kind].name,
3445 opnd->shifter.amount);
3446 break;
3447
3448 case AARCH64_OPND_SVE_AIMM:
3449 case AARCH64_OPND_SVE_ASIMM:
3450 if (opnd->shifter.amount)
3451 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3452 opnd->shifter.amount);
3453 else
3454 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3455 break;
3456
3457 case AARCH64_OPND_FPIMM:
3458 case AARCH64_OPND_SIMD_FPIMM:
3459 case AARCH64_OPND_SVE_FPIMM8:
3460 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3461 {
3462 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3463 {
3464 half_conv_t c;
3465 c.i = expand_fp_imm (2, opnd->imm.value);
3466 snprintf (buf, size, "#%.18e", c.f);
3467 }
3468 break;
3469 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3470 {
3471 single_conv_t c;
3472 c.i = expand_fp_imm (4, opnd->imm.value);
3473 snprintf (buf, size, "#%.18e", c.f);
3474 }
3475 break;
3476 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3477 {
3478 double_conv_t c;
3479 c.i = expand_fp_imm (8, opnd->imm.value);
3480 snprintf (buf, size, "#%.18e", c.d);
3481 }
3482 break;
3483 default: assert (0);
3484 }
3485 break;
3486
3487 case AARCH64_OPND_CCMP_IMM:
3488 case AARCH64_OPND_NZCV:
3489 case AARCH64_OPND_EXCEPTION:
3490 case AARCH64_OPND_UIMM4:
3491 case AARCH64_OPND_UIMM4_ADDG:
3492 case AARCH64_OPND_UIMM7:
3493 case AARCH64_OPND_UIMM10:
3494 if (optional_operand_p (opcode, idx) == TRUE
3495 && (opnd->imm.value ==
3496 (int64_t) get_optional_operand_default_value (opcode)))
3497 /* Omit the operand, e.g. DCPS1. */
3498 break;
3499 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3500 break;
3501
3502 case AARCH64_OPND_COND:
3503 case AARCH64_OPND_COND1:
3504 snprintf (buf, size, "%s", opnd->cond->names[0]);
3505 num_conds = ARRAY_SIZE (opnd->cond->names);
3506 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3507 {
3508 size_t len = strlen (buf);
3509 if (i == 1)
3510 snprintf (buf + len, size - len, " // %s = %s",
3511 opnd->cond->names[0], opnd->cond->names[i]);
3512 else
3513 snprintf (buf + len, size - len, ", %s",
3514 opnd->cond->names[i]);
3515 }
3516 break;
3517
3518 case AARCH64_OPND_ADDR_ADRP:
3519 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3520 + opnd->imm.value;
3521 if (pcrel_p)
3522 *pcrel_p = 1;
3523 if (address)
3524 *address = addr;
3525 /* This is not necessary during the disassembling, as print_address_func
3526 in the disassemble_info will take care of the printing. But some
3527 other callers may be still interested in getting the string in *STR,
3528 so here we do snprintf regardless. */
3529 snprintf (buf, size, "#0x%" PRIx64, addr);
3530 break;
3531
3532 case AARCH64_OPND_ADDR_PCREL14:
3533 case AARCH64_OPND_ADDR_PCREL19:
3534 case AARCH64_OPND_ADDR_PCREL21:
3535 case AARCH64_OPND_ADDR_PCREL26:
3536 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3537 if (pcrel_p)
3538 *pcrel_p = 1;
3539 if (address)
3540 *address = addr;
3541 /* This is not necessary during the disassembling, as print_address_func
3542 in the disassemble_info will take care of the printing. But some
3543 other callers may be still interested in getting the string in *STR,
3544 so here we do snprintf regardless. */
3545 snprintf (buf, size, "#0x%" PRIx64, addr);
3546 break;
3547
3548 case AARCH64_OPND_ADDR_SIMPLE:
3549 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3550 case AARCH64_OPND_SIMD_ADDR_POST:
3551 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3552 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3553 {
3554 if (opnd->addr.offset.is_reg)
3555 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3556 else
3557 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3558 }
3559 else
3560 snprintf (buf, size, "[%s]", name);
3561 break;
3562
3563 case AARCH64_OPND_ADDR_REGOFF:
3564 case AARCH64_OPND_SVE_ADDR_R:
3565 case AARCH64_OPND_SVE_ADDR_RR:
3566 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3567 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3568 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3569 case AARCH64_OPND_SVE_ADDR_RX:
3570 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3571 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3572 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3573 print_register_offset_address
3574 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3575 get_offset_int_reg_name (opnd));
3576 break;
3577
3578 case AARCH64_OPND_SVE_ADDR_RZ:
3579 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3580 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3581 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3582 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3583 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3584 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3585 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3586 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3587 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3588 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3589 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3590 print_register_offset_address
3591 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3592 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3593 break;
3594
3595 case AARCH64_OPND_ADDR_SIMM7:
3596 case AARCH64_OPND_ADDR_SIMM9:
3597 case AARCH64_OPND_ADDR_SIMM9_2:
3598 case AARCH64_OPND_ADDR_SIMM10:
3599 case AARCH64_OPND_ADDR_SIMM11:
3600 case AARCH64_OPND_ADDR_SIMM13:
3601 case AARCH64_OPND_ADDR_OFFSET:
3602 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3603 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3604 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3605 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3606 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3607 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3608 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3609 case AARCH64_OPND_SVE_ADDR_RI_U6:
3610 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3611 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3612 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3613 print_immediate_offset_address
3614 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3615 break;
3616
3617 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3618 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3619 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3620 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3621 print_immediate_offset_address
3622 (buf, size, opnd,
3623 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3624 break;
3625
3626 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3627 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3628 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3629 print_register_offset_address
3630 (buf, size, opnd,
3631 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3632 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3633 break;
3634
3635 case AARCH64_OPND_ADDR_UIMM12:
3636 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3637 if (opnd->addr.offset.imm)
3638 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3639 else
3640 snprintf (buf, size, "[%s]", name);
3641 break;
3642
3643 case AARCH64_OPND_SYSREG:
3644 for (i = 0; aarch64_sys_regs[i].name; ++i)
3645 {
3646 bfd_boolean exact_match
3647 = (aarch64_sys_regs[i].flags & opnd->sysreg.flags)
3648 == opnd->sysreg.flags;
3649
3650 /* Try and find an exact match, But if that fails, return the first
3651 partial match that was found. */
3652 if (aarch64_sys_regs[i].value == opnd->sysreg.value
3653 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i])
3654 && (name == NULL || exact_match))
3655 {
3656 name = aarch64_sys_regs[i].name;
3657 if (exact_match)
3658 {
3659 if (notes)
3660 *notes = NULL;
3661 break;
3662 }
3663
3664 /* If we didn't match exactly, that means the presense of a flag
3665 indicates what we didn't want for this instruction. e.g. If
3666 F_REG_READ is there, that means we were looking for a write
3667 register. See aarch64_ext_sysreg. */
3668 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
3669 *notes = _("reading from a write-only register");
3670 else if (aarch64_sys_regs[i].flags & F_REG_READ)
3671 *notes = _("writing to a read-only register");
3672 }
3673 }
3674
3675 if (name)
3676 snprintf (buf, size, "%s", name);
3677 else
3678 {
3679 /* Implementation defined system register. */
3680 unsigned int value = opnd->sysreg.value;
3681 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3682 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3683 value & 0x7);
3684 }
3685 break;
3686
3687 case AARCH64_OPND_PSTATEFIELD:
3688 for (i = 0; aarch64_pstatefields[i].name; ++i)
3689 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3690 break;
3691 assert (aarch64_pstatefields[i].name);
3692 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3693 break;
3694
3695 case AARCH64_OPND_SYSREG_AT:
3696 case AARCH64_OPND_SYSREG_DC:
3697 case AARCH64_OPND_SYSREG_IC:
3698 case AARCH64_OPND_SYSREG_TLBI:
3699 case AARCH64_OPND_SYSREG_SR:
3700 snprintf (buf, size, "%s", opnd->sysins_op->name);
3701 break;
3702
3703 case AARCH64_OPND_BARRIER:
3704 snprintf (buf, size, "%s", opnd->barrier->name);
3705 break;
3706
3707 case AARCH64_OPND_BARRIER_ISB:
3708 /* Operand can be omitted, e.g. in DCPS1. */
3709 if (! optional_operand_p (opcode, idx)
3710 || (opnd->barrier->value
3711 != get_optional_operand_default_value (opcode)))
3712 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3713 break;
3714
3715 case AARCH64_OPND_PRFOP:
3716 if (opnd->prfop->name != NULL)
3717 snprintf (buf, size, "%s", opnd->prfop->name);
3718 else
3719 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3720 break;
3721
3722 case AARCH64_OPND_BARRIER_PSB:
3723 case AARCH64_OPND_BTI_TARGET:
3724 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
3725 snprintf (buf, size, "%s", opnd->hint_option->name);
3726 break;
3727
3728 default:
3729 assert (0);
3730 }
3731 }
3732 \f
/* Encode a system register address <op0:op1:CRn:CRm:op2> into the single
   integer form used by the aarch64_sys_reg tables below.  The fields are
   packed from bit 19 downwards and the whole value shifted right by 5,
   matching the layout expected by the MRS/MSR encoders.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers
   (op0 = 3, CRn = 4 fixed).  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions (op0 = 1 fixed).  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Shorthand names for the CRn/CRm field values used in the tables.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15
3756
/* TODO: there is one more issue that needs to be resolved:
   1. handle cpu-implementation-defined system registers.  */
3759 const aarch64_sys_reg aarch64_sys_regs [] =
3760 {
3761 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3762 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3763 { "elr_el1", CPEN_(0,C0,1), 0 },
3764 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3765 { "sp_el0", CPEN_(0,C1,0), 0 },
3766 { "spsel", CPEN_(0,C2,0), 0 },
3767 { "daif", CPEN_(3,C2,1), 0 },
3768 { "currentel", CPEN_(0,C2,2), F_REG_READ }, /* RO */
3769 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3770 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3771 { "nzcv", CPEN_(3,C2,0), 0 },
3772 { "ssbs", CPEN_(3,C2,6), F_ARCHEXT },
3773 { "fpcr", CPEN_(3,C4,0), 0 },
3774 { "fpsr", CPEN_(3,C4,1), 0 },
3775 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3776 { "dlr_el0", CPEN_(3,C5,1), 0 },
3777 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3778 { "elr_el2", CPEN_(4,C0,1), 0 },
3779 { "sp_el1", CPEN_(4,C1,0), 0 },
3780 { "spsr_irq", CPEN_(4,C3,0), 0 },
3781 { "spsr_abt", CPEN_(4,C3,1), 0 },
3782 { "spsr_und", CPEN_(4,C3,2), 0 },
3783 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3784 { "spsr_el3", CPEN_(6,C0,0), 0 },
3785 { "elr_el3", CPEN_(6,C0,1), 0 },
3786 { "sp_el2", CPEN_(6,C1,0), 0 },
3787 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3788 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3789 { "midr_el1", CPENC(3,0,C0,C0,0), F_REG_READ }, /* RO */
3790 { "ctr_el0", CPENC(3,3,C0,C0,1), F_REG_READ }, /* RO */
3791 { "mpidr_el1", CPENC(3,0,C0,C0,5), F_REG_READ }, /* RO */
3792 { "revidr_el1", CPENC(3,0,C0,C0,6), F_REG_READ }, /* RO */
3793 { "aidr_el1", CPENC(3,1,C0,C0,7), F_REG_READ }, /* RO */
3794 { "dczid_el0", CPENC(3,3,C0,C0,7), F_REG_READ }, /* RO */
3795 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), F_REG_READ }, /* RO */
3796 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), F_REG_READ }, /* RO */
3797 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), F_REG_READ }, /* RO */
3798 { "id_pfr2_el1", CPENC(3,0,C0,C3,4), F_ARCHEXT | F_REG_READ}, /* RO */
3799 { "id_afr0_el1", CPENC(3,0,C0,C1,3), F_REG_READ }, /* RO */
3800 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), F_REG_READ }, /* RO */
3801 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), F_REG_READ }, /* RO */
3802 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), F_REG_READ }, /* RO */
3803 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), F_REG_READ }, /* RO */
3804 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), F_REG_READ }, /* RO */
3805 { "id_isar0_el1", CPENC(3,0,C0,C2,0), F_REG_READ }, /* RO */
3806 { "id_isar1_el1", CPENC(3,0,C0,C2,1), F_REG_READ }, /* RO */
3807 { "id_isar2_el1", CPENC(3,0,C0,C2,2), F_REG_READ }, /* RO */
3808 { "id_isar3_el1", CPENC(3,0,C0,C2,3), F_REG_READ }, /* RO */
3809 { "id_isar4_el1", CPENC(3,0,C0,C2,4), F_REG_READ }, /* RO */
3810 { "id_isar5_el1", CPENC(3,0,C0,C2,5), F_REG_READ }, /* RO */
3811 { "mvfr0_el1", CPENC(3,0,C0,C3,0), F_REG_READ }, /* RO */
3812 { "mvfr1_el1", CPENC(3,0,C0,C3,1), F_REG_READ }, /* RO */
3813 { "mvfr2_el1", CPENC(3,0,C0,C3,2), F_REG_READ }, /* RO */
3814 { "ccsidr_el1", CPENC(3,1,C0,C0,0), F_REG_READ }, /* RO */
3815 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), F_REG_READ }, /* RO */
3816 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), F_REG_READ }, /* RO */
3817 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), F_REG_READ }, /* RO */
3818 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), F_REG_READ }, /* RO */
3819 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), F_REG_READ }, /* RO */
3820 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), F_REG_READ }, /* RO */
3821 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), F_REG_READ }, /* RO */
3822 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), F_REG_READ }, /* RO */
3823 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT | F_REG_READ }, /* RO */
3824 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), F_REG_READ }, /* RO */
3825 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), F_REG_READ }, /* RO */
3826 { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT | F_REG_READ }, /* RO */
3827 { "clidr_el1", CPENC(3,1,C0,C0,1), F_REG_READ }, /* RO */
3828 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 },
3829 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3830 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3831 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3832 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3833 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3834 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3835 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3836 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3837 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3838 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3839 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3840 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3841 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3842 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3843 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3844 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3845 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3846 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3847 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3848 { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3849 { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3850 { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3851 { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3852 { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3853 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3854 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3855 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3856 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3857 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3858 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3859 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3860 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3861 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3862 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3863 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3864 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3865 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3866 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3867 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3868 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3869 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3870 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3871 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3872 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3873 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3874 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3875 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3876 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3877 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3878 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3879 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3880 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3881 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3882 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3883 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3884 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3885 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3886 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3887 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3888 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT },
3889 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3890 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3891 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3892 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3893 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3894 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3895 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3896 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3897 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3898 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3899 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3900 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3901 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3902 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3903 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3904 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3905 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3906 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3907 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3908 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3909 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3910 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3911 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3912 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3913 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3914 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3915 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3916 { "rvbar_el1", CPENC(3,0,C12,C0,1), F_REG_READ }, /* RO */
3917 { "rvbar_el2", CPENC(3,4,C12,C0,1), F_REG_READ }, /* RO */
3918 { "rvbar_el3", CPENC(3,6,C12,C0,1), F_REG_READ }, /* RO */
3919 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3920 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3921 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3922 { "isr_el1", CPENC(3,0,C12,C1,0), F_REG_READ }, /* RO */
3923 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3924 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3925 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3926 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3927 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3928 { "rndr", CPENC(3,3,C2,C4,0), F_ARCHEXT | F_REG_READ }, /* RO */
3929 { "rndrrs", CPENC(3,3,C2,C4,1), F_ARCHEXT | F_REG_READ }, /* RO */
3930 { "tco", CPENC(3,3,C4,C2,7), F_ARCHEXT },
3931 { "tfsre0_el1", CPENC(3,0,C6,C6,1), F_ARCHEXT },
3932 { "tfsr_el1", CPENC(3,0,C6,C5,0), F_ARCHEXT },
3933 { "tfsr_el2", CPENC(3,4,C6,C5,0), F_ARCHEXT },
3934 { "tfsr_el3", CPENC(3,6,C6,C6,0), F_ARCHEXT },
3935 { "tfsr_el12", CPENC(3,5,C6,C6,0), F_ARCHEXT },
3936 { "rgsr_el1", CPENC(3,0,C1,C0,5), F_ARCHEXT },
3937 { "gcr_el1", CPENC(3,0,C1,C0,6), F_ARCHEXT },
3938 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3939 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RW */
3940 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3941 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3942 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3943 { "scxtnum_el0", CPENC(3,3,C13,C0,7), F_ARCHEXT },
3944 { "scxtnum_el1", CPENC(3,0,C13,C0,7), F_ARCHEXT },
3945 { "scxtnum_el2", CPENC(3,4,C13,C0,7), F_ARCHEXT },
3946 { "scxtnum_el12", CPENC(3,5,C13,C0,7), F_ARCHEXT },
3947 { "scxtnum_el3", CPENC(3,6,C13,C0,7), F_ARCHEXT },
3948 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3949 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RW */
3950 { "cntpct_el0", CPENC(3,3,C14,C0,1), F_REG_READ }, /* RO */
3951 { "cntvct_el0", CPENC(3,3,C14,C0,2), F_REG_READ }, /* RO */
3952 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3953 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3954 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3955 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3956 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3957 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3958 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3959 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3960 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3961 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3962 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3963 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3964 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3965 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3966 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3967 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3968 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3969 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3970 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3971 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3972 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3973 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3974 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3975 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3976 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3977 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3978 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3979 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3980 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3981 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3982 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), F_REG_READ }, /* r */
3983 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3984 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3985 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), F_REG_READ }, /* r */
3986 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), F_REG_WRITE }, /* w */
3987 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 },
3988 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 },
3989 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3990 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3991 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3992 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3993 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3994 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3995 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3996 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3997 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3998 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3999 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
4000 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
4001 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
4002 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
4003 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
4004 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
4005 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
4006 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
4007 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
4008 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
4009 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
4010 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
4011 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
4012 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
4013 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
4014 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
4015 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
4016 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
4017 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
4018 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
4019 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
4020 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
4021 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
4022 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
4023 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
4024 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
4025 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
4026 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
4027 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
4028 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
4029 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
4030 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
4031 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
4032 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
4033 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
4034 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
4035 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
4036 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
4037 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
4038 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
4039 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
4040 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
4041 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
4042 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
4043 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
4044 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
4045 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
4046 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
4047 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
4048 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
4049 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
4050 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
4051 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
4052 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
4053 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
4054 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
4055 { "mdrar_el1", CPENC(2,0,C1, C0, 0), F_REG_READ }, /* r */
4056 { "oslar_el1", CPENC(2,0,C1, C0, 4), F_REG_WRITE }, /* w */
4057 { "oslsr_el1", CPENC(2,0,C1, C1, 4), F_REG_READ }, /* r */
4058 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
4059 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
4060 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
4061 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
4062 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), F_REG_READ }, /* r */
4063 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
4064 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
4065 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
4066 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT | F_REG_READ }, /* ro */
4067 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
4068 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
4069 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
4070 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
4071 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
4072 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
4073 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* rw */
4074 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
4075 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
4076 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
4077 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
4078 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
4079 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
4080 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), F_REG_WRITE }, /* w */
4081 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
4082 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), F_REG_READ }, /* r */
4083 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), F_REG_READ }, /* r */
4084 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
4085 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
4086 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
4087 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
4088 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
4089 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
4090 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
4091 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
4092 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
4093 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
4094 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
4095 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
4096 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
4097 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
4098 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
4099 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
4100 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
4101 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
4102 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
4103 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
4104 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
4105 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
4106 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
4107 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
4108 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
4109 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
4110 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
4111 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
4112 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
4113 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
4114 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
4115 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
4116 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
4117 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
4118 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
4119 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
4120 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
4121 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
4122 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
4123 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
4124 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
4125 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
4126 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
4127 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
4128 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
4129 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
4130 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
4131 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
4132 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
4133 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
4134 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
4135 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
4136 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
4137 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
4138 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
4139 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
4140 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
4141 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
4142 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
4143 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
4144 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
4145 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
4146 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
4147 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
4148 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
4149 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
4150 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
4151 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
4152 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
4153 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
4154
4155 { "dit", CPEN_ (3, C2, 5), F_ARCHEXT },
4156 { "vstcr_el2", CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
4157 { "vsttbr_el2", CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
4158 { "cnthvs_tval_el2", CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
4159 { "cnthvs_cval_el2", CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
4160 { "cnthvs_ctl_el2", CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
4161 { "cnthps_tval_el2", CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
4162 { "cnthps_cval_el2", CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
4163 { "cnthps_ctl_el2", CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
4164 { "sder32_el2", CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
4165 { "vncr_el2", CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
4166 { 0, CPENC(0,0,0,0,0), 0 },
4167 };
4168
4169 bfd_boolean
4170 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4171 {
4172 return (reg->flags & F_DEPRECATED) != 0;
4173 }
4174
/* Return TRUE if the system register REG is available with the CPU
   feature set FEATURES.  Registers without the F_ARCHEXT flag belong to
   the base architecture and are always available; for F_ARCHEXT
   registers, REG's encoding is compared against the encodings of the
   registers introduced by each architecture extension.  The encodings
   below mirror the corresponding entries in aarch64_sys_regs.  */

bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  /* Base-architecture registers are unconditionally supported.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* SCXTNUM_ELx registers.  */
  if ((reg->value == CPENC (3, 3, C13, C0, 7)
       || reg->value == CPENC (3, 0, C13, C0, 7)
       || reg->value == CPENC (3, 4, C13, C0, 7)
       || reg->value == CPENC (3, 6, C13, C0, 7)
       || reg->value == CPENC (3, 5, C13, C0, 7))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SCXTNUM))
    return FALSE;

  /* ID_PFR2_EL1 register.  */
  if (reg->value == CPENC(3, 0, C0, C3, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_ID_PFR2))
    return FALSE;

  /* SSBS.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(3,C2,6)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
     ERXMISC0_EL1 AND ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  /* ARMv8.3 Pointer authentication keys.  */
  if ((reg->value == CPENC (3, 0, C2, C1, 0)
       || reg->value == CPENC (3, 0, C2, C1, 1)
       || reg->value == CPENC (3, 0, C2, C1, 2)
       || reg->value == CPENC (3, 0, C2, C1, 3)
       || reg->value == CPENC (3, 0, C2, C2, 0)
       || reg->value == CPENC (3, 0, C2, C2, 1)
       || reg->value == CPENC (3, 0, C2, C2, 2)
       || reg->value == CPENC (3, 0, C2, C2, 3)
       || reg->value == CPENC (3, 0, C2, C3, 0)
       || reg->value == CPENC (3, 0, C2, C3, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
    return FALSE;

  /* SVE.  */
  if ((reg->value == CPENC (3, 0, C0, C4, 4)
       || reg->value == CPENC (3, 0, C1, C2, 0)
       || reg->value == CPENC (3, 4, C1, C2, 0)
       || reg->value == CPENC (3, 6, C1, C2, 0)
       || reg->value == CPENC (3, 5, C1, C2, 0)
       || reg->value == CPENC (3, 0, C0, C0, 7))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
    return FALSE;

  /* ARMv8.4 features.  */

  /* PSTATE.DIT.  */
  if (reg->value == CPEN_ (3, C2, 5)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* Virtualization extensions.  */
  if ((reg->value == CPENC(3, 4, C2, C6, 2)
       || reg->value == CPENC(3, 4, C2, C6, 0)
       || reg->value == CPENC(3, 4, C14, C4, 0)
       || reg->value == CPENC(3, 4, C14, C4, 2)
       || reg->value == CPENC(3, 4, C14, C4, 1)
       || reg->value == CPENC(3, 4, C14, C5, 0)
       || reg->value == CPENC(3, 4, C14, C5, 2)
       || reg->value == CPENC(3, 4, C14, C5, 1)
       || reg->value == CPENC(3, 4, C1, C3, 1)
       || reg->value == CPENC(3, 4, C2, C2, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* ARMv8.4 TLB instructions.  */
  if ((reg->value == CPENS (0, C8, C1, 0)
       || reg->value == CPENS (0, C8, C1, 1)
       || reg->value == CPENS (0, C8, C1, 2)
       || reg->value == CPENS (0, C8, C1, 3)
       || reg->value == CPENS (0, C8, C1, 5)
       || reg->value == CPENS (0, C8, C1, 7)
       || reg->value == CPENS (4, C8, C4, 0)
       || reg->value == CPENS (4, C8, C4, 4)
       || reg->value == CPENS (4, C8, C1, 1)
       || reg->value == CPENS (4, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 6)
       || reg->value == CPENS (6, C8, C1, 1)
       || reg->value == CPENS (6, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 0)
       || reg->value == CPENS (4, C8, C1, 4)
       || reg->value == CPENS (6, C8, C1, 0)
       || reg->value == CPENS (0, C8, C6, 1)
       || reg->value == CPENS (0, C8, C6, 3)
       || reg->value == CPENS (0, C8, C6, 5)
       || reg->value == CPENS (0, C8, C6, 7)
       || reg->value == CPENS (0, C8, C2, 1)
       || reg->value == CPENS (0, C8, C2, 3)
       || reg->value == CPENS (0, C8, C2, 5)
       || reg->value == CPENS (0, C8, C2, 7)
       || reg->value == CPENS (0, C8, C5, 1)
       || reg->value == CPENS (0, C8, C5, 3)
       || reg->value == CPENS (0, C8, C5, 5)
       || reg->value == CPENS (0, C8, C5, 7)
       || reg->value == CPENS (4, C8, C0, 2)
       || reg->value == CPENS (4, C8, C0, 6)
       || reg->value == CPENS (4, C8, C4, 2)
       || reg->value == CPENS (4, C8, C4, 6)
       || reg->value == CPENS (4, C8, C4, 3)
       || reg->value == CPENS (4, C8, C4, 7)
       || reg->value == CPENS (4, C8, C6, 1)
       || reg->value == CPENS (4, C8, C6, 5)
       || reg->value == CPENS (4, C8, C2, 1)
       || reg->value == CPENS (4, C8, C2, 5)
       || reg->value == CPENS (4, C8, C5, 1)
       || reg->value == CPENS (4, C8, C5, 5)
       || reg->value == CPENS (6, C8, C6, 1)
       || reg->value == CPENS (6, C8, C6, 5)
       || reg->value == CPENS (6, C8, C2, 1)
       || reg->value == CPENS (6, C8, C2, 5)
       || reg->value == CPENS (6, C8, C5, 1)
       || reg->value == CPENS (6, C8, C5, 5))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* Random Number Instructions.  For now they are available
     (and optional) only with ARMv8.5-A.  */
  if ((reg->value == CPENC (3, 3, C2, C4, 0)
       || reg->value == CPENC (3, 3, C2, C4, 1))
      && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RNG)
	   && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_5)))
    return FALSE;

  /* System Registers in ARMv8.5-A with AARCH64_FEATURE_MEMTAG.  */
  if ((reg->value == CPENC (3, 3, C4, C2, 7)
       || reg->value == CPENC (3, 0, C6, C6, 1)
       || reg->value == CPENC (3, 0, C6, C5, 0)
       || reg->value == CPENC (3, 4, C6, C5, 0)
       || reg->value == CPENC (3, 6, C6, C6, 0)
       || reg->value == CPENC (3, 5, C6, C6, 0)
       || reg->value == CPENC (3, 0, C1, C0, 5)
       || reg->value == CPENC (3, 0, C1, C0, 6))
      && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG)))
    return FALSE;

  /* No extension check rejected REG; it is supported.  */
  return TRUE;
}
4415
/* The values below are fairly misleading: the fields here are not in
   CPENC form.  They are in op2:op1 form, encoded by ins_pstatefield,
   which just shifts each value by the width of the fields in a loop.
   So if you CPENC them, only the first value will be set and the rest
   are masked out to 0.  As an example, take op2 = 3 and op1 = 2: CPENC
   would produce a value of 0b110000000001000000 (0x30040) while what
   you want is 0b011010 (0x1a).  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",   0x05, 0 },
  { "daifset", 0x1e, 0 },
  { "daifclr", 0x1f, 0 },
  /* The F_ARCHEXT entries below are gated on a CPU feature by
     aarch64_pstatefield_supported_p.  */
  { "pan",     0x04, F_ARCHEXT },	/* Needs FEATURE_PAN.  */
  { "uao",     0x03, F_ARCHEXT },	/* Needs FEATURE_V8_2.  */
  { "ssbs",    0x19, F_ARCHEXT },	/* Needs FEATURE_SSBS.  */
  { "dit",     0x1a, F_ARCHEXT },	/* Needs FEATURE_V8_4.  */
  { "tco",     0x1c, F_ARCHEXT },	/* Needs FEATURE_MEMTAG.  */
  { 0,	  CPENC(0,0,0,0,0), 0 },	/* Sentinel.  */
};
4435
4436 bfd_boolean
4437 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4438 const aarch64_sys_reg *reg)
4439 {
4440 if (!(reg->flags & F_ARCHEXT))
4441 return TRUE;
4442
4443 /* PAN. Values are from aarch64_pstatefields. */
4444 if (reg->value == 0x04
4445 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4446 return FALSE;
4447
4448 /* UAO. Values are from aarch64_pstatefields. */
4449 if (reg->value == 0x03
4450 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4451 return FALSE;
4452
4453 /* SSBS. Values are from aarch64_pstatefields. */
4454 if (reg->value == 0x19
4455 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
4456 return FALSE;
4457
4458 /* DIT. Values are from aarch64_pstatefields. */
4459 if (reg->value == 0x1a
4460 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4461 return FALSE;
4462
4463 /* TCO. Values are from aarch64_pstatefields. */
4464 if (reg->value == 0x1c
4465 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
4466 return FALSE;
4467
4468 return TRUE;
4469 }
4470
/* Operand table for the IC (instruction-cache maintenance) instruction.
   F_HASXT entries are queried via aarch64_sys_ins_reg_has_xt.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
4478
/* Operand table for the DC (data-cache maintenance) instruction.
   F_ARCHEXT entries are feature-gated by aarch64_sys_ins_reg_supported_p:
   cvap needs ARMv8.2, cvadp needs FEATURE_CVADP, and the g*/ig*/c*g*
   tag-related operations need FEATURE_MEMTAG.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
    { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT },
    { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT },
    { "ivac",	    CPENS (0, C7, C6, 1),  F_HASXT },
    { "igvac",	    CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT },
    { "igsw",	    CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
    { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT },
    { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT },
    { "cvac",	    CPENS (3, C7, C10, 1), F_HASXT },
    { "cgvac",	    CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
    { "cgdvac",	    CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
    { "cgsw",	    CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
    { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
    { "cvau",	    CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",	    CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "cgvap",	    CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
    { "cgdvap",	    CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
    { "cvadp",	    CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
    { "cgvadp",	    CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
    { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
    { "civac",	    CPENS (3, C7, C14, 1), F_HASXT },
    { "cigvac",	    CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
    { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
    { "cisw",	    CPENS (0, C7, C14, 2), F_HASXT },
    { "cigsw",	    CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
    { "cigdsw",	    CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
4511
/* Operand table for the AT (address translate) instruction.  The
   s1e1rp/s1e1wp entries are ARMv8.2 additions, gated by
   aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
4530
/* Operand table for the TLBI (TLB maintenance) instruction.  The *os
   and r* forms in the second and third groups below are ARMv8.4-A
   additions, gated on FEATURE_V8_4 by aarch64_sys_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },

    /* ARMv8.4-A *os forms.  */
    { "vmalle1os",    CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os",     CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os",      CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os",      CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os",      CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* ARMv8.4-A r* forms.  */
    { "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { 0, CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
4616
/* Operand table for the prediction-restriction instructions CFP, DVP
   and CPP.  All entries need FEATURE_PREDRES; see
   aarch64_sys_ins_reg_supported_p.  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

    { 0,       CPENS(0,0,0,0), 0 }	/* Sentinel.  */
};
4627
4628 bfd_boolean
4629 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4630 {
4631 return (sys_ins_reg->flags & F_HASXT) != 0;
4632 }
4633
/* Return TRUE if the system instruction operand REG is available given
   the enabled CPU feature set FEATURES.  Operands without F_ARCHEXT are
   part of the base architecture and are always supported.  Each check
   below matches REG's encoded value against entries from the
   aarch64_sys_regs_* tables and requires the corresponding feature.  */
extern bfd_boolean
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const aarch64_sys_ins_reg *reg)
{
  /* Base-architecture operand: always available.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg->value == CPENS (3, C7, C12, 1)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
  if (reg->value == CPENS (3, C7, C13, 1)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
    return FALSE;

  /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension.  */
  if ((reg->value == CPENS (0, C7, C6, 3)
       || reg->value == CPENS (0, C7, C6, 4)
       || reg->value == CPENS (0, C7, C10, 4)
       || reg->value == CPENS (0, C7, C14, 4)
       || reg->value == CPENS (3, C7, C10, 3)
       || reg->value == CPENS (3, C7, C12, 3)
       || reg->value == CPENS (3, C7, C13, 3)
       || reg->value == CPENS (3, C7, C14, 3)
       || reg->value == CPENS (3, C7, C4, 3)
       || reg->value == CPENS (0, C7, C6, 5)
       || reg->value == CPENS (0, C7, C6, 6)
       || reg->value == CPENS (0, C7, C10, 6)
       || reg->value == CPENS (0, C7, C14, 6)
       || reg->value == CPENS (3, C7, C10, 5)
       || reg->value == CPENS (3, C7, C12, 5)
       || reg->value == CPENS (3, C7, C13, 5)
       || reg->value == CPENS (3, C7, C14, 5)
       || reg->value == CPENS (3, C7, C4, 4))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
    return FALSE;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg->value == CPENS (0, C7, C9, 0)
       || reg->value == CPENS (0, C7, C9, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
  if (reg->value == CPENS (3, C7, C3, 0)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
    return FALSE;

  return TRUE;
}
4686
/* The CRn/CRm shorthands were only needed to build the system register
   and system instruction tables above; undefine them so they do not
   leak into the remainder of the file.  */
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15
4703
/* Extract bit number BT of instruction word INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the bit-field HI..LO (inclusive) of instruction word INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4706
4707 static enum err_type
4708 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
4709 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
4710 bfd_boolean encoding ATTRIBUTE_UNUSED,
4711 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4712 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4713 {
4714 int t = BITS (insn, 4, 0);
4715 int n = BITS (insn, 9, 5);
4716 int t2 = BITS (insn, 14, 10);
4717
4718 if (BIT (insn, 23))
4719 {
4720 /* Write back enabled. */
4721 if ((t == n || t2 == n) && n != 31)
4722 return ERR_UND;
4723 }
4724
4725 if (BIT (insn, 22))
4726 {
4727 /* Load */
4728 if (t == t2)
4729 return ERR_UND;
4730 }
4731
4732 return ERR_OK;
4733 }
4734
4735 /* Verifier for vector by element 3 operands functions where the
4736 conditions `if sz:L == 11 then UNDEFINED` holds. */
4737
4738 static enum err_type
4739 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
4740 bfd_vma pc ATTRIBUTE_UNUSED, bfd_boolean encoding,
4741 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4742 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4743 {
4744 const aarch64_insn undef_pattern = 0x3;
4745 aarch64_insn value;
4746
4747 assert (inst->opcode);
4748 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
4749 value = encoding ? inst->value : insn;
4750 assert (value);
4751
4752 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
4753 return ERR_UND;
4754
4755 return ERR_OK;
4756 }
4757
4758 /* Initialize an instruction sequence insn_sequence with the instruction INST.
4759 If INST is NULL the given insn_sequence is cleared and the sequence is left
4760 uninitialized. */
4761
4762 void
4763 init_insn_sequence (const struct aarch64_inst *inst,
4764 aarch64_instr_sequence *insn_sequence)
4765 {
4766 int num_req_entries = 0;
4767 insn_sequence->next_insn = 0;
4768 insn_sequence->num_insns = num_req_entries;
4769 if (insn_sequence->instr)
4770 XDELETE (insn_sequence->instr);
4771 insn_sequence->instr = NULL;
4772
4773 if (inst)
4774 {
4775 insn_sequence->instr = XNEW (aarch64_inst);
4776 memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
4777 }
4778
4779 /* Handle all the cases here. May need to think of something smarter than
4780 a giant if/else chain if this grows. At that time, a lookup table may be
4781 best. */
4782 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
4783 num_req_entries = 1;
4784
4785 if (insn_sequence->current_insns)
4786 XDELETEVEC (insn_sequence->current_insns);
4787 insn_sequence->current_insns = NULL;
4788
4789 if (num_req_entries != 0)
4790 {
4791 size_t size = num_req_entries * sizeof (aarch64_inst);
4792 insn_sequence->current_insns
4793 = (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
4794 memset (insn_sequence->current_insns, 0, size);
4795 }
4796 }
4797
4798
4799 /* This function verifies that the instruction INST adheres to its specified
4800 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
4801 returned and MISMATCH_DETAIL contains the reason why verification failed.
4802
4803 The function is called both during assembly and disassembly. If assembling
4804 then ENCODING will be TRUE, else FALSE. If dissassembling PC will be set
4805 and will contain the PC of the current instruction w.r.t to the section.
4806
4807 If ENCODING and PC=0 then you are at a start of a section. The constraints
4808 are verified against the given state insn_sequence which is updated as it
4809 transitions through the verification. */
4810
4811 enum err_type
4812 verify_constraints (const struct aarch64_inst *inst,
4813 const aarch64_insn insn ATTRIBUTE_UNUSED,
4814 bfd_vma pc,
4815 bfd_boolean encoding,
4816 aarch64_operand_error *mismatch_detail,
4817 aarch64_instr_sequence *insn_sequence)
4818 {
4819 assert (inst);
4820 assert (inst->opcode);
4821
4822 const struct aarch64_opcode *opcode = inst->opcode;
4823 if (!opcode->constraints && !insn_sequence->instr)
4824 return ERR_OK;
4825
4826 assert (insn_sequence);
4827
4828 enum err_type res = ERR_OK;
4829
4830 /* This instruction puts a constraint on the insn_sequence. */
4831 if (opcode->flags & F_SCAN)
4832 {
4833 if (insn_sequence->instr)
4834 {
4835 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4836 mismatch_detail->error = _("instruction opens new dependency "
4837 "sequence without ending previous one");
4838 mismatch_detail->index = -1;
4839 mismatch_detail->non_fatal = TRUE;
4840 res = ERR_VFI;
4841 }
4842
4843 init_insn_sequence (inst, insn_sequence);
4844 return res;
4845 }
4846
4847 /* Verify constraints on an existing sequence. */
4848 if (insn_sequence->instr)
4849 {
4850 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
4851 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
4852 closed a previous one that we should have. */
4853 if (!encoding && pc == 0)
4854 {
4855 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4856 mismatch_detail->error = _("previous `movprfx' sequence not closed");
4857 mismatch_detail->index = -1;
4858 mismatch_detail->non_fatal = TRUE;
4859 res = ERR_VFI;
4860 /* Reset the sequence. */
4861 init_insn_sequence (NULL, insn_sequence);
4862 return res;
4863 }
4864
4865 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
4866 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
4867 {
4868 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4869 instruction for better error messages. */
4870 if (!opcode->avariant || !(*opcode->avariant & AARCH64_FEATURE_SVE))
4871 {
4872 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4873 mismatch_detail->error = _("SVE instruction expected after "
4874 "`movprfx'");
4875 mismatch_detail->index = -1;
4876 mismatch_detail->non_fatal = TRUE;
4877 res = ERR_VFI;
4878 goto done;
4879 }
4880
4881 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4882 instruction that is allowed to be used with a MOVPRFX. */
4883 if (!(opcode->constraints & C_SCAN_MOVPRFX))
4884 {
4885 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4886 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
4887 "expected");
4888 mismatch_detail->index = -1;
4889 mismatch_detail->non_fatal = TRUE;
4890 res = ERR_VFI;
4891 goto done;
4892 }
4893
4894 /* Next check for usage of the predicate register. */
4895 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
4896 aarch64_opnd_info blk_pred, inst_pred;
4897 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
4898 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
4899 bfd_boolean predicated = FALSE;
4900 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
4901
4902 /* Determine if the movprfx instruction used is predicated or not. */
4903 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
4904 {
4905 predicated = TRUE;
4906 blk_pred = insn_sequence->instr->operands[1];
4907 }
4908
4909 unsigned char max_elem_size = 0;
4910 unsigned char current_elem_size;
4911 int num_op_used = 0, last_op_usage = 0;
4912 int i, inst_pred_idx = -1;
4913 int num_ops = aarch64_num_of_operands (opcode);
4914 for (i = 0; i < num_ops; i++)
4915 {
4916 aarch64_opnd_info inst_op = inst->operands[i];
4917 switch (inst_op.type)
4918 {
4919 case AARCH64_OPND_SVE_Zd:
4920 case AARCH64_OPND_SVE_Zm_5:
4921 case AARCH64_OPND_SVE_Zm_16:
4922 case AARCH64_OPND_SVE_Zn:
4923 case AARCH64_OPND_SVE_Zt:
4924 case AARCH64_OPND_SVE_Vm:
4925 case AARCH64_OPND_SVE_Vn:
4926 case AARCH64_OPND_Va:
4927 case AARCH64_OPND_Vn:
4928 case AARCH64_OPND_Vm:
4929 case AARCH64_OPND_Sn:
4930 case AARCH64_OPND_Sm:
4931 case AARCH64_OPND_Rn:
4932 case AARCH64_OPND_Rm:
4933 case AARCH64_OPND_Rn_SP:
4934 case AARCH64_OPND_Rt_SP:
4935 case AARCH64_OPND_Rm_SP:
4936 if (inst_op.reg.regno == blk_dest.reg.regno)
4937 {
4938 num_op_used++;
4939 last_op_usage = i;
4940 }
4941 current_elem_size
4942 = aarch64_get_qualifier_esize (inst_op.qualifier);
4943 if (current_elem_size > max_elem_size)
4944 max_elem_size = current_elem_size;
4945 break;
4946 case AARCH64_OPND_SVE_Pd:
4947 case AARCH64_OPND_SVE_Pg3:
4948 case AARCH64_OPND_SVE_Pg4_5:
4949 case AARCH64_OPND_SVE_Pg4_10:
4950 case AARCH64_OPND_SVE_Pg4_16:
4951 case AARCH64_OPND_SVE_Pm:
4952 case AARCH64_OPND_SVE_Pn:
4953 case AARCH64_OPND_SVE_Pt:
4954 inst_pred = inst_op;
4955 inst_pred_idx = i;
4956 break;
4957 default:
4958 break;
4959 }
4960 }
4961
4962 assert (max_elem_size != 0);
4963 aarch64_opnd_info inst_dest = inst->operands[0];
4964 /* Determine the size that should be used to compare against the
4965 movprfx size. */
4966 current_elem_size
4967 = opcode->constraints & C_MAX_ELEM
4968 ? max_elem_size
4969 : aarch64_get_qualifier_esize (inst_dest.qualifier);
4970
4971 /* If movprfx is predicated do some extra checks. */
4972 if (predicated)
4973 {
4974 /* The instruction must be predicated. */
4975 if (inst_pred_idx < 0)
4976 {
4977 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4978 mismatch_detail->error = _("predicated instruction expected "
4979 "after `movprfx'");
4980 mismatch_detail->index = -1;
4981 mismatch_detail->non_fatal = TRUE;
4982 res = ERR_VFI;
4983 goto done;
4984 }
4985
4986 /* The instruction must have a merging predicate. */
4987 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
4988 {
4989 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4990 mismatch_detail->error = _("merging predicate expected due "
4991 "to preceding `movprfx'");
4992 mismatch_detail->index = inst_pred_idx;
4993 mismatch_detail->non_fatal = TRUE;
4994 res = ERR_VFI;
4995 goto done;
4996 }
4997
4998 /* The same register must be used in instruction. */
4999 if (blk_pred.reg.regno != inst_pred.reg.regno)
5000 {
5001 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5002 mismatch_detail->error = _("predicate register differs "
5003 "from that in preceding "
5004 "`movprfx'");
5005 mismatch_detail->index = inst_pred_idx;
5006 mismatch_detail->non_fatal = TRUE;
5007 res = ERR_VFI;
5008 goto done;
5009 }
5010 }
5011
5012 /* Destructive operations by definition must allow one usage of the
5013 same register. */
5014 int allowed_usage
5015 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
5016
5017 /* Operand is not used at all. */
5018 if (num_op_used == 0)
5019 {
5020 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5021 mismatch_detail->error = _("output register of preceding "
5022 "`movprfx' not used in current "
5023 "instruction");
5024 mismatch_detail->index = 0;
5025 mismatch_detail->non_fatal = TRUE;
5026 res = ERR_VFI;
5027 goto done;
5028 }
5029
5030 /* We now know it's used, now determine exactly where it's used. */
5031 if (blk_dest.reg.regno != inst_dest.reg.regno)
5032 {
5033 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5034 mismatch_detail->error = _("output register of preceding "
5035 "`movprfx' expected as output");
5036 mismatch_detail->index = 0;
5037 mismatch_detail->non_fatal = TRUE;
5038 res = ERR_VFI;
5039 goto done;
5040 }
5041
5042 /* Operand used more than allowed for the specific opcode type. */
5043 if (num_op_used > allowed_usage)
5044 {
5045 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5046 mismatch_detail->error = _("output register of preceding "
5047 "`movprfx' used as input");
5048 mismatch_detail->index = last_op_usage;
5049 mismatch_detail->non_fatal = TRUE;
5050 res = ERR_VFI;
5051 goto done;
5052 }
5053
5054 /* Now the only thing left is the qualifiers checks. The register
5055 must have the same maximum element size. */
5056 if (inst_dest.qualifier
5057 && blk_dest.qualifier
5058 && current_elem_size
5059 != aarch64_get_qualifier_esize (blk_dest.qualifier))
5060 {
5061 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5062 mismatch_detail->error = _("register size not compatible with "
5063 "previous `movprfx'");
5064 mismatch_detail->index = 0;
5065 mismatch_detail->non_fatal = TRUE;
5066 res = ERR_VFI;
5067 goto done;
5068 }
5069 }
5070
5071 done:
5072 /* Add the new instruction to the sequence. */
5073 memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
5074 inst, sizeof (aarch64_inst));
5075
5076 /* Check if sequence is now full. */
5077 if (insn_sequence->next_insn >= insn_sequence->num_insns)
5078 {
5079 /* Sequence is full, but we don't have anything special to do for now,
5080 so clear and reset it. */
5081 init_insn_sequence (NULL, insn_sequence);
5082 }
5083 }
5084
5085 return res;
5086 }
5087
5088
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bfd_boolean
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  /* Mask of the bits above the low ESIZE bytes.  The shift by
     esize * 8 is split into two shifts of esize * 4 because a single
     shift by 64 (when esize == 8) would be undefined behavior.  */
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* The bits above the element must be all-zeros or all-ones (sign
     extension), otherwise the value does not fit in ESIZE bytes.  */
  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return FALSE;
  /* Narrow SVALUE step by step to the smallest element width whose
     replication reproduces UVALUE.  If the value replicates all the way
     down to a single byte, DUP can encode it at element size B, so DUPM
     is not needed.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return FALSE;
	}
    }
  /* DUP accepts a signed 8-bit immediate optionally shifted left by 8;
     anything outside that range needs DUPM.  */
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  return svalue < -128 || svalue >= 128;
}
5115
/* Include the opcode description table as well as the operand description
   table.  VERIFIER maps the verifier names used by the table entries in
   aarch64-tbl.h onto the verify_* functions defined above.  */
#define VERIFIER(x) verify_##x
#include "aarch64-tbl.h"
This page took 0.200962 seconds and 5 git commands to generate.