[binutils][aarch64] New iclass sve_size_hsd2.
[deliverable/binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include "bfd_stdint.h"
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
34 #ifdef DEBUG_AARCH64
35 int debug_dump = FALSE;
36 #endif /* DEBUG_AARCH64 */
37
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array[32] = {
41 /* 0-7. */
42 "pow2",
43 "vl1",
44 "vl2",
45 "vl3",
46 "vl4",
47 "vl5",
48 "vl6",
49 "vl7",
50 /* 8-15. */
51 "vl8",
52 "vl16",
53 "vl32",
54 "vl64",
55 "vl128",
56 "vl256",
57 0,
58 0,
59 /* 16-23. */
60 0,
61 0,
62 0,
63 0,
64 0,
65 0,
66 0,
67 0,
68 /* 24-31. */
69 0,
70 0,
71 0,
72 0,
73 0,
74 "mul4",
75 "mul3",
76 "all"
77 };
78
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array[16] = {
82 /* 0-7. */
83 "pldl1keep",
84 "pldl1strm",
85 "pldl2keep",
86 "pldl2strm",
87 "pldl3keep",
88 "pldl3strm",
89 0,
90 0,
91 /* 8-15. */
92 "pstl1keep",
93 "pstl1strm",
94 "pstl2keep",
95 "pstl2strm",
96 "pstl3keep",
97 "pstl3strm",
98 0,
99 0
100 };
101
102 /* Helper functions to determine which operand is used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
121 enum data_pattern
122 {
123 DP_UNKNOWN,
124 DP_VECTOR_3SAME,
125 DP_VECTOR_LONG,
126 DP_VECTOR_WIDE,
127 DP_VECTOR_ACROSS_LANES,
128 };
129
130 static const char significant_operand_index [] =
131 {
132 0, /* DP_UNKNOWN, by default using operand 0. */
133 0, /* DP_VECTOR_3SAME */
134 1, /* DP_VECTOR_LONG */
135 2, /* DP_VECTOR_WIDE */
136 1, /* DP_VECTOR_ACROSS_LANES */
137 };
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time we need to select an operand. We can
191 either cache the calculated result or statically generate the data;
192 however, it is not obvious that the optimization would bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
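
/* Illustrative sketch, not part of the library: for a widening AdvSIMD
   form such as SADDL <Vd>.8H, <Vn>.8B, <Vm>.8B the qualifier sequence is
   (V_8H, V_8B, V_8B); get_data_pattern classifies it as DP_VECTOR_LONG,
   so the size:Q fields are encoded from operand 1.  Guarded out so it
   does not affect the build.  */
#if 0
static void
sizeq_selection_example (void)
{
  aarch64_opnd_qualifier_seq_t widening
    = { AARCH64_OPND_QLF_V_8H, AARCH64_OPND_QLF_V_8B, AARCH64_OPND_QLF_V_8B };
  assert (get_data_pattern (widening) == DP_VECTOR_LONG);
  assert (significant_operand_index[DP_VECTOR_LONG] == 1);
}
#endif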
201 \f
202 const aarch64_field fields[] =
203 {
204 { 0, 0 }, /* NIL. */
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_2: in rmif instructions. */
244 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_2: in rmif instructions. */
246 { 10, 4 }, /* imm4_3: in adddg/subg instructions. */
247 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
248 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
249 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
250 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
251 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
252 { 5, 14 }, /* imm14: in test bit and branch instructions. */
253 { 5, 16 }, /* imm16: in exception instructions. */
254 { 0, 26 }, /* imm26: in unconditional branch instructions. */
255 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
256 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
257 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
258 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
259 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
260 { 22, 1 }, /* N: in logical (immediate) instructions. */
261 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
262 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
263 { 31, 1 }, /* sf: in integer data processing instructions. */
264 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
265 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
266 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
267 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
268 { 31, 1 }, /* b5: in the test bit and branch instructions. */
269 { 19, 5 }, /* b40: in the test bit and branch instructions. */
270 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
271 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
272 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
273 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
274 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
275 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
276 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
277 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
278 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
279 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
280 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
281 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
282 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
283 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
284 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
285 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
286 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
287 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
288 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
289 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
290 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
291 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
292 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
293 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
294 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
295 { 5, 1 }, /* SVE_i1: single-bit immediate. */
296 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
297 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
298 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
299 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
300 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
301 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
302 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
303 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
304 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
305 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
306 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
307 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
308 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
309 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
310 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
311 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
312 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
313 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
314 { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
315 { 16, 4 }, /* SVE_tsz: triangular size select. */
316 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
317 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
318 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
319 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
320 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
321 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
322 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
323 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
324 { 12, 2 }, /* SM3: Indexed element SM3 2 bits index immediate. */
325 { 22, 1 }, /* sz: 1-bit element size select. */
326 };
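
/* Illustrative sketch, not part of the library: each entry above is an
   (lsb, width) pair giving the position of the named field in the 32-bit
   instruction word (assuming the aarch64_field members declared in
   aarch64-opc.h are named lsb and width).  Extracting such a field is a
   shift and mask, which is what the library's own helpers do elsewhere.
   Guarded out so it does not affect the build.  */
#if 0
static aarch64_insn
example_extract_field (const aarch64_field *field, aarch64_insn code)
{
  aarch64_insn mask = ((aarch64_insn) 1 << field->width) - 1;
  return (code >> field->lsb) & mask;
}
#endif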
327
328 enum aarch64_operand_class
329 aarch64_get_operand_class (enum aarch64_opnd type)
330 {
331 return aarch64_operands[type].op_class;
332 }
333
334 const char *
335 aarch64_get_operand_name (enum aarch64_opnd type)
336 {
337 return aarch64_operands[type].name;
338 }
339
340 /* Get operand description string.
341 This is usually for diagnostic purposes. */
342 const char *
343 aarch64_get_operand_desc (enum aarch64_opnd type)
344 {
345 return aarch64_operands[type].desc;
346 }
347
348 /* Table of all conditional affixes. */
349 const aarch64_cond aarch64_conds[16] =
350 {
351 {{"eq", "none"}, 0x0},
352 {{"ne", "any"}, 0x1},
353 {{"cs", "hs", "nlast"}, 0x2},
354 {{"cc", "lo", "ul", "last"}, 0x3},
355 {{"mi", "first"}, 0x4},
356 {{"pl", "nfrst"}, 0x5},
357 {{"vs"}, 0x6},
358 {{"vc"}, 0x7},
359 {{"hi", "pmore"}, 0x8},
360 {{"ls", "plast"}, 0x9},
361 {{"ge", "tcont"}, 0xa},
362 {{"lt", "tstop"}, 0xb},
363 {{"gt"}, 0xc},
364 {{"le"}, 0xd},
365 {{"al"}, 0xe},
366 {{"nv"}, 0xf},
367 };
368
369 const aarch64_cond *
370 get_cond_from_value (aarch64_insn value)
371 {
372 assert (value < 16);
373 return &aarch64_conds[(unsigned int) value];
374 }
375
376 const aarch64_cond *
377 get_inverted_cond (const aarch64_cond *cond)
378 {
379 return &aarch64_conds[cond->value ^ 0x1];
380 }
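
/* Illustrative sketch, not part of the library: flipping bit 0 of a
   condition value yields its inverse, e.g. EQ (0x0) <-> NE (0x1) and
   GE (0xa) <-> LT (0xb).  Guarded out so it does not affect the build.  */
#if 0
static void
inverted_cond_example (void)
{
  const aarch64_cond *eq = get_cond_from_value (0x0);
  const aarch64_cond *ge = get_cond_from_value (0xa);
  assert (get_inverted_cond (eq)->value == 0x1);	/* NE.  */
  assert (get_inverted_cond (ge)->value == 0xb);	/* LT.  */
}
#endif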
381
382 /* Table describing the operand extension/shifting operators; indexed by
383 enum aarch64_modifier_kind.
384
385 The value column provides the most common values for encoding modifiers,
386 which enables table-driven encoding/decoding for the modifiers. */
387 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
388 {
389 {"none", 0x0},
390 {"msl", 0x0},
391 {"ror", 0x3},
392 {"asr", 0x2},
393 {"lsr", 0x1},
394 {"lsl", 0x0},
395 {"uxtb", 0x0},
396 {"uxth", 0x1},
397 {"uxtw", 0x2},
398 {"uxtx", 0x3},
399 {"sxtb", 0x4},
400 {"sxth", 0x5},
401 {"sxtw", 0x6},
402 {"sxtx", 0x7},
403 {"mul", 0x0},
404 {"mul vl", 0x0},
405 {NULL, 0},
406 };
407
408 enum aarch64_modifier_kind
409 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
410 {
411 return desc - aarch64_operand_modifiers;
412 }
413
414 aarch64_insn
415 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
416 {
417 return aarch64_operand_modifiers[kind].value;
418 }
419
420 enum aarch64_modifier_kind
421 aarch64_get_operand_modifier_from_value (aarch64_insn value,
422 bfd_boolean extend_p)
423 {
424 if (extend_p == TRUE)
425 return AARCH64_MOD_UXTB + value;
426 else
427 return AARCH64_MOD_LSL - value;
428 }
429
430 bfd_boolean
431 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
432 {
433 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
434 ? TRUE : FALSE;
435 }
436
437 static inline bfd_boolean
438 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
439 {
440 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
441 ? TRUE : FALSE;
442 }
443
444 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
445 {
446 { "#0x00", 0x0 },
447 { "oshld", 0x1 },
448 { "oshst", 0x2 },
449 { "osh", 0x3 },
450 { "#0x04", 0x4 },
451 { "nshld", 0x5 },
452 { "nshst", 0x6 },
453 { "nsh", 0x7 },
454 { "#0x08", 0x8 },
455 { "ishld", 0x9 },
456 { "ishst", 0xa },
457 { "ish", 0xb },
458 { "#0x0c", 0xc },
459 { "ld", 0xd },
460 { "st", 0xe },
461 { "sy", 0xf },
462 };
463
464 /* Table describing the operands supported by the aliases of the HINT
465 instruction.
466
467 The name column is the operand that is accepted for the alias. The value
468 column is the hint number of the alias. The list of operands is terminated
469 by NULL in the name column. */
470
471 const struct aarch64_name_value_pair aarch64_hint_options[] =
472 {
473 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
474 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
475 { "csync", HINT_OPD_CSYNC }, /* PSB CSYNC. */
476 { "c", HINT_OPD_C }, /* BTI C. */
477 { "j", HINT_OPD_J }, /* BTI J. */
478 { "jc", HINT_OPD_JC }, /* BTI JC. */
479 { NULL, HINT_OPD_NULL },
480 };
481
482 /* op -> op: load = 0 instruction = 1 store = 2
483 l -> level: 1-3
484 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
485 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
486 const struct aarch64_name_value_pair aarch64_prfops[32] =
487 {
488 { "pldl1keep", B(0, 1, 0) },
489 { "pldl1strm", B(0, 1, 1) },
490 { "pldl2keep", B(0, 2, 0) },
491 { "pldl2strm", B(0, 2, 1) },
492 { "pldl3keep", B(0, 3, 0) },
493 { "pldl3strm", B(0, 3, 1) },
494 { NULL, 0x06 },
495 { NULL, 0x07 },
496 { "plil1keep", B(1, 1, 0) },
497 { "plil1strm", B(1, 1, 1) },
498 { "plil2keep", B(1, 2, 0) },
499 { "plil2strm", B(1, 2, 1) },
500 { "plil3keep", B(1, 3, 0) },
501 { "plil3strm", B(1, 3, 1) },
502 { NULL, 0x0e },
503 { NULL, 0x0f },
504 { "pstl1keep", B(2, 1, 0) },
505 { "pstl1strm", B(2, 1, 1) },
506 { "pstl2keep", B(2, 2, 0) },
507 { "pstl2strm", B(2, 2, 1) },
508 { "pstl3keep", B(2, 3, 0) },
509 { "pstl3strm", B(2, 3, 1) },
510 { NULL, 0x16 },
511 { NULL, 0x17 },
512 { NULL, 0x18 },
513 { NULL, 0x19 },
514 { NULL, 0x1a },
515 { NULL, 0x1b },
516 { NULL, 0x1c },
517 { NULL, 0x1d },
518 { NULL, 0x1e },
519 { NULL, 0x1f },
520 };
521 #undef B
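
/* Illustrative sketch, not part of the library: the B macro above packs
   op:level:temporal into the 5-bit prfop value, e.g. "pstl2strm" is
   B (2, 2, 1) = (2 << 3) | ((2 - 1) << 1) | 1 = 0x13, which is also its
   index in aarch64_prfops.  Guarded out so it does not affect the build.  */
#if 0
static void
prfop_encoding_example (void)
{
  assert (aarch64_prfops[0x13].value == 0x13);
  assert (strcmp (aarch64_prfops[0x13].name, "pstl2strm") == 0);
}
#endif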
522 \f
523 /* Utilities on value constraint. */
524
525 static inline int
526 value_in_range_p (int64_t value, int low, int high)
527 {
528 return (value >= low && value <= high) ? 1 : 0;
529 }
530
531 /* Return true if VALUE is a multiple of ALIGN. */
532 static inline int
533 value_aligned_p (int64_t value, int align)
534 {
535 return (value % align) == 0;
536 }
537
538 /* Return non-zero if a signed VALUE fits in a field of WIDTH bits. */
539 static inline int
540 value_fit_signed_field_p (int64_t value, unsigned width)
541 {
542 assert (width < 32);
543 if (width < sizeof (value) * 8)
544 {
545 int64_t lim = (int64_t)1 << (width - 1);
546 if (value >= -lim && value < lim)
547 return 1;
548 }
549 return 0;
550 }
551
552 /* Return non-zero if an unsigned VALUE fits in a field of WIDTH bits. */
553 static inline int
554 value_fit_unsigned_field_p (int64_t value, unsigned width)
555 {
556 assert (width < 32);
557 if (width < sizeof (value) * 8)
558 {
559 int64_t lim = (int64_t)1 << width;
560 if (value >= 0 && value < lim)
561 return 1;
562 }
563 return 0;
564 }
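
/* Illustrative sketch, not part of the library: an 8-bit signed field
   holds -128..127 and an 8-bit unsigned field holds 0..255, so 127 and
   -128 pass the signed check while 128 only passes the unsigned one.
   Guarded out so it does not affect the build.  */
#if 0
static void
field_fit_example (void)
{
  assert (value_fit_signed_field_p (127, 8));
  assert (value_fit_signed_field_p (-128, 8));
  assert (!value_fit_signed_field_p (128, 8));
  assert (value_fit_unsigned_field_p (255, 8));
  assert (!value_fit_unsigned_field_p (256, 8));
}
#endif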
565
566 /* Return 1 if OPERAND is SP or WSP. */
567 int
568 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
569 {
570 return ((aarch64_get_operand_class (operand->type)
571 == AARCH64_OPND_CLASS_INT_REG)
572 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
573 && operand->reg.regno == 31);
574 }
575
576 /* Return 1 if OPERAND is XZR or WZR. */
577 int
578 aarch64_zero_register_p (const aarch64_opnd_info *operand)
579 {
580 return ((aarch64_get_operand_class (operand->type)
581 == AARCH64_OPND_CLASS_INT_REG)
582 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
583 && operand->reg.regno == 31);
584 }
585
586 /* Return true if the operand *OPERAND, which has the operand code
587 OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
588 qualified by the qualifier TARGET. */
589
590 static inline int
591 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
592 aarch64_opnd_qualifier_t target)
593 {
594 switch (operand->qualifier)
595 {
596 case AARCH64_OPND_QLF_W:
597 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
598 return 1;
599 break;
600 case AARCH64_OPND_QLF_X:
601 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
602 return 1;
603 break;
604 case AARCH64_OPND_QLF_WSP:
605 if (target == AARCH64_OPND_QLF_W
606 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
607 return 1;
608 break;
609 case AARCH64_OPND_QLF_SP:
610 if (target == AARCH64_OPND_QLF_X
611 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
612 return 1;
613 break;
614 default:
615 break;
616 }
617
618 return 0;
619 }
620
621 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
622 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
623
624 Return NIL if more than one expected qualifier is found. */
625
626 aarch64_opnd_qualifier_t
627 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
628 int idx,
629 const aarch64_opnd_qualifier_t known_qlf,
630 int known_idx)
631 {
632 int i, saved_i;
633
634 /* Special case.
635
636 When the known qualifier is NIL, we have to assume that there is only
637 one qualifier sequence in the *QSEQ_LIST and return the corresponding
638 qualifier directly. One scenario is that for instruction
639 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
640 which has only one possible valid qualifier sequence
641 NIL, S_D
642 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
643 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
644
645 Because the qualifier NIL has dual roles in the qualifier sequence:
646 it can mean no qualifier for the operand, or that the qualifier sequence is
647 not in use (when all qualifiers in the sequence are NILs), we have to
648 handle this special case here. */
649 if (known_qlf == AARCH64_OPND_NIL)
650 {
651 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
652 return qseq_list[0][idx];
653 }
654
655 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
656 {
657 if (qseq_list[i][known_idx] == known_qlf)
658 {
659 if (saved_i != -1)
660 /* More than one sequence was found to have KNOWN_QLF at
661 KNOWN_IDX. */
662 return AARCH64_OPND_NIL;
663 saved_i = i;
664 }
665 }
666
667 return qseq_list[saved_i][idx];
668 }
669
670 enum operand_qualifier_kind
671 {
672 OQK_NIL,
673 OQK_OPD_VARIANT,
674 OQK_VALUE_IN_RANGE,
675 OQK_MISC,
676 };
677
678 /* Operand qualifier description. */
679 struct operand_qualifier_data
680 {
681 /* The usage of the three data fields depends on the qualifier kind. */
682 int data0;
683 int data1;
684 int data2;
685 /* Description. */
686 const char *desc;
687 /* Kind. */
688 enum operand_qualifier_kind kind;
689 };
690
691 /* Indexed by the operand qualifier enumerators. */
692 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
693 {
694 {0, 0, 0, "NIL", OQK_NIL},
695
696 /* Operand variant qualifiers.
697 First 3 fields:
698 element size, number of elements and common value for encoding. */
699
700 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
701 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
702 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
703 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
704
705 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
706 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
707 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
708 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
709 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
710 {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
711
712 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
713 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
714 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
715 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
716 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
717 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
718 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
719 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
720 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
721 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
722 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
723
724 {0, 0, 0, "z", OQK_OPD_VARIANT},
725 {0, 0, 0, "m", OQK_OPD_VARIANT},
726
727 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
728 {16, 0, 0, "tag", OQK_OPD_VARIANT},
729
730 /* Qualifiers constraining the value range.
731 First 3 fields:
732 Lower bound, higher bound, unused. */
733
734 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
735 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
736 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
737 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
738 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
739 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
740 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
741
742 /* Qualifiers for miscellaneous purposes.
743 First 3 fields:
744 unused, unused and unused. */
745
746 {0, 0, 0, "lsl", 0},
747 {0, 0, 0, "msl", 0},
748
749 {0, 0, 0, "retrieving", 0},
750 };
751
752 static inline bfd_boolean
753 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
754 {
755 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
756 ? TRUE : FALSE;
757 }
758
759 static inline bfd_boolean
760 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
761 {
762 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
763 ? TRUE : FALSE;
764 }
765
766 const char*
767 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
768 {
769 return aarch64_opnd_qualifiers[qualifier].desc;
770 }
771
772 /* Given an operand qualifier, return the expected data element size
773 of a qualified operand. */
774 unsigned char
775 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
776 {
777 assert (operand_variant_qualifier_p (qualifier) == TRUE);
778 return aarch64_opnd_qualifiers[qualifier].data0;
779 }
780
781 unsigned char
782 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
783 {
784 assert (operand_variant_qualifier_p (qualifier) == TRUE);
785 return aarch64_opnd_qualifiers[qualifier].data1;
786 }
787
788 aarch64_insn
789 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
790 {
791 assert (operand_variant_qualifier_p (qualifier) == TRUE);
792 return aarch64_opnd_qualifiers[qualifier].data2;
793 }
794
795 static int
796 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
797 {
798 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
799 return aarch64_opnd_qualifiers[qualifier].data0;
800 }
801
802 static int
803 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
804 {
805 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
806 return aarch64_opnd_qualifiers[qualifier].data1;
807 }
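
/* Illustrative sketch, not part of the library: for the "4s" variant in
   the table above, the accessors return element size 4, element count 4
   and standard encoding value 0x5 (assuming AARCH64_OPND_QLF_V_4S is the
   enumerator that indexes that entry).  Guarded out so it does not affect
   the build.  */
#if 0
static void
qualifier_accessor_example (void)
{
  assert (aarch64_get_qualifier_esize (AARCH64_OPND_QLF_V_4S) == 4);
  assert (aarch64_get_qualifier_nelem (AARCH64_OPND_QLF_V_4S) == 4);
  assert (aarch64_get_qualifier_standard_value (AARCH64_OPND_QLF_V_4S) == 0x5);
}
#endif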
808
809 #ifdef DEBUG_AARCH64
810 void
811 aarch64_verbose (const char *str, ...)
812 {
813 va_list ap;
814 va_start (ap, str);
815 printf ("#### ");
816 vprintf (str, ap);
817 printf ("\n");
818 va_end (ap);
819 }
820
821 static inline void
822 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
823 {
824 int i;
825 printf ("#### \t");
826 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
827 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
828 printf ("\n");
829 }
830
831 static void
832 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
833 const aarch64_opnd_qualifier_t *qualifier)
834 {
835 int i;
836 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
837
838 aarch64_verbose ("dump_match_qualifiers:");
839 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
840 curr[i] = opnd[i].qualifier;
841 dump_qualifier_sequence (curr);
842 aarch64_verbose ("against");
843 dump_qualifier_sequence (qualifier);
844 }
845 #endif /* DEBUG_AARCH64 */
846
847 /* This function checks whether the instruction described by OPCODE is a
848 destructive instruction based on the usage of its registers. It does not
849 recognize unary destructive instructions. */
850 bfd_boolean
851 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
852 {
853 int i = 0;
854 const enum aarch64_opnd *opnds = opcode->operands;
855
856 if (opnds[0] == AARCH64_OPND_NIL)
857 return FALSE;
858
859 while (opnds[++i] != AARCH64_OPND_NIL)
860 if (opnds[i] == opnds[0])
861 return TRUE;
862
863 return FALSE;
864 }
865
866 /* TODO: improve this; we could have an extra field at run time to
867 store the number of operands rather than calculating it every time. */
868
869 int
870 aarch64_num_of_operands (const aarch64_opcode *opcode)
871 {
872 int i = 0;
873 const enum aarch64_opnd *opnds = opcode->operands;
874 while (opnds[i++] != AARCH64_OPND_NIL)
875 ;
876 --i;
877 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
878 return i;
879 }
880
881 /* Find the best-matching qualifier sequence in *QUALIFIERS_LIST for INST.
882 If one is found, fill it in *RET and return 1; otherwise return 0.
883
884 N.B. on entry, it is very likely that only some operands in *INST
885 have had their qualifiers established.
886
887 If STOP_AT is not -1, the function will only try to match
888 the qualifier sequence for operands before and including the operand
889 of index STOP_AT; and on success *RET will only be filled with the first
890 (STOP_AT+1) qualifiers.
891
892 A couple of examples of the matching algorithm:
893
894 X,W,NIL should match
895 X,W,NIL
896
897 NIL,NIL should match
898 X ,NIL
899
900 Apart from serving the main encoding routine, this can also be called
901 during or after the operand decoding. */
902
903 int
904 aarch64_find_best_match (const aarch64_inst *inst,
905 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
906 int stop_at, aarch64_opnd_qualifier_t *ret)
907 {
908 int found = 0;
909 int i, num_opnds;
910 const aarch64_opnd_qualifier_t *qualifiers;
911
912 num_opnds = aarch64_num_of_operands (inst->opcode);
913 if (num_opnds == 0)
914 {
915 DEBUG_TRACE ("SUCCEED: no operand");
916 return 1;
917 }
918
919 if (stop_at < 0 || stop_at >= num_opnds)
920 stop_at = num_opnds - 1;
921
922 /* For each pattern. */
923 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
924 {
925 int j;
926 qualifiers = *qualifiers_list;
927
928 /* Start as positive. */
929 found = 1;
930
931 DEBUG_TRACE ("%d", i);
932 #ifdef DEBUG_AARCH64
933 if (debug_dump)
934 dump_match_qualifiers (inst->operands, qualifiers);
935 #endif
936
937 /* Most opcodes have far fewer patterns in the list.
938 The first NIL qualifier indicates the end of the list. */
939 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
940 {
941 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
942 if (i)
943 found = 0;
944 break;
945 }
946
947 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
948 {
949 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
950 {
951 /* Either the operand does not have a qualifier, or the qualifier
952 for the operand needs to be deduced from the qualifier
953 sequence.
954 In the latter case, any constraint checking related to
955 the obtained qualifier should be done later in
956 operand_general_constraint_met_p. */
957 continue;
958 }
959 else if (*qualifiers != inst->operands[j].qualifier)
960 {
961 /* Unless the target qualifier can also qualify the operand
962 (which already has a non-NIL qualifier), non-equal
963 qualifiers generally do not match. */
964 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
965 continue;
966 else
967 {
968 found = 0;
969 break;
970 }
971 }
972 else
973 continue; /* Equal qualifiers are certainly matched. */
974 }
975
976 /* Qualifiers established. */
977 if (found == 1)
978 break;
979 }
980
981 if (found == 1)
982 {
983 /* Fill the result in *RET. */
984 int j;
985 qualifiers = *qualifiers_list;
986
987 DEBUG_TRACE ("complete qualifiers using list %d", i);
988 #ifdef DEBUG_AARCH64
989 if (debug_dump)
990 dump_qualifier_sequence (qualifiers);
991 #endif
992
993 for (j = 0; j <= stop_at; ++j, ++qualifiers)
994 ret[j] = *qualifiers;
995 for (; j < AARCH64_MAX_OPND_NUM; ++j)
996 ret[j] = AARCH64_OPND_QLF_NIL;
997
998 DEBUG_TRACE ("SUCCESS");
999 return 1;
1000 }
1001
1002 DEBUG_TRACE ("FAIL");
1003 return 0;
1004 }
1005
1006 /* Operand qualifier matching and resolving.
1007
1008 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1009 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1010
1011 If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
1012 succeeds. */
1013
1014 static int
1015 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
1016 {
1017 int i, nops;
1018 aarch64_opnd_qualifier_seq_t qualifiers;
1019
1020 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1021 qualifiers))
1022 {
1023 DEBUG_TRACE ("matching FAIL");
1024 return 0;
1025 }
1026
1027 if (inst->opcode->flags & F_STRICT)
1028 {
1029 /* Require an exact qualifier match, even for NIL qualifiers. */
1030 nops = aarch64_num_of_operands (inst->opcode);
1031 for (i = 0; i < nops; ++i)
1032 if (inst->operands[i].qualifier != qualifiers[i])
1033 return FALSE;
1034 }
1035
1036 /* Update the qualifiers. */
1037 if (update_p == TRUE)
1038 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1039 {
1040 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1041 break;
1042 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1043 "update %s with %s for operand %d",
1044 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1045 aarch64_get_qualifier_name (qualifiers[i]), i);
1046 inst->operands[i].qualifier = qualifiers[i];
1047 }
1048
1049 DEBUG_TRACE ("matching SUCCESS");
1050 return 1;
1051 }
1052
1053 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1054 register by MOVZ.
1055
1056 IS32 indicates whether VALUE is a 32-bit immediate or not.
1057 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1058 amount will be returned in *SHIFT_AMOUNT. */
1059
1060 bfd_boolean
1061 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1062 {
1063 int amount;
1064
1065 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1066
1067 if (is32)
1068 {
1069 /* Allow all zeros or all ones in top 32-bits, so that
1070 32-bit constant expressions like ~0x80000000 are
1071 permitted. */
1072 uint64_t ext = value;
1073 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1074 /* Immediate out of range. */
1075 return FALSE;
1076 value &= (int64_t) 0xffffffff;
1077 }
1078
1079 /* first, try movz then movn */
1080 amount = -1;
1081 if ((value & ((int64_t) 0xffff << 0)) == value)
1082 amount = 0;
1083 else if ((value & ((int64_t) 0xffff << 16)) == value)
1084 amount = 16;
1085 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1086 amount = 32;
1087 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1088 amount = 48;
1089
1090 if (amount == -1)
1091 {
1092 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1093 return FALSE;
1094 }
1095
1096 if (shift_amount != NULL)
1097 *shift_amount = amount;
1098
1099 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1100
1101 return TRUE;
1102 }
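
/* Illustrative sketch, not part of the library: 0xffff0000 can be built
   by MOVZ with a logical left shift of 16, while 0x12345678 has non-zero
   bits in two 16-bit chunks and cannot.  Guarded out so it does not
   affect the build.  */
#if 0
static void
wide_constant_example (void)
{
  unsigned int shift;
  assert (aarch64_wide_constant_p (0xffff0000, 1, &shift) && shift == 16);
  assert (!aarch64_wide_constant_p (0x12345678, 1, NULL));
}
#endif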
1103
1104 /* Build the accepted values for immediate logical SIMD instructions.
1105
1106 The standard encodings of the immediate value are:
1107 N imms immr SIMD size R S
1108 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1109 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1110 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1111 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1112 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1113 0 11110s 00000r 2 UInt(r) UInt(s)
1114 where all-ones value of S is reserved.
1115
1116 Let's call E the SIMD size.
1117
1118 The immediate value is a run of S+1 '1' bits rotated to the right by R.
1119
1120 The total number of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1121 (remember S != E - 1). */
1122
1123 #define TOTAL_IMM_NB 5334
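
/* Illustrative sketch, not part of the library: the 5334 above is just the
   sum of E * (E - 1) over the element sizes E = 2, 4, ..., 64, since each
   size allows E rotations of E - 1 possible run lengths.  Guarded out so
   it does not affect the build.  */
#if 0
static int
count_logical_immediates (void)
{
  int e, total = 0;
  for (e = 2; e <= 64; e *= 2)
    total += e * (e - 1);
  return total;			/* 5334 == TOTAL_IMM_NB.  */
}
#endif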
1124
1125 typedef struct
1126 {
1127 uint64_t imm;
1128 aarch64_insn encoding;
1129 } simd_imm_encoding;
1130
1131 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1132
1133 static int
1134 simd_imm_encoding_cmp(const void *i1, const void *i2)
1135 {
1136 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1137 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1138
1139 if (imm1->imm < imm2->imm)
1140 return -1;
1141 if (imm1->imm > imm2->imm)
1142 return +1;
1143 return 0;
1144 }
1145
1146 /* immediate bitfield standard encoding
1147 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1148 1 ssssss rrrrrr 64 rrrrrr ssssss
1149 0 0sssss 0rrrrr 32 rrrrr sssss
1150 0 10ssss 00rrrr 16 rrrr ssss
1151 0 110sss 000rrr 8 rrr sss
1152 0 1110ss 0000rr 4 rr ss
1153 0 11110s 00000r 2 r s */
1154 static inline int
1155 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1156 {
1157 return (is64 << 12) | (r << 6) | s;
1158 }
1159
1160 static void
1161 build_immediate_table (void)
1162 {
1163 uint32_t log_e, e, s, r, s_mask;
1164 uint64_t mask, imm;
1165 int nb_imms;
1166 int is64;
1167
1168 nb_imms = 0;
1169 for (log_e = 1; log_e <= 6; log_e++)
1170 {
1171 /* Get element size. */
1172 e = 1u << log_e;
1173 if (log_e == 6)
1174 {
1175 is64 = 1;
1176 mask = 0xffffffffffffffffull;
1177 s_mask = 0;
1178 }
1179 else
1180 {
1181 is64 = 0;
1182 mask = (1ull << e) - 1;
1183 /* log_e s_mask
1184 1 ((1 << 4) - 1) << 2 = 111100
1185 2 ((1 << 3) - 1) << 3 = 111000
1186 3 ((1 << 2) - 1) << 4 = 110000
1187 4 ((1 << 1) - 1) << 5 = 100000
1188 5 ((1 << 0) - 1) << 6 = 000000 */
1189 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1190 }
1191 for (s = 0; s < e - 1; s++)
1192 for (r = 0; r < e; r++)
1193 {
1194 /* s+1 consecutive bits to 1 (s < 63) */
1195 imm = (1ull << (s + 1)) - 1;
1196 /* rotate right by r */
1197 if (r != 0)
1198 imm = (imm >> r) | ((imm << (e - r)) & mask);
1199 /* replicate the constant depending on SIMD size */
1200 switch (log_e)
1201 {
1202 case 1: imm = (imm << 2) | imm;
1203 /* Fall through. */
1204 case 2: imm = (imm << 4) | imm;
1205 /* Fall through. */
1206 case 3: imm = (imm << 8) | imm;
1207 /* Fall through. */
1208 case 4: imm = (imm << 16) | imm;
1209 /* Fall through. */
1210 case 5: imm = (imm << 32) | imm;
1211 /* Fall through. */
1212 case 6: break;
1213 default: abort ();
1214 }
1215 simd_immediates[nb_imms].imm = imm;
1216 simd_immediates[nb_imms].encoding =
1217 encode_immediate_bitfield(is64, s | s_mask, r);
1218 nb_imms++;
1219 }
1220 }
1221 assert (nb_imms == TOTAL_IMM_NB);
1222 qsort(simd_immediates, nb_imms,
1223 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1224 }
1225
1226 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1227 be accepted by logical (immediate) instructions
1228 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1229
1230 ESIZE is the number of bytes in the decoded immediate value.
1231 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1232 VALUE will be returned in *ENCODING. */
1233
1234 bfd_boolean
1235 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1236 {
1237 simd_imm_encoding imm_enc;
1238 const simd_imm_encoding *imm_encoding;
1239 static bfd_boolean initialized = FALSE;
1240 uint64_t upper;
1241 int i;
1242
1243 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1244 value, esize);
1245
1246 if (!initialized)
1247 {
1248 build_immediate_table ();
1249 initialized = TRUE;
1250 }
1251
1252 /* Allow all zeros or all ones in top bits, so that
1253 constant expressions like ~1 are permitted. */
1254 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1255 if ((value & ~upper) != value && (value | upper) != value)
1256 return FALSE;
1257
1258 /* Replicate to a full 64-bit value. */
1259 value &= ~upper;
1260 for (i = esize * 8; i < 64; i *= 2)
1261 value |= (value << i);
1262
1263 imm_enc.imm = value;
1264 imm_encoding = (const simd_imm_encoding *)
1265 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1266 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1267 if (imm_encoding == NULL)
1268 {
1269 DEBUG_TRACE ("exit with FALSE");
1270 return FALSE;
1271 }
1272 if (encoding != NULL)
1273 *encoding = imm_encoding->encoding;
1274 DEBUG_TRACE ("exit with TRUE");
1275 return TRUE;
1276 }
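
/* Illustrative sketch, not part of the library: the alternating-bit
   pattern 0x5555555555555555 is a valid 64-bit bitmask immediate (element
   size 2 with a single set bit), whereas 0x1234 cannot be expressed as a
   replicated, rotated run of ones.  Guarded out so it does not affect the
   build.  */
#if 0
static void
logical_immediate_example (void)
{
  aarch64_insn enc;
  assert (aarch64_logical_immediate_p (0x5555555555555555ULL, 8, &enc));
  assert (!aarch64_logical_immediate_p (0x1234, 8, NULL));
}
#endif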
1277
1278 /* If 64-bit immediate IMM is in the format of
1279 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1280 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1281 of value "abcdefgh". Otherwise return -1. */
1282 int
1283 aarch64_shrink_expanded_imm8 (uint64_t imm)
1284 {
1285 int i, ret;
1286 uint32_t byte;
1287
1288 ret = 0;
1289 for (i = 0; i < 8; i++)
1290 {
1291 byte = (imm >> (8 * i)) & 0xff;
1292 if (byte == 0xff)
1293 ret |= 1 << i;
1294 else if (byte != 0x00)
1295 return -1;
1296 }
1297 return ret;
1298 }
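
/* Illustrative sketch, not part of the library: 0x00ff00ff00ff00ff has
   0xff in bytes 0, 2, 4 and 6, so it shrinks to abcdefgh = 0x55; a byte
   that is neither 0x00 nor 0xff makes the function return -1.  Guarded
   out so it does not affect the build.  */
#if 0
static void
shrink_imm8_example (void)
{
  assert (aarch64_shrink_expanded_imm8 (0x00ff00ff00ff00ffULL) == 0x55);
  assert (aarch64_shrink_expanded_imm8 (0x00ff00ff00ff00feULL) == -1);
}
#endif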
1299
1300 /* Utility inline functions for operand_general_constraint_met_p. */
1301
1302 static inline void
1303 set_error (aarch64_operand_error *mismatch_detail,
1304 enum aarch64_operand_error_kind kind, int idx,
1305 const char* error)
1306 {
1307 if (mismatch_detail == NULL)
1308 return;
1309 mismatch_detail->kind = kind;
1310 mismatch_detail->index = idx;
1311 mismatch_detail->error = error;
1312 }
1313
1314 static inline void
1315 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1316 const char* error)
1317 {
1318 if (mismatch_detail == NULL)
1319 return;
1320 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1321 }
1322
1323 static inline void
1324 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1325 int idx, int lower_bound, int upper_bound,
1326 const char* error)
1327 {
1328 if (mismatch_detail == NULL)
1329 return;
1330 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1331 mismatch_detail->data[0] = lower_bound;
1332 mismatch_detail->data[1] = upper_bound;
1333 }
1334
1335 static inline void
1336 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1337 int idx, int lower_bound, int upper_bound)
1338 {
1339 if (mismatch_detail == NULL)
1340 return;
1341 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1342 _("immediate value"));
1343 }
1344
1345 static inline void
1346 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1347 int idx, int lower_bound, int upper_bound)
1348 {
1349 if (mismatch_detail == NULL)
1350 return;
1351 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1352 _("immediate offset"));
1353 }
1354
1355 static inline void
1356 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1357 int idx, int lower_bound, int upper_bound)
1358 {
1359 if (mismatch_detail == NULL)
1360 return;
1361 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1362 _("register number"));
1363 }
1364
1365 static inline void
1366 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1367 int idx, int lower_bound, int upper_bound)
1368 {
1369 if (mismatch_detail == NULL)
1370 return;
1371 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1372 _("register element index"));
1373 }
1374
1375 static inline void
1376 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1377 int idx, int lower_bound, int upper_bound)
1378 {
1379 if (mismatch_detail == NULL)
1380 return;
1381 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1382 _("shift amount"));
1383 }
1384
1385 /* Report that the MUL modifier in operand IDX should be in the range
1386 [LOWER_BOUND, UPPER_BOUND]. */
1387 static inline void
1388 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1389 int idx, int lower_bound, int upper_bound)
1390 {
1391 if (mismatch_detail == NULL)
1392 return;
1393 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1394 _("multiplier"));
1395 }
1396
1397 static inline void
1398 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1399 int alignment)
1400 {
1401 if (mismatch_detail == NULL)
1402 return;
1403 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1404 mismatch_detail->data[0] = alignment;
1405 }
1406
1407 static inline void
1408 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1409 int expected_num)
1410 {
1411 if (mismatch_detail == NULL)
1412 return;
1413 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1414 mismatch_detail->data[0] = expected_num;
1415 }
1416
1417 static inline void
1418 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1419 const char* error)
1420 {
1421 if (mismatch_detail == NULL)
1422 return;
1423 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1424 }
1425
1426 /* General constraint checking based on operand code.
1427
1428 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1429 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1430
1431 This function has to be called after the qualifiers for all operands
1432 have been resolved.
1433
1434 A mismatch error message is returned in *MISMATCH_DETAIL upon request,
1435 i.e. when MISMATCH_DETAIL is non-NULL. This avoids generating
1436 error messages during disassembly, where they are not
1437 wanted. We avoid the dynamic construction of error message strings
1438 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1439 use a combination of error code, static string and some integer data to
1440 represent an error. */
1441
1442 static int
1443 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1444 enum aarch64_opnd type,
1445 const aarch64_opcode *opcode,
1446 aarch64_operand_error *mismatch_detail)
1447 {
1448 unsigned num, modifiers, shift;
1449 unsigned char size;
1450 int64_t imm, min_value, max_value;
1451 uint64_t uvalue, mask;
1452 const aarch64_opnd_info *opnd = opnds + idx;
1453 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1454
1455 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1456
1457 switch (aarch64_operands[type].op_class)
1458 {
1459 case AARCH64_OPND_CLASS_INT_REG:
1460 /* Check pair reg constraints for cas* instructions. */
1461 if (type == AARCH64_OPND_PAIRREG)
1462 {
1463 assert (idx == 1 || idx == 3);
1464 if (opnds[idx - 1].reg.regno % 2 != 0)
1465 {
1466 set_syntax_error (mismatch_detail, idx - 1,
1467 _("reg pair must start from even reg"));
1468 return 0;
1469 }
1470 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1471 {
1472 set_syntax_error (mismatch_detail, idx,
1473 _("reg pair must be contiguous"));
1474 return 0;
1475 }
1476 break;
1477 }
1478
1479 /* <Xt> may be optional in some IC and TLBI instructions. */
1480 if (type == AARCH64_OPND_Rt_SYS)
1481 {
1482 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1483 == AARCH64_OPND_CLASS_SYSTEM));
1484 if (opnds[1].present
1485 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1486 {
1487 set_other_error (mismatch_detail, idx, _("extraneous register"));
1488 return 0;
1489 }
1490 if (!opnds[1].present
1491 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1492 {
1493 set_other_error (mismatch_detail, idx, _("missing register"));
1494 return 0;
1495 }
1496 }
1497 switch (qualifier)
1498 {
1499 case AARCH64_OPND_QLF_WSP:
1500 case AARCH64_OPND_QLF_SP:
1501 if (!aarch64_stack_pointer_p (opnd))
1502 {
1503 set_other_error (mismatch_detail, idx,
1504 _("stack pointer register expected"));
1505 return 0;
1506 }
1507 break;
1508 default:
1509 break;
1510 }
1511 break;
1512
1513 case AARCH64_OPND_CLASS_SVE_REG:
1514 switch (type)
1515 {
1516 case AARCH64_OPND_SVE_Zm3_INDEX:
1517 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1518 case AARCH64_OPND_SVE_Zm4_INDEX:
1519 size = get_operand_fields_width (get_operand_from_code (type));
1520 shift = get_operand_specific_data (&aarch64_operands[type]);
1521 mask = (1 << shift) - 1;
1522 if (opnd->reg.regno > mask)
1523 {
1524 assert (mask == 7 || mask == 15);
1525 set_other_error (mismatch_detail, idx,
1526 mask == 15
1527 ? _("z0-z15 expected")
1528 : _("z0-z7 expected"));
1529 return 0;
1530 }
1531 mask = (1 << (size - shift)) - 1;
1532 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1533 {
1534 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1535 return 0;
1536 }
1537 break;
1538
1539 case AARCH64_OPND_SVE_Zn_INDEX:
1540 size = aarch64_get_qualifier_esize (opnd->qualifier);
1541 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1542 {
1543 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1544 0, 64 / size - 1);
1545 return 0;
1546 }
1547 break;
1548
1549 case AARCH64_OPND_SVE_ZnxN:
1550 case AARCH64_OPND_SVE_ZtxN:
1551 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1552 {
1553 set_other_error (mismatch_detail, idx,
1554 _("invalid register list"));
1555 return 0;
1556 }
1557 break;
1558
1559 default:
1560 break;
1561 }
1562 break;
1563
1564 case AARCH64_OPND_CLASS_PRED_REG:
1565 if (opnd->reg.regno >= 8
1566 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1567 {
1568 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1569 return 0;
1570 }
1571 break;
1572
1573 case AARCH64_OPND_CLASS_COND:
1574 if (type == AARCH64_OPND_COND1
1575 && (opnds[idx].cond->value & 0xe) == 0xe)
1576 {
1577 /* Don't allow AL or NV. */
1578 set_syntax_error (mismatch_detail, idx, NULL);
1579 }
1580 break;
1581
1582 case AARCH64_OPND_CLASS_ADDRESS:
1583 /* Check writeback. */
1584 switch (opcode->iclass)
1585 {
1586 case ldst_pos:
1587 case ldst_unscaled:
1588 case ldstnapair_offs:
1589 case ldstpair_off:
1590 case ldst_unpriv:
1591 if (opnd->addr.writeback == 1)
1592 {
1593 set_syntax_error (mismatch_detail, idx,
1594 _("unexpected address writeback"));
1595 return 0;
1596 }
1597 break;
1598 case ldst_imm10:
1599 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1600 {
1601 set_syntax_error (mismatch_detail, idx,
1602 _("unexpected address writeback"));
1603 return 0;
1604 }
1605 break;
1606 case ldst_imm9:
1607 case ldstpair_indexed:
1608 case asisdlsep:
1609 case asisdlsop:
1610 if (opnd->addr.writeback == 0)
1611 {
1612 set_syntax_error (mismatch_detail, idx,
1613 _("address writeback expected"));
1614 return 0;
1615 }
1616 break;
1617 default:
1618 assert (opnd->addr.writeback == 0);
1619 break;
1620 }
1621 switch (type)
1622 {
1623 case AARCH64_OPND_ADDR_SIMM7:
1624 /* Scaled signed 7-bit immediate offset. */
1625 /* Get the size of the data element that is accessed, which may be
1626 different from that of the source register size,
1627 e.g. in strb/ldrb. */
1628 size = aarch64_get_qualifier_esize (opnd->qualifier);
1629 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1630 {
1631 set_offset_out_of_range_error (mismatch_detail, idx,
1632 -64 * size, 63 * size);
1633 return 0;
1634 }
1635 if (!value_aligned_p (opnd->addr.offset.imm, size))
1636 {
1637 set_unaligned_error (mismatch_detail, idx, size);
1638 return 0;
1639 }
1640 break;
1641 case AARCH64_OPND_ADDR_OFFSET:
1642 case AARCH64_OPND_ADDR_SIMM9:
1643 /* Unscaled signed 9-bit immediate offset. */
1644 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1645 {
1646 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1647 return 0;
1648 }
1649 break;
1650
1651 case AARCH64_OPND_ADDR_SIMM9_2:
1652 /* Unscaled signed 9-bit immediate offset, which has to be negative
1653 or unaligned. */
1654 size = aarch64_get_qualifier_esize (qualifier);
1655 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1656 && !value_aligned_p (opnd->addr.offset.imm, size))
1657 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1658 return 1;
1659 set_other_error (mismatch_detail, idx,
1660 _("negative or unaligned offset expected"));
1661 return 0;
1662
1663 case AARCH64_OPND_ADDR_SIMM10:
1664 /* Scaled signed 10-bit immediate offset. */
1665 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1666 {
1667 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1668 return 0;
1669 }
1670 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1671 {
1672 set_unaligned_error (mismatch_detail, idx, 8);
1673 return 0;
1674 }
1675 break;
1676
1677 case AARCH64_OPND_ADDR_SIMM11:
1678 /* Signed 11-bit immediate offset (multiple of 16). */
1679 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1680 {
1681 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1682 return 0;
1683 }
1684
1685 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1686 {
1687 set_unaligned_error (mismatch_detail, idx, 16);
1688 return 0;
1689 }
1690 break;
1691
1692 case AARCH64_OPND_ADDR_SIMM13:
1693 /* Signed 13-bit immediate offset (multiple of 16). */
1694 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1695 {
1696 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1697 return 0;
1698 }
1699
1700 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1701 {
1702 set_unaligned_error (mismatch_detail, idx, 16);
1703 return 0;
1704 }
1705 break;
1706
1707 case AARCH64_OPND_SIMD_ADDR_POST:
1708 /* AdvSIMD load/store multiple structures, post-index. */
1709 assert (idx == 1);
1710 if (opnd->addr.offset.is_reg)
1711 {
1712 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1713 return 1;
1714 else
1715 {
1716 set_other_error (mismatch_detail, idx,
1717 _("invalid register offset"));
1718 return 0;
1719 }
1720 }
1721 else
1722 {
1723 const aarch64_opnd_info *prev = &opnds[idx-1];
1724 unsigned num_bytes; /* total number of bytes transferred. */
1725 /* The opcode dependent area stores the number of elements in
1726 each structure to be loaded/stored. */
1727 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1728 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1729 /* Special handling of loading a single structure to all lanes. */
1730 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1731 * aarch64_get_qualifier_esize (prev->qualifier);
1732 else
1733 num_bytes = prev->reglist.num_regs
1734 * aarch64_get_qualifier_esize (prev->qualifier)
1735 * aarch64_get_qualifier_nelem (prev->qualifier);
1736 if ((int) num_bytes != opnd->addr.offset.imm)
1737 {
1738 set_other_error (mismatch_detail, idx,
1739 _("invalid post-increment amount"));
1740 return 0;
1741 }
1742 }
1743 break;
1744
1745 case AARCH64_OPND_ADDR_REGOFF:
1746 /* Get the size of the data element that is accessed, which may be
1747 different from that of the source register size,
1748 e.g. in strb/ldrb. */
1749 size = aarch64_get_qualifier_esize (opnd->qualifier);
1750 /* The shift is either zero or the binary logarithm of SIZE. */
1751 if (opnd->shifter.amount != 0
1752 && opnd->shifter.amount != (int)get_logsz (size))
1753 {
1754 set_other_error (mismatch_detail, idx,
1755 _("invalid shift amount"));
1756 return 0;
1757 }
1758 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1759 operators. */
1760 switch (opnd->shifter.kind)
1761 {
1762 case AARCH64_MOD_UXTW:
1763 case AARCH64_MOD_LSL:
1764 case AARCH64_MOD_SXTW:
1765 case AARCH64_MOD_SXTX: break;
1766 default:
1767 set_other_error (mismatch_detail, idx,
1768 _("invalid extend/shift operator"));
1769 return 0;
1770 }
1771 break;
1772
1773 case AARCH64_OPND_ADDR_UIMM12:
1774 imm = opnd->addr.offset.imm;
1775 /* Get the size of the data element that is accessed, which may be
1776 different from that of the source register size,
1777 e.g. in strb/ldrb. */
1778 size = aarch64_get_qualifier_esize (qualifier);
1779 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1780 {
1781 set_offset_out_of_range_error (mismatch_detail, idx,
1782 0, 4095 * size);
1783 return 0;
1784 }
1785 if (!value_aligned_p (opnd->addr.offset.imm, size))
1786 {
1787 set_unaligned_error (mismatch_detail, idx, size);
1788 return 0;
1789 }
1790 break;
1791
1792 case AARCH64_OPND_ADDR_PCREL14:
1793 case AARCH64_OPND_ADDR_PCREL19:
1794 case AARCH64_OPND_ADDR_PCREL21:
1795 case AARCH64_OPND_ADDR_PCREL26:
1796 imm = opnd->imm.value;
1797 if (operand_need_shift_by_two (get_operand_from_code (type)))
1798 {
1799 /* The offset value in a PC-relative branch instruction is always
1800 4-byte aligned and is encoded without the lowest 2 bits. */
1801 if (!value_aligned_p (imm, 4))
1802 {
1803 set_unaligned_error (mismatch_detail, idx, 4);
1804 return 0;
1805 }
1806 /* Right shift by 2 so that we can carry out the following check
1807 canonically. */
1808 imm >>= 2;
1809 }
1810 size = get_operand_fields_width (get_operand_from_code (type));
1811 if (!value_fit_signed_field_p (imm, size))
1812 {
1813 set_other_error (mismatch_detail, idx,
1814 _("immediate out of range"));
1815 return 0;
1816 }
1817 break;
1818
1819 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1820 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1821 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1822 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1823 min_value = -8;
1824 max_value = 7;
1825 sve_imm_offset_vl:
1826 assert (!opnd->addr.offset.is_reg);
1827 assert (opnd->addr.preind);
1828 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1829 min_value *= num;
1830 max_value *= num;
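/* E.g. when NUM is 3 (as in the ...S4x3xVL forms), the immediate must
   be a multiple of 3 in the range [-24, 21]. */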
1831 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1832 || (opnd->shifter.operator_present
1833 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1834 {
1835 set_other_error (mismatch_detail, idx,
1836 _("invalid addressing mode"));
1837 return 0;
1838 }
1839 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1840 {
1841 set_offset_out_of_range_error (mismatch_detail, idx,
1842 min_value, max_value);
1843 return 0;
1844 }
1845 if (!value_aligned_p (opnd->addr.offset.imm, num))
1846 {
1847 set_unaligned_error (mismatch_detail, idx, num);
1848 return 0;
1849 }
1850 break;
1851
1852 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1853 min_value = -32;
1854 max_value = 31;
1855 goto sve_imm_offset_vl;
1856
1857 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1858 min_value = -256;
1859 max_value = 255;
1860 goto sve_imm_offset_vl;
1861
1862 case AARCH64_OPND_SVE_ADDR_RI_U6:
1863 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1864 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1865 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1866 min_value = 0;
1867 max_value = 63;
1868 sve_imm_offset:
1869 assert (!opnd->addr.offset.is_reg);
1870 assert (opnd->addr.preind);
1871 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1872 min_value *= num;
1873 max_value *= num;
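/* E.g. scaling by NUM == 8 turns the 6-bit range [0, 63] into offsets
   that must be multiples of 8 in the range [0, 504]. */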
1874 if (opnd->shifter.operator_present
1875 || opnd->shifter.amount_present)
1876 {
1877 set_other_error (mismatch_detail, idx,
1878 _("invalid addressing mode"));
1879 return 0;
1880 }
1881 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1882 {
1883 set_offset_out_of_range_error (mismatch_detail, idx,
1884 min_value, max_value);
1885 return 0;
1886 }
1887 if (!value_aligned_p (opnd->addr.offset.imm, num))
1888 {
1889 set_unaligned_error (mismatch_detail, idx, num);
1890 return 0;
1891 }
1892 break;
1893
1894 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1895 min_value = -8;
1896 max_value = 7;
1897 goto sve_imm_offset;
1898
1899 case AARCH64_OPND_SVE_ADDR_R:
1900 case AARCH64_OPND_SVE_ADDR_RR:
1901 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1902 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1903 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1904 case AARCH64_OPND_SVE_ADDR_RX:
1905 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1906 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1907 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1908 case AARCH64_OPND_SVE_ADDR_RZ:
1909 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1910 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1911 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1912 modifiers = 1 << AARCH64_MOD_LSL;
1913 sve_rr_operand:
1914 assert (opnd->addr.offset.is_reg);
1915 assert (opnd->addr.preind);
1916 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1917 && opnd->addr.offset.regno == 31)
1918 {
1919 set_other_error (mismatch_detail, idx,
1920 _("index register xzr is not allowed"));
1921 return 0;
1922 }
1923 if (((1 << opnd->shifter.kind) & modifiers) == 0
1924 || (opnd->shifter.amount
1925 != get_operand_specific_data (&aarch64_operands[type])))
1926 {
1927 set_other_error (mismatch_detail, idx,
1928 _("invalid addressing mode"));
1929 return 0;
1930 }
1931 break;
1932
1933 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1934 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1935 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1936 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1937 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1938 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1939 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1940 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1941 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1942 goto sve_rr_operand;
1943
1944 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1945 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1946 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1947 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1948 min_value = 0;
1949 max_value = 31;
1950 goto sve_imm_offset;
1951
1952 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1953 modifiers = 1 << AARCH64_MOD_LSL;
1954 sve_zz_operand:
1955 assert (opnd->addr.offset.is_reg);
1956 assert (opnd->addr.preind);
1957 if (((1 << opnd->shifter.kind) & modifiers) == 0
1958 || opnd->shifter.amount < 0
1959 || opnd->shifter.amount > 3)
1960 {
1961 set_other_error (mismatch_detail, idx,
1962 _("invalid addressing mode"));
1963 return 0;
1964 }
1965 break;
1966
1967 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1968 modifiers = (1 << AARCH64_MOD_SXTW);
1969 goto sve_zz_operand;
1970
1971 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1972 modifiers = 1 << AARCH64_MOD_UXTW;
1973 goto sve_zz_operand;
1974
1975 default:
1976 break;
1977 }
1978 break;
1979
1980 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1981 if (type == AARCH64_OPND_LEt)
1982 {
1983 /* Get the upper bound for the element index. */
1984 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1985 if (!value_in_range_p (opnd->reglist.index, 0, num))
1986 {
1987 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1988 return 0;
1989 }
1990 }
1991 /* The opcode dependent area stores the number of elements in
1992 each structure to be loaded/stored. */
1993 num = get_opcode_dependent_value (opcode);
1994 switch (type)
1995 {
1996 case AARCH64_OPND_LVt:
1997 assert (num >= 1 && num <= 4);
1998 /* Unless LD1/ST1, the number of registers should be equal to that
1999 of the structure elements. */
2000 if (num != 1 && opnd->reglist.num_regs != num)
2001 {
2002 set_reg_list_error (mismatch_detail, idx, num);
2003 return 0;
2004 }
2005 break;
2006 case AARCH64_OPND_LVt_AL:
2007 case AARCH64_OPND_LEt:
2008 assert (num >= 1 && num <= 4);
2009 /* The number of registers should be equal to that of the structure
2010 elements. */
2011 if (opnd->reglist.num_regs != num)
2012 {
2013 set_reg_list_error (mismatch_detail, idx, num);
2014 return 0;
2015 }
2016 break;
2017 default:
2018 break;
2019 }
2020 break;
2021
2022 case AARCH64_OPND_CLASS_IMMEDIATE:
2023 /* Constraint check on immediate operand. */
2024 imm = opnd->imm.value;
2025 /* E.g. imm_0_31 constrains value to be 0..31. */
2026 if (qualifier_value_in_range_constraint_p (qualifier)
2027 && !value_in_range_p (imm, get_lower_bound (qualifier),
2028 get_upper_bound (qualifier)))
2029 {
2030 set_imm_out_of_range_error (mismatch_detail, idx,
2031 get_lower_bound (qualifier),
2032 get_upper_bound (qualifier));
2033 return 0;
2034 }
2035
2036 switch (type)
2037 {
2038 case AARCH64_OPND_AIMM:
2039 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2040 {
2041 set_other_error (mismatch_detail, idx,
2042 _("invalid shift operator"));
2043 return 0;
2044 }
2045 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2046 {
2047 set_other_error (mismatch_detail, idx,
2048 _("shift amount must be 0 or 12"));
2049 return 0;
2050 }
2051 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2052 {
2053 set_other_error (mismatch_detail, idx,
2054 _("immediate out of range"));
2055 return 0;
2056 }
2057 break;
2058
2059 case AARCH64_OPND_HALF:
2060 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2061 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2062 {
2063 set_other_error (mismatch_detail, idx,
2064 _("invalid shift operator"));
2065 return 0;
2066 }
2067 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
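/* E.g. for a 64-bit destination (SIZE == 8) the valid shift amounts
   are 0, 16, 32 and 48; for a 32-bit destination only 0 and 16. */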
2068 if (!value_aligned_p (opnd->shifter.amount, 16))
2069 {
2070 set_other_error (mismatch_detail, idx,
2071 _("shift amount must be a multiple of 16"));
2072 return 0;
2073 }
2074 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2075 {
2076 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2077 0, size * 8 - 16);
2078 return 0;
2079 }
2080 if (opnd->imm.value < 0)
2081 {
2082 set_other_error (mismatch_detail, idx,
2083 _("negative immediate value not allowed"));
2084 return 0;
2085 }
2086 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2087 {
2088 set_other_error (mismatch_detail, idx,
2089 _("immediate out of range"));
2090 return 0;
2091 }
2092 break;
2093
2094 case AARCH64_OPND_IMM_MOV:
2095 {
2096 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2097 imm = opnd->imm.value;
2098 assert (idx == 1);
2099 switch (opcode->op)
2100 {
2101 case OP_MOV_IMM_WIDEN:
2102 imm = ~imm;
2103 /* Fall through. */
2104 case OP_MOV_IMM_WIDE:
2105 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2106 {
2107 set_other_error (mismatch_detail, idx,
2108 _("immediate out of range"));
2109 return 0;
2110 }
2111 break;
2112 case OP_MOV_IMM_LOG:
2113 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2114 {
2115 set_other_error (mismatch_detail, idx,
2116 _("immediate out of range"));
2117 return 0;
2118 }
2119 break;
2120 default:
2121 assert (0);
2122 return 0;
2123 }
2124 }
2125 break;
2126
2127 case AARCH64_OPND_NZCV:
2128 case AARCH64_OPND_CCMP_IMM:
2129 case AARCH64_OPND_EXCEPTION:
2130 case AARCH64_OPND_TME_UIMM16:
2131 case AARCH64_OPND_UIMM4:
2132 case AARCH64_OPND_UIMM4_ADDG:
2133 case AARCH64_OPND_UIMM7:
2134 case AARCH64_OPND_UIMM3_OP1:
2135 case AARCH64_OPND_UIMM3_OP2:
2136 case AARCH64_OPND_SVE_UIMM3:
2137 case AARCH64_OPND_SVE_UIMM7:
2138 case AARCH64_OPND_SVE_UIMM8:
2139 case AARCH64_OPND_SVE_UIMM8_53:
2140 size = get_operand_fields_width (get_operand_from_code (type));
2141 assert (size < 32);
2142 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2143 {
2144 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2145 (1 << size) - 1);
2146 return 0;
2147 }
2148 break;
2149
2150 case AARCH64_OPND_UIMM10:
2151 /* Scaled unsigned 10-bit immediate offset. */
2152 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2153 {
2154 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2155 return 0;
2156 }
2157
2158 if (!value_aligned_p (opnd->imm.value, 16))
2159 {
2160 set_unaligned_error (mismatch_detail, idx, 16);
2161 return 0;
2162 }
2163 break;
2164
2165 case AARCH64_OPND_SIMM5:
2166 case AARCH64_OPND_SVE_SIMM5:
2167 case AARCH64_OPND_SVE_SIMM5B:
2168 case AARCH64_OPND_SVE_SIMM6:
2169 case AARCH64_OPND_SVE_SIMM8:
2170 size = get_operand_fields_width (get_operand_from_code (type));
2171 assert (size < 32);
2172 if (!value_fit_signed_field_p (opnd->imm.value, size))
2173 {
2174 set_imm_out_of_range_error (mismatch_detail, idx,
2175 -(1 << (size - 1)),
2176 (1 << (size - 1)) - 1);
2177 return 0;
2178 }
2179 break;
2180
2181 case AARCH64_OPND_WIDTH:
2182 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2183 && opnds[0].type == AARCH64_OPND_Rd);
2184 size = get_upper_bound (qualifier);
2185 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2186 /* lsb+width <= reg.size */
2187 {
2188 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2189 size - opnds[idx-1].imm.value);
2190 return 0;
2191 }
2192 break;
2193
2194 case AARCH64_OPND_LIMM:
2195 case AARCH64_OPND_SVE_LIMM:
2196 {
2197 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2198 uint64_t uimm = opnd->imm.value;
2199 if (opcode->op == OP_BIC)
2200 uimm = ~uimm;
2201 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2202 {
2203 set_other_error (mismatch_detail, idx,
2204 _("immediate out of range"));
2205 return 0;
2206 }
2207 }
2208 break;
2209
2210 case AARCH64_OPND_IMM0:
2211 case AARCH64_OPND_FPIMM0:
2212 if (opnd->imm.value != 0)
2213 {
2214 set_other_error (mismatch_detail, idx,
2215 _("immediate zero expected"));
2216 return 0;
2217 }
2218 break;
2219
2220 case AARCH64_OPND_IMM_ROT1:
2221 case AARCH64_OPND_IMM_ROT2:
2222 case AARCH64_OPND_SVE_IMM_ROT2:
2223 if (opnd->imm.value != 0
2224 && opnd->imm.value != 90
2225 && opnd->imm.value != 180
2226 && opnd->imm.value != 270)
2227 {
2228 set_other_error (mismatch_detail, idx,
2229 _("rotate expected to be 0, 90, 180 or 270"));
2230 return 0;
2231 }
2232 break;
2233
2234 case AARCH64_OPND_IMM_ROT3:
2235 case AARCH64_OPND_SVE_IMM_ROT1:
2236 case AARCH64_OPND_SVE_IMM_ROT3:
2237 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2238 {
2239 set_other_error (mismatch_detail, idx,
2240 _("rotate expected to be 90 or 270"));
2241 return 0;
2242 }
2243 break;
2244
2245 case AARCH64_OPND_SHLL_IMM:
2246 assert (idx == 2);
2247 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2248 if (opnd->imm.value != size)
2249 {
2250 set_other_error (mismatch_detail, idx,
2251 _("invalid shift amount"));
2252 return 0;
2253 }
2254 break;
2255
2256 case AARCH64_OPND_IMM_VLSL:
2257 size = aarch64_get_qualifier_esize (qualifier);
2258 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2259 {
2260 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2261 size * 8 - 1);
2262 return 0;
2263 }
2264 break;
2265
2266 case AARCH64_OPND_IMM_VLSR:
2267 size = aarch64_get_qualifier_esize (qualifier);
2268 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2269 {
2270 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2271 return 0;
2272 }
2273 break;
2274
2275 case AARCH64_OPND_SIMD_IMM:
2276 case AARCH64_OPND_SIMD_IMM_SFT:
2277 /* Qualifier check. */
2278 switch (qualifier)
2279 {
2280 case AARCH64_OPND_QLF_LSL:
2281 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2282 {
2283 set_other_error (mismatch_detail, idx,
2284 _("invalid shift operator"));
2285 return 0;
2286 }
2287 break;
2288 case AARCH64_OPND_QLF_MSL:
2289 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2290 {
2291 set_other_error (mismatch_detail, idx,
2292 _("invalid shift operator"));
2293 return 0;
2294 }
2295 break;
2296 case AARCH64_OPND_QLF_NIL:
2297 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2298 {
2299 set_other_error (mismatch_detail, idx,
2300 _("shift is not permitted"));
2301 return 0;
2302 }
2303 break;
2304 default:
2305 assert (0);
2306 return 0;
2307 }
2308 /* Is the immediate valid? */
2309 assert (idx == 1);
2310 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2311 {
2312 /* uimm8 or simm8 */
2313 if (!value_in_range_p (opnd->imm.value, -128, 255))
2314 {
2315 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2316 return 0;
2317 }
2318 }
2319 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2320 {
2321 /* uimm64 is not
2322 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2323 ffffffffgggggggghhhhhhhh'. */
2324 set_other_error (mismatch_detail, idx,
2325 _("invalid value for immediate"));
2326 return 0;
2327 }
2328 /* Is the shift amount valid? */
2329 switch (opnd->shifter.kind)
2330 {
2331 case AARCH64_MOD_LSL:
2332 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
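/* E.g. for a .4s destination (SIZE == 4) the valid LSL amounts are
   0, 8, 16 and 24; for a .8h destination (SIZE == 2), 0 and 8. */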
2333 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2334 {
2335 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2336 (size - 1) * 8);
2337 return 0;
2338 }
2339 if (!value_aligned_p (opnd->shifter.amount, 8))
2340 {
2341 set_unaligned_error (mismatch_detail, idx, 8);
2342 return 0;
2343 }
2344 break;
2345 case AARCH64_MOD_MSL:
2346 /* Only 8 and 16 are valid shift amounts. */
2347 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2348 {
2349 set_other_error (mismatch_detail, idx,
2350 _("shift amount must be 0 or 16"));
2351 return 0;
2352 }
2353 break;
2354 default:
2355 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2356 {
2357 set_other_error (mismatch_detail, idx,
2358 _("invalid shift operator"));
2359 return 0;
2360 }
2361 break;
2362 }
2363 break;
2364
2365 case AARCH64_OPND_FPIMM:
2366 case AARCH64_OPND_SIMD_FPIMM:
2367 case AARCH64_OPND_SVE_FPIMM8:
2368 if (opnd->imm.is_fp == 0)
2369 {
2370 set_other_error (mismatch_detail, idx,
2371 _("floating-point immediate expected"));
2372 return 0;
2373 }
2374 /* The value is expected to be an 8-bit floating-point constant with
2375 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2376 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2377 instruction). */
2378 if (!value_in_range_p (opnd->imm.value, 0, 255))
2379 {
2380 set_other_error (mismatch_detail, idx,
2381 _("immediate out of range"));
2382 return 0;
2383 }
2384 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2385 {
2386 set_other_error (mismatch_detail, idx,
2387 _("invalid shift operator"));
2388 return 0;
2389 }
2390 break;
2391
2392 case AARCH64_OPND_SVE_AIMM:
2393 min_value = 0;
2394 sve_aimm:
2395 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2396 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
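/* Build a mask covering the SIZE-byte element. The shift is split in
   two so that the shift count never reaches 64 (undefined behaviour
   in C) when SIZE is 8. */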
2397 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2398 uvalue = opnd->imm.value;
2399 shift = opnd->shifter.amount;
2400 if (size == 1)
2401 {
2402 if (shift != 0)
2403 {
2404 set_other_error (mismatch_detail, idx,
2405 _("no shift amount allowed for"
2406 " 8-bit constants"));
2407 return 0;
2408 }
2409 }
2410 else
2411 {
2412 if (shift != 0 && shift != 8)
2413 {
2414 set_other_error (mismatch_detail, idx,
2415 _("shift amount must be 0 or 8"));
2416 return 0;
2417 }
2418 if (shift == 0 && (uvalue & 0xff) == 0)
2419 {
2420 shift = 8;
2421 uvalue = (int64_t) uvalue / 256;
2422 }
2423 }
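/* E.g. an unshifted #0x1200 has been normalized above to #0x12 with
   an implied LSL #8 before the range checks below. */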
2424 mask >>= shift;
2425 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2426 {
2427 set_other_error (mismatch_detail, idx,
2428 _("immediate too big for element size"));
2429 return 0;
2430 }
2431 uvalue = (uvalue - min_value) & mask;
2432 if (uvalue > 0xff)
2433 {
2434 set_other_error (mismatch_detail, idx,
2435 _("invalid arithmetic immediate"));
2436 return 0;
2437 }
2438 break;
2439
2440 case AARCH64_OPND_SVE_ASIMM:
2441 min_value = -128;
2442 goto sve_aimm;
2443
2444 case AARCH64_OPND_SVE_I1_HALF_ONE:
2445 assert (opnd->imm.is_fp);
2446 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2447 {
2448 set_other_error (mismatch_detail, idx,
2449 _("floating-point value must be 0.5 or 1.0"));
2450 return 0;
2451 }
2452 break;
2453
2454 case AARCH64_OPND_SVE_I1_HALF_TWO:
2455 assert (opnd->imm.is_fp);
2456 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2457 {
2458 set_other_error (mismatch_detail, idx,
2459 _("floating-point value must be 0.5 or 2.0"));
2460 return 0;
2461 }
2462 break;
2463
2464 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2465 assert (opnd->imm.is_fp);
2466 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2467 {
2468 set_other_error (mismatch_detail, idx,
2469 _("floating-point value must be 0.0 or 1.0"));
2470 return 0;
2471 }
2472 break;
2473
2474 case AARCH64_OPND_SVE_INV_LIMM:
2475 {
2476 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2477 uint64_t uimm = ~opnd->imm.value;
2478 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2479 {
2480 set_other_error (mismatch_detail, idx,
2481 _("immediate out of range"));
2482 return 0;
2483 }
2484 }
2485 break;
2486
2487 case AARCH64_OPND_SVE_LIMM_MOV:
2488 {
2489 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2490 uint64_t uimm = opnd->imm.value;
2491 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2492 {
2493 set_other_error (mismatch_detail, idx,
2494 _("immediate out of range"));
2495 return 0;
2496 }
2497 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2498 {
2499 set_other_error (mismatch_detail, idx,
2500 _("invalid replicated MOV immediate"));
2501 return 0;
2502 }
2503 }
2504 break;
2505
2506 case AARCH64_OPND_SVE_PATTERN_SCALED:
2507 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2508 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2509 {
2510 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2511 return 0;
2512 }
2513 break;
2514
2515 case AARCH64_OPND_SVE_SHLIMM_PRED:
2516 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2517 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2518 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2519 {
2520 set_imm_out_of_range_error (mismatch_detail, idx,
2521 0, 8 * size - 1);
2522 return 0;
2523 }
2524 break;
2525
2526 case AARCH64_OPND_SVE_SHRIMM_PRED:
2527 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2528 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2529 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2530 {
2531 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2532 return 0;
2533 }
2534 break;
2535
2536 default:
2537 break;
2538 }
2539 break;
2540
2541 case AARCH64_OPND_CLASS_SYSTEM:
2542 switch (type)
2543 {
2544 case AARCH64_OPND_PSTATEFIELD:
2545 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2546 /* MSR UAO, #uimm4
2547 MSR PAN, #uimm4
2548 MSR SSBS,#uimm4
2549 The immediate must be #0 or #1. */
2550 if ((opnd->pstatefield == 0x03 /* UAO. */
2551 || opnd->pstatefield == 0x04 /* PAN. */
2552 || opnd->pstatefield == 0x19 /* SSBS. */
2553 || opnd->pstatefield == 0x1a) /* DIT. */
2554 && opnds[1].imm.value > 1)
2555 {
2556 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2557 return 0;
2558 }
2559 /* MSR SPSel, #uimm4
2560 Uses uimm4 as a control value to select the stack pointer: if
2561 bit 0 is set it selects the current exception level's stack
2562 pointer, if bit 0 is clear it selects the shared EL0 stack pointer.
2563 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2564 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2565 {
2566 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2567 return 0;
2568 }
2569 break;
2570 default:
2571 break;
2572 }
2573 break;
2574
2575 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2576 /* Get the upper bound for the element index. */
2577 if (opcode->op == OP_FCMLA_ELEM)
2578 /* FCMLA index range depends on the vector size of other operands
2579 and is halved because complex numbers take two elements. */
2580 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2581 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2582 else
2583 num = 16;
2584 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
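/* E.g. for FCMLA with a .4s destination and a .s indexed element,
   NUM is (4 * 4 / 2) / 4 - 1 == 1, so the index must be 0 or 1. */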
2585 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2586
2587 /* Index out-of-range. */
2588 if (!value_in_range_p (opnd->reglane.index, 0, num))
2589 {
2590 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2591 return 0;
2592 }
2593 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2594 <Vm> is the vector register (V0-V31) or (V0-V15), whose
2595 number is encoded in "size:M:Rm":
2596 size <Vm>
2597 00 RESERVED
2598 01 0:Rm
2599 10 M:Rm
2600 11 RESERVED */
2601 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2602 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2603 {
2604 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2605 return 0;
2606 }
2607 break;
2608
2609 case AARCH64_OPND_CLASS_MODIFIED_REG:
2610 assert (idx == 1 || idx == 2);
2611 switch (type)
2612 {
2613 case AARCH64_OPND_Rm_EXT:
2614 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2615 && opnd->shifter.kind != AARCH64_MOD_LSL)
2616 {
2617 set_other_error (mismatch_detail, idx,
2618 _("extend operator expected"));
2619 return 0;
2620 }
2621 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2622 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2623 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2624 case. */
2625 if (!aarch64_stack_pointer_p (opnds + 0)
2626 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2627 {
2628 if (!opnd->shifter.operator_present)
2629 {
2630 set_other_error (mismatch_detail, idx,
2631 _("missing extend operator"));
2632 return 0;
2633 }
2634 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2635 {
2636 set_other_error (mismatch_detail, idx,
2637 _("'LSL' operator not allowed"));
2638 return 0;
2639 }
2640 }
2641 assert (opnd->shifter.operator_present /* Default to LSL. */
2642 || opnd->shifter.kind == AARCH64_MOD_LSL);
2643 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2644 {
2645 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2646 return 0;
2647 }
2648 /* In the 64-bit form, the final register operand is written as Wm
2649 for all but the (possibly omitted) UXTX/LSL and SXTX
2650 operators.
2651 N.B. GAS allows an X register to be used with any operator as a
2652 programming convenience. */
2653 if (qualifier == AARCH64_OPND_QLF_X
2654 && opnd->shifter.kind != AARCH64_MOD_LSL
2655 && opnd->shifter.kind != AARCH64_MOD_UXTX
2656 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2657 {
2658 set_other_error (mismatch_detail, idx, _("W register expected"));
2659 return 0;
2660 }
2661 break;
2662
2663 case AARCH64_OPND_Rm_SFT:
2664 /* ROR is not available to the shifted register operand in
2665 arithmetic instructions. */
2666 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2667 {
2668 set_other_error (mismatch_detail, idx,
2669 _("shift operator expected"));
2670 return 0;
2671 }
2672 if (opnd->shifter.kind == AARCH64_MOD_ROR
2673 && opcode->iclass != log_shift)
2674 {
2675 set_other_error (mismatch_detail, idx,
2676 _("'ROR' operator not allowed"));
2677 return 0;
2678 }
2679 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2680 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2681 {
2682 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2683 return 0;
2684 }
2685 break;
2686
2687 default:
2688 break;
2689 }
2690 break;
2691
2692 default:
2693 break;
2694 }
2695
2696 return 1;
2697 }
2698
2699 /* Main entrypoint for the operand constraint checking.
2700
2701 Return 1 if operands of *INST meet the constraint applied by the operand
2702 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2703 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2704 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2705 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2706 error kind when it is notified that an instruction does not pass the check).
2707
2708 Un-determined operand qualifiers may get established during the process. */
2709
2710 int
2711 aarch64_match_operands_constraint (aarch64_inst *inst,
2712 aarch64_operand_error *mismatch_detail)
2713 {
2714 int i;
2715
2716 DEBUG_TRACE ("enter");
2717
2718 /* Check for cases where a source register needs to be the same as the
2719 destination register. Do this before matching qualifiers since if
2720 an instruction has both invalid tying and invalid qualifiers,
2721 the error about qualifiers would suggest several alternative
2722 instructions that also have invalid tying. */
2723 i = inst->opcode->tied_operand;
2724 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2725 {
2726 if (mismatch_detail)
2727 {
2728 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2729 mismatch_detail->index = i;
2730 mismatch_detail->error = NULL;
2731 }
2732 return 0;
2733 }
2734
2735 /* Match operands' qualifier.
2736 *INST has already had qualifiers established for some, if not all, of
2737 its operands; we need to find out whether these established
2738 qualifiers match one of the qualifier sequences in
2739 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2740 with the corresponding qualifier in such a sequence.
2741 Only basic operand constraint checking is done here; the more thorough
2742 constraint checking will be carried out by operand_general_constraint_met_p,
2743 which has to be called after this in order to get all of the operands'
2744 qualifiers established. */
2745 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2746 {
2747 DEBUG_TRACE ("FAIL on operand qualifier matching");
2748 if (mismatch_detail)
2749 {
2750 /* Return an error type to indicate that it is a qualifier
2751 matching failure; we don't care about which operand as there
2752 is enough information in the opcode table to reproduce it. */
2753 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2754 mismatch_detail->index = -1;
2755 mismatch_detail->error = NULL;
2756 }
2757 return 0;
2758 }
2759
2760 /* Match operands' constraint. */
2761 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2762 {
2763 enum aarch64_opnd type = inst->opcode->operands[i];
2764 if (type == AARCH64_OPND_NIL)
2765 break;
2766 if (inst->operands[i].skip)
2767 {
2768 DEBUG_TRACE ("skip the incomplete operand %d", i);
2769 continue;
2770 }
2771 if (operand_general_constraint_met_p (inst->operands, i, type,
2772 inst->opcode, mismatch_detail) == 0)
2773 {
2774 DEBUG_TRACE ("FAIL on operand %d", i);
2775 return 0;
2776 }
2777 }
2778
2779 DEBUG_TRACE ("PASS");
2780
2781 return 1;
2782 }
2783
2784 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2785 Also updates the TYPE of each INST->OPERANDS with the corresponding
2786 value of OPCODE->OPERANDS.
2787
2788 Note that some operand qualifiers may need to be manually cleared by
2789 the caller before it next calls aarch64_opcode_encode; doing
2790 this helps the qualifier matching facilities work
2791 properly. */
2792
2793 const aarch64_opcode*
2794 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2795 {
2796 int i;
2797 const aarch64_opcode *old = inst->opcode;
2798
2799 inst->opcode = opcode;
2800
2801 /* Update the operand types. */
2802 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2803 {
2804 inst->operands[i].type = opcode->operands[i];
2805 if (opcode->operands[i] == AARCH64_OPND_NIL)
2806 break;
2807 }
2808
2809 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2810
2811 return old;
2812 }
2813
2814 int
2815 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2816 {
2817 int i;
2818 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2819 if (operands[i] == operand)
2820 return i;
2821 else if (operands[i] == AARCH64_OPND_NIL)
2822 break;
2823 return -1;
2824 }
2825 \f
2826 /* R0...R30, followed by FOR31. */
2827 #define BANK(R, FOR31) \
2828 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
2829 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2830 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2831 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
2832 /* [0][0] 32-bit integer regs with sp Wn
2833 [0][1] 64-bit integer regs with sp Xn sf=1
2834 [1][0] 32-bit integer regs with #0 Wn
2835 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2836 static const char *int_reg[2][2][32] = {
2837 #define R32(X) "w" #X
2838 #define R64(X) "x" #X
2839 { BANK (R32, "wsp"), BANK (R64, "sp") },
2840 { BANK (R32, "wzr"), BANK (R64, "xzr") }
2841 #undef R64
2842 #undef R32
2843 };
2844
2845 /* Names of the SVE vector registers, first with .S suffixes,
2846 then with .D suffixes. */
2847
2848 static const char *sve_reg[2][32] = {
2849 #define ZS(X) "z" #X ".s"
2850 #define ZD(X) "z" #X ".d"
2851 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2852 #undef ZD
2853 #undef ZS
2854 };
2855 #undef BANK
2856
2857 /* Return the integer register name.
2858 If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg. */
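/* E.g. regno 31 yields "sp" (or "wsp") when SP_REG_P is non-zero and
   "xzr" (or "wzr") otherwise. */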
2859
2860 static inline const char *
2861 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2862 {
2863 const int has_zr = sp_reg_p ? 0 : 1;
2864 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2865 return int_reg[has_zr][is_64][regno];
2866 }
2867
2868 /* Like get_int_reg_name, but IS_64 is always 1. */
2869
2870 static inline const char *
2871 get_64bit_int_reg_name (int regno, int sp_reg_p)
2872 {
2873 const int has_zr = sp_reg_p ? 0 : 1;
2874 return int_reg[has_zr][1][regno];
2875 }
2876
2877 /* Get the name of the integer offset register in OPND, using the shift type
2878 to decide whether it's a word or doubleword. */
2879
2880 static inline const char *
2881 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2882 {
2883 switch (opnd->shifter.kind)
2884 {
2885 case AARCH64_MOD_UXTW:
2886 case AARCH64_MOD_SXTW:
2887 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2888
2889 case AARCH64_MOD_LSL:
2890 case AARCH64_MOD_SXTX:
2891 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2892
2893 default:
2894 abort ();
2895 }
2896 }
2897
2898 /* Get the name of the SVE vector register REGNO, using the operand
2899 qualifier to decide whether the suffix should be .S or .D. */
2900
2901 static inline const char *
2902 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2903 {
2904 assert (qualifier == AARCH64_OPND_QLF_S_S
2905 || qualifier == AARCH64_OPND_QLF_S_D);
2906 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2907 }
2908
2909 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2910
2911 typedef union
2912 {
2913 uint64_t i;
2914 double d;
2915 } double_conv_t;
2916
2917 typedef union
2918 {
2919 uint32_t i;
2920 float f;
2921 } single_conv_t;
2922
2923 typedef union
2924 {
2925 uint32_t i;
2926 float f;
2927 } half_conv_t;
2928
2929 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2930 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2931 (depending on the type of the instruction). IMM8 will be expanded to a
2932 single-precision floating-point value (SIZE == 4) or a double-precision
2933 floating-point value (SIZE == 8). A half-precision floating-point value
2934 (SIZE == 2) is expanded to a single-precision floating-point value. The
2935 expanded value is returned. */
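/* For example, IMM8 == 0x70 expands to 0x3f800000 (1.0f) when SIZE is 4
   and to 0x3ff0000000000000 (1.0) when SIZE is 8. */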
2936
2937 static uint64_t
2938 expand_fp_imm (int size, uint32_t imm8)
2939 {
2940 uint64_t imm = 0;
2941 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2942
2943 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2944 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2945 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2946 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2947 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
2948 if (size == 8)
2949 {
2950 imm = (imm8_7 << (63-32)) /* imm8<7> */
2951 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
2952 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2953 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2954 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2955 imm <<= 32;
2956 }
2957 else if (size == 4 || size == 2)
2958 {
2959 imm = (imm8_7 << 31) /* imm8<7> */
2960 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2961 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2962 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2963 }
2964 else
2965 {
2966 /* An unsupported size. */
2967 assert (0);
2968 }
2969
2970 return imm;
2971 }
2972
2973 /* Produce the string representation of the register list operand *OPND
2974 in the buffer pointed to by BUF of size SIZE. PREFIX is the part of
2975 the register name that comes before the register number, such as "v". */
2976 static void
2977 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2978 const char *prefix)
2979 {
2980 const int num_regs = opnd->reglist.num_regs;
2981 const int first_reg = opnd->reglist.first_regno;
2982 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2983 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2984 char tb[8]; /* Temporary buffer. */
2985
2986 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2987 assert (num_regs >= 1 && num_regs <= 4);
2988
2989 /* Prepare the index if any. */
2990 if (opnd->reglist.has_index)
2991 /* PR 21096: The %100 is to silence a warning about possible truncation. */
2992 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
2993 else
2994 tb[0] = '\0';
2995
2996 /* The hyphenated form is preferred for disassembly if there are
2997 more than two registers in the list, and the register numbers
2998 are monotonically increasing in increments of one. */
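/* E.g. a four-register list starting at v4 with an 8B qualifier is
   printed as "{v4.8b-v7.8b}"; a list that wraps past v31 falls back
   to the comma-separated form below. */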
2999 if (num_regs > 2 && last_reg > first_reg)
3000 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
3001 prefix, last_reg, qlf_name, tb);
3002 else
3003 {
3004 const int reg0 = first_reg;
3005 const int reg1 = (first_reg + 1) & 0x1f;
3006 const int reg2 = (first_reg + 2) & 0x1f;
3007 const int reg3 = (first_reg + 3) & 0x1f;
3008
3009 switch (num_regs)
3010 {
3011 case 1:
3012 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
3013 break;
3014 case 2:
3015 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
3016 prefix, reg1, qlf_name, tb);
3017 break;
3018 case 3:
3019 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
3020 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3021 prefix, reg2, qlf_name, tb);
3022 break;
3023 case 4:
3024 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
3025 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3026 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
3027 break;
3028 }
3029 }
3030 }
3031
3032 /* Print the register+immediate address in OPND to BUF, which has SIZE
3033 characters. BASE is the name of the base register. */
3034
3035 static void
3036 print_immediate_offset_address (char *buf, size_t size,
3037 const aarch64_opnd_info *opnd,
3038 const char *base)
3039 {
3040 if (opnd->addr.writeback)
3041 {
3042 if (opnd->addr.preind)
3043 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
3044 else
3045 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
3046 }
3047 else
3048 {
3049 if (opnd->shifter.operator_present)
3050 {
3051 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3052 snprintf (buf, size, "[%s, #%d, mul vl]",
3053 base, opnd->addr.offset.imm);
3054 }
3055 else if (opnd->addr.offset.imm)
3056 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
3057 else
3058 snprintf (buf, size, "[%s]", base);
3059 }
3060 }
3061
3062 /* Produce the string representation of the register offset address operand
3063 *OPND in the buffer pointed to by BUF of size SIZE. BASE and OFFSET are
3064 the names of the base and offset registers. */
3065 static void
3066 print_register_offset_address (char *buf, size_t size,
3067 const aarch64_opnd_info *opnd,
3068 const char *base, const char *offset)
3069 {
3070 char tb[16]; /* Temporary buffer. */
3071 bfd_boolean print_extend_p = TRUE;
3072 bfd_boolean print_amount_p = TRUE;
3073 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3074
3075 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3076 || !opnd->shifter.amount_present))
3077 {
3078 /* Don't print the shift/extend amount when the amount is zero and
3079 it is not the special case of an 8-bit load/store instruction. */
3080 print_amount_p = FALSE;
3081 /* Likewise, no need to print the shift operator LSL in such a
3082 situation. */
3083 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3084 print_extend_p = FALSE;
3085 }
3086
3087 /* Prepare for the extend/shift. */
3088 if (print_extend_p)
3089 {
3090 if (print_amount_p)
3091 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3092 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3093 (opnd->shifter.amount % 100));
3094 else
3095 snprintf (tb, sizeof (tb), ", %s", shift_name);
3096 }
3097 else
3098 tb[0] = '\0';
3099
3100 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3101 }
3102
3103 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3104 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3105 PC, PCREL_P and ADDRESS are used to pass in and return information about
3106 the PC-relative address calculation, where the PC value is passed in
3107 PC. If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
3108 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3109 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3110
3111 The function serves both the disassembler and the assembler diagnostics
3112 issuer, which is the reason why it lives in this file. */
3113
3114 void
3115 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3116 const aarch64_opcode *opcode,
3117 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3118 bfd_vma *address, char** notes)
3119 {
3120 unsigned int i, num_conds;
3121 const char *name = NULL;
3122 const aarch64_opnd_info *opnd = opnds + idx;
3123 enum aarch64_modifier_kind kind;
3124 uint64_t addr, enum_value;
3125
3126 buf[0] = '\0';
3127 if (pcrel_p)
3128 *pcrel_p = 0;
3129
3130 switch (opnd->type)
3131 {
3132 case AARCH64_OPND_Rd:
3133 case AARCH64_OPND_Rn:
3134 case AARCH64_OPND_Rm:
3135 case AARCH64_OPND_Rt:
3136 case AARCH64_OPND_Rt2:
3137 case AARCH64_OPND_Rs:
3138 case AARCH64_OPND_Ra:
3139 case AARCH64_OPND_Rt_SYS:
3140 case AARCH64_OPND_PAIRREG:
3141 case AARCH64_OPND_SVE_Rm:
3142 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3143 the <ic_op>, therefore we use opnd->present to override the
3144 generic optional-ness information. */
3145 if (opnd->type == AARCH64_OPND_Rt_SYS)
3146 {
3147 if (!opnd->present)
3148 break;
3149 }
3150 /* Omit the operand, e.g. RET. */
3151 else if (optional_operand_p (opcode, idx)
3152 && (opnd->reg.regno
3153 == get_optional_operand_default_value (opcode)))
3154 break;
3155 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3156 || opnd->qualifier == AARCH64_OPND_QLF_X);
3157 snprintf (buf, size, "%s",
3158 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3159 break;
3160
3161 case AARCH64_OPND_Rd_SP:
3162 case AARCH64_OPND_Rn_SP:
3163 case AARCH64_OPND_Rt_SP:
3164 case AARCH64_OPND_SVE_Rn_SP:
3165 case AARCH64_OPND_Rm_SP:
3166 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3167 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3168 || opnd->qualifier == AARCH64_OPND_QLF_X
3169 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3170 snprintf (buf, size, "%s",
3171 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3172 break;
3173
3174 case AARCH64_OPND_Rm_EXT:
3175 kind = opnd->shifter.kind;
3176 assert (idx == 1 || idx == 2);
3177 if ((aarch64_stack_pointer_p (opnds)
3178 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3179 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3180 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3181 && kind == AARCH64_MOD_UXTW)
3182 || (opnd->qualifier == AARCH64_OPND_QLF_X
3183 && kind == AARCH64_MOD_UXTX)))
3184 {
3185 /* 'LSL' is the preferred form in this case. */
3186 kind = AARCH64_MOD_LSL;
3187 if (opnd->shifter.amount == 0)
3188 {
3189 /* Shifter omitted. */
3190 snprintf (buf, size, "%s",
3191 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3192 break;
3193 }
3194 }
3195 if (opnd->shifter.amount)
3196 snprintf (buf, size, "%s, %s #%" PRIi64,
3197 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3198 aarch64_operand_modifiers[kind].name,
3199 opnd->shifter.amount);
3200 else
3201 snprintf (buf, size, "%s, %s",
3202 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3203 aarch64_operand_modifiers[kind].name);
3204 break;
3205
3206 case AARCH64_OPND_Rm_SFT:
3207 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3208 || opnd->qualifier == AARCH64_OPND_QLF_X);
3209 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3210 snprintf (buf, size, "%s",
3211 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3212 else
3213 snprintf (buf, size, "%s, %s #%" PRIi64,
3214 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3215 aarch64_operand_modifiers[opnd->shifter.kind].name,
3216 opnd->shifter.amount);
3217 break;
3218
3219 case AARCH64_OPND_Fd:
3220 case AARCH64_OPND_Fn:
3221 case AARCH64_OPND_Fm:
3222 case AARCH64_OPND_Fa:
3223 case AARCH64_OPND_Ft:
3224 case AARCH64_OPND_Ft2:
3225 case AARCH64_OPND_Sd:
3226 case AARCH64_OPND_Sn:
3227 case AARCH64_OPND_Sm:
3228 case AARCH64_OPND_SVE_VZn:
3229 case AARCH64_OPND_SVE_Vd:
3230 case AARCH64_OPND_SVE_Vm:
3231 case AARCH64_OPND_SVE_Vn:
3232 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3233 opnd->reg.regno);
3234 break;
3235
3236 case AARCH64_OPND_Va:
3237 case AARCH64_OPND_Vd:
3238 case AARCH64_OPND_Vn:
3239 case AARCH64_OPND_Vm:
3240 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3241 aarch64_get_qualifier_name (opnd->qualifier));
3242 break;
3243
3244 case AARCH64_OPND_Ed:
3245 case AARCH64_OPND_En:
3246 case AARCH64_OPND_Em:
3247 case AARCH64_OPND_Em16:
3248 case AARCH64_OPND_SM3_IMM2:
3249 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3250 aarch64_get_qualifier_name (opnd->qualifier),
3251 opnd->reglane.index);
3252 break;
3253
3254 case AARCH64_OPND_VdD1:
3255 case AARCH64_OPND_VnD1:
3256 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3257 break;
3258
3259 case AARCH64_OPND_LVn:
3260 case AARCH64_OPND_LVt:
3261 case AARCH64_OPND_LVt_AL:
3262 case AARCH64_OPND_LEt:
3263 print_register_list (buf, size, opnd, "v");
3264 break;
3265
3266 case AARCH64_OPND_SVE_Pd:
3267 case AARCH64_OPND_SVE_Pg3:
3268 case AARCH64_OPND_SVE_Pg4_5:
3269 case AARCH64_OPND_SVE_Pg4_10:
3270 case AARCH64_OPND_SVE_Pg4_16:
3271 case AARCH64_OPND_SVE_Pm:
3272 case AARCH64_OPND_SVE_Pn:
3273 case AARCH64_OPND_SVE_Pt:
3274 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3275 snprintf (buf, size, "p%d", opnd->reg.regno);
3276 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3277 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3278 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3279 aarch64_get_qualifier_name (opnd->qualifier));
3280 else
3281 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3282 aarch64_get_qualifier_name (opnd->qualifier));
3283 break;
3284
3285 case AARCH64_OPND_SVE_Za_5:
3286 case AARCH64_OPND_SVE_Za_16:
3287 case AARCH64_OPND_SVE_Zd:
3288 case AARCH64_OPND_SVE_Zm_5:
3289 case AARCH64_OPND_SVE_Zm_16:
3290 case AARCH64_OPND_SVE_Zn:
3291 case AARCH64_OPND_SVE_Zt:
3292 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3293 snprintf (buf, size, "z%d", opnd->reg.regno);
3294 else
3295 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3296 aarch64_get_qualifier_name (opnd->qualifier));
3297 break;
3298
3299 case AARCH64_OPND_SVE_ZnxN:
3300 case AARCH64_OPND_SVE_ZtxN:
3301 print_register_list (buf, size, opnd, "z");
3302 break;
3303
3304 case AARCH64_OPND_SVE_Zm3_INDEX:
3305 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3306 case AARCH64_OPND_SVE_Zm4_INDEX:
3307 case AARCH64_OPND_SVE_Zn_INDEX:
3308 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3309 aarch64_get_qualifier_name (opnd->qualifier),
3310 opnd->reglane.index);
3311 break;
3312
3313 case AARCH64_OPND_CRn:
3314 case AARCH64_OPND_CRm:
3315 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3316 break;
3317
3318 case AARCH64_OPND_IDX:
3319 case AARCH64_OPND_MASK:
3320 case AARCH64_OPND_IMM:
3321 case AARCH64_OPND_IMM_2:
3322 case AARCH64_OPND_WIDTH:
3323 case AARCH64_OPND_UIMM3_OP1:
3324 case AARCH64_OPND_UIMM3_OP2:
3325 case AARCH64_OPND_BIT_NUM:
3326 case AARCH64_OPND_IMM_VLSL:
3327 case AARCH64_OPND_IMM_VLSR:
3328 case AARCH64_OPND_SHLL_IMM:
3329 case AARCH64_OPND_IMM0:
3330 case AARCH64_OPND_IMMR:
3331 case AARCH64_OPND_IMMS:
3332 case AARCH64_OPND_FBITS:
3333 case AARCH64_OPND_TME_UIMM16:
3334 case AARCH64_OPND_SIMM5:
3335 case AARCH64_OPND_SVE_SHLIMM_PRED:
3336 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3337 case AARCH64_OPND_SVE_SHRIMM_PRED:
3338 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3339 case AARCH64_OPND_SVE_SIMM5:
3340 case AARCH64_OPND_SVE_SIMM5B:
3341 case AARCH64_OPND_SVE_SIMM6:
3342 case AARCH64_OPND_SVE_SIMM8:
3343 case AARCH64_OPND_SVE_UIMM3:
3344 case AARCH64_OPND_SVE_UIMM7:
3345 case AARCH64_OPND_SVE_UIMM8:
3346 case AARCH64_OPND_SVE_UIMM8_53:
3347 case AARCH64_OPND_IMM_ROT1:
3348 case AARCH64_OPND_IMM_ROT2:
3349 case AARCH64_OPND_IMM_ROT3:
3350 case AARCH64_OPND_SVE_IMM_ROT1:
3351 case AARCH64_OPND_SVE_IMM_ROT2:
3352 case AARCH64_OPND_SVE_IMM_ROT3:
3353 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3354 break;
3355
3356 case AARCH64_OPND_SVE_I1_HALF_ONE:
3357 case AARCH64_OPND_SVE_I1_HALF_TWO:
3358 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3359 {
3360 single_conv_t c;
3361 c.i = opnd->imm.value;
3362 snprintf (buf, size, "#%.1f", c.f);
3363 break;
3364 }
3365
3366 case AARCH64_OPND_SVE_PATTERN:
3367 if (optional_operand_p (opcode, idx)
3368 && opnd->imm.value == get_optional_operand_default_value (opcode))
3369 break;
3370 enum_value = opnd->imm.value;
3371 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3372 if (aarch64_sve_pattern_array[enum_value])
3373 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3374 else
3375 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3376 break;
3377
3378 case AARCH64_OPND_SVE_PATTERN_SCALED:
3379 if (optional_operand_p (opcode, idx)
3380 && !opnd->shifter.operator_present
3381 && opnd->imm.value == get_optional_operand_default_value (opcode))
3382 break;
3383 enum_value = opnd->imm.value;
3384 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3385 if (aarch64_sve_pattern_array[opnd->imm.value])
3386 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3387 else
3388 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3389 if (opnd->shifter.operator_present)
3390 {
3391 size_t len = strlen (buf);
3392 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3393 aarch64_operand_modifiers[opnd->shifter.kind].name,
3394 opnd->shifter.amount);
3395 }
3396 break;
3397
3398 case AARCH64_OPND_SVE_PRFOP:
3399 enum_value = opnd->imm.value;
3400 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3401 if (aarch64_sve_prfop_array[enum_value])
3402 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3403 else
3404 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3405 break;
3406
3407 case AARCH64_OPND_IMM_MOV:
3408 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3409 {
3410 case 4: /* e.g. MOV Wd, #<imm32>. */
3411 {
3412 int imm32 = opnd->imm.value;
3413 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3414 }
3415 break;
3416 case 8: /* e.g. MOV Xd, #<imm64>. */
3417 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3418 opnd->imm.value, opnd->imm.value);
3419 break;
3420 default: assert (0);
3421 }
3422 break;
3423
3424 case AARCH64_OPND_FPIMM0:
3425 snprintf (buf, size, "#0.0");
3426 break;
3427
3428 case AARCH64_OPND_LIMM:
3429 case AARCH64_OPND_AIMM:
3430 case AARCH64_OPND_HALF:
3431 case AARCH64_OPND_SVE_INV_LIMM:
3432 case AARCH64_OPND_SVE_LIMM:
3433 case AARCH64_OPND_SVE_LIMM_MOV:
3434 if (opnd->shifter.amount)
3435 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3436 opnd->shifter.amount);
3437 else
3438 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3439 break;
3440
3441 case AARCH64_OPND_SIMD_IMM:
3442 case AARCH64_OPND_SIMD_IMM_SFT:
3443 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3444 || opnd->shifter.kind == AARCH64_MOD_NONE)
3445 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3446 else
3447 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3448 aarch64_operand_modifiers[opnd->shifter.kind].name,
3449 opnd->shifter.amount);
3450 break;
3451
3452 case AARCH64_OPND_SVE_AIMM:
3453 case AARCH64_OPND_SVE_ASIMM:
3454 if (opnd->shifter.amount)
3455 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3456 opnd->shifter.amount);
3457 else
3458 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3459 break;
3460
3461 case AARCH64_OPND_FPIMM:
3462 case AARCH64_OPND_SIMD_FPIMM:
3463 case AARCH64_OPND_SVE_FPIMM8:
3464 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3465 {
3466 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3467 {
3468 half_conv_t c;
3469 c.i = expand_fp_imm (2, opnd->imm.value);
3470 snprintf (buf, size, "#%.18e", c.f);
3471 }
3472 break;
3473 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3474 {
3475 single_conv_t c;
3476 c.i = expand_fp_imm (4, opnd->imm.value);
3477 snprintf (buf, size, "#%.18e", c.f);
3478 }
3479 break;
3480 case 8: /* e.g. FMOV <Dd>, #<imm>. */
3481 {
3482 double_conv_t c;
3483 c.i = expand_fp_imm (8, opnd->imm.value);
3484 snprintf (buf, size, "#%.18e", c.d);
3485 }
3486 break;
3487 default: assert (0);
3488 }
3489 break;
3490
3491 case AARCH64_OPND_CCMP_IMM:
3492 case AARCH64_OPND_NZCV:
3493 case AARCH64_OPND_EXCEPTION:
3494 case AARCH64_OPND_UIMM4:
3495 case AARCH64_OPND_UIMM4_ADDG:
3496 case AARCH64_OPND_UIMM7:
3497 case AARCH64_OPND_UIMM10:
3498 if (optional_operand_p (opcode, idx) == TRUE
3499 && (opnd->imm.value ==
3500 (int64_t) get_optional_operand_default_value (opcode)))
3501 /* Omit the operand, e.g. DCPS1. */
3502 break;
3503 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3504 break;
3505
3506 case AARCH64_OPND_COND:
3507 case AARCH64_OPND_COND1:
3508 snprintf (buf, size, "%s", opnd->cond->names[0]);
3509 num_conds = ARRAY_SIZE (opnd->cond->names);
3510 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3511 {
3512 size_t len = strlen (buf);
3513 if (i == 1)
3514 snprintf (buf + len, size - len, " // %s = %s",
3515 opnd->cond->names[0], opnd->cond->names[i]);
3516 else
3517 snprintf (buf + len, size - len, ", %s",
3518 opnd->cond->names[i]);
3519 }
3520 break;
3521
3522 case AARCH64_OPND_ADDR_ADRP:
3523 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3524 + opnd->imm.value;
3525 if (pcrel_p)
3526 *pcrel_p = 1;
3527 if (address)
3528 *address = addr;
3529 /* This is not necessary during disassembly, as print_address_func
3530 in the disassemble_info will take care of the printing. But some
3531 other callers may still be interested in getting the string in *BUF,
3532 so here we do the snprintf regardless. */
3533 snprintf (buf, size, "#0x%" PRIx64, addr);
3534 break;
3535
3536 case AARCH64_OPND_ADDR_PCREL14:
3537 case AARCH64_OPND_ADDR_PCREL19:
3538 case AARCH64_OPND_ADDR_PCREL21:
3539 case AARCH64_OPND_ADDR_PCREL26:
3540 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3541 if (pcrel_p)
3542 *pcrel_p = 1;
3543 if (address)
3544 *address = addr;
3545 /* This is not necessary during disassembly, as print_address_func
3546 in the disassemble_info will take care of the printing. But some
3547 other callers may still be interested in getting the string in *BUF,
3548 so here we do the snprintf regardless. */
3549 snprintf (buf, size, "#0x%" PRIx64, addr);
3550 break;
3551
3552 case AARCH64_OPND_ADDR_SIMPLE:
3553 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3554 case AARCH64_OPND_SIMD_ADDR_POST:
3555 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3556 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3557 {
3558 if (opnd->addr.offset.is_reg)
3559 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3560 else
3561 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3562 }
3563 else
3564 snprintf (buf, size, "[%s]", name);
3565 break;
3566
3567 case AARCH64_OPND_ADDR_REGOFF:
3568 case AARCH64_OPND_SVE_ADDR_R:
3569 case AARCH64_OPND_SVE_ADDR_RR:
3570 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3571 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3572 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3573 case AARCH64_OPND_SVE_ADDR_RX:
3574 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3575 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3576 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3577 print_register_offset_address
3578 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3579 get_offset_int_reg_name (opnd));
3580 break;
3581
3582 case AARCH64_OPND_SVE_ADDR_RZ:
3583 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3584 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3585 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3586 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3587 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3588 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3589 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3590 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3591 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3592 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3593 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3594 print_register_offset_address
3595 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3596 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3597 break;
3598
3599 case AARCH64_OPND_ADDR_SIMM7:
3600 case AARCH64_OPND_ADDR_SIMM9:
3601 case AARCH64_OPND_ADDR_SIMM9_2:
3602 case AARCH64_OPND_ADDR_SIMM10:
3603 case AARCH64_OPND_ADDR_SIMM11:
3604 case AARCH64_OPND_ADDR_SIMM13:
3605 case AARCH64_OPND_ADDR_OFFSET:
3606 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3607 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3608 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3609 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3610 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3611 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3612 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3613 case AARCH64_OPND_SVE_ADDR_RI_U6:
3614 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3615 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3616 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3617 print_immediate_offset_address
3618 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3619 break;
3620
3621 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3622 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3623 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3624 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3625 print_immediate_offset_address
3626 (buf, size, opnd,
3627 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3628 break;
3629
3630 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3631 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3632 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3633 print_register_offset_address
3634 (buf, size, opnd,
3635 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3636 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3637 break;
3638
3639 case AARCH64_OPND_ADDR_UIMM12:
3640 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3641 if (opnd->addr.offset.imm)
3642 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3643 else
3644 snprintf (buf, size, "[%s]", name);
3645 break;
3646
3647 case AARCH64_OPND_SYSREG:
3648 for (i = 0; aarch64_sys_regs[i].name; ++i)
3649 {
3650 bfd_boolean exact_match
3651 = (aarch64_sys_regs[i].flags & opnd->sysreg.flags)
3652 == opnd->sysreg.flags;
3653
3654 /* Try to find an exact match, but if that fails, return the first
3655 partial match that was found. */
3656 if (aarch64_sys_regs[i].value == opnd->sysreg.value
3657 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i])
3658 && (name == NULL || exact_match))
3659 {
3660 name = aarch64_sys_regs[i].name;
3661 if (exact_match)
3662 {
3663 if (notes)
3664 *notes = NULL;
3665 break;
3666 }
3667
3668 /* If we didn't match exactly, that means the presence of a flag
3669 indicates what we didn't want for this instruction. e.g. if
3670 F_REG_READ is there, that means we were looking for a write
3671 register. See aarch64_ext_sysreg. */
3672 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
3673 *notes = _("reading from a write-only register");
3674 else if (aarch64_sys_regs[i].flags & F_REG_READ)
3675 *notes = _("writing to a read-only register");
3676 }
3677 }
3678
3679 if (name)
3680 snprintf (buf, size, "%s", name);
3681 else
3682 {
3683 /* Implementation-defined system register. */
3684 unsigned int value = opnd->sysreg.value;
3685 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3686 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3687 value & 0x7);
3688 }
3689 break;
3690
3691 case AARCH64_OPND_PSTATEFIELD:
3692 for (i = 0; aarch64_pstatefields[i].name; ++i)
3693 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3694 break;
3695 assert (aarch64_pstatefields[i].name);
3696 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3697 break;
3698
3699 case AARCH64_OPND_SYSREG_AT:
3700 case AARCH64_OPND_SYSREG_DC:
3701 case AARCH64_OPND_SYSREG_IC:
3702 case AARCH64_OPND_SYSREG_TLBI:
3703 case AARCH64_OPND_SYSREG_SR:
3704 snprintf (buf, size, "%s", opnd->sysins_op->name);
3705 break;
3706
3707 case AARCH64_OPND_BARRIER:
3708 snprintf (buf, size, "%s", opnd->barrier->name);
3709 break;
3710
3711 case AARCH64_OPND_BARRIER_ISB:
3712 /* Operand can be omitted, e.g. in DCPS1. */
3713 if (! optional_operand_p (opcode, idx)
3714 || (opnd->barrier->value
3715 != get_optional_operand_default_value (opcode)))
3716 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3717 break;
3718
3719 case AARCH64_OPND_PRFOP:
3720 if (opnd->prfop->name != NULL)
3721 snprintf (buf, size, "%s", opnd->prfop->name);
3722 else
3723 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3724 break;
3725
3726 case AARCH64_OPND_BARRIER_PSB:
3727 case AARCH64_OPND_BTI_TARGET:
3728 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
3729 snprintf (buf, size, "%s", opnd->hint_option->name);
3730 break;
3731
3732 default:
3733 assert (0);
3734 }
3735 }
3736 \f
3737 #define CPENC(op0,op1,crn,crm,op2) \
3738 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3739 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
3740 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3741 /* for 3.9.10 System Instructions */
3742 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
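/* Illustration only (not part of the original encoding tables): the shifts
   in CPENC, followed by the final ">> 5", pack the fields as
   op0[15:14]:op1[13:11]:CRn[10:7]:CRm[6:3]:op2[2:0], the same layout that
   the "s%u_%u_c%u_c%u_%u" printing of implementation-defined registers
   above decodes.  For example:

     CPENC (3, 3, C0, C0, 1)
       = (((3 << 19) | (3 << 16) | (0 << 12) | (0 << 8) | (1 << 5)) >> 5)
       = 0xd801    (the value used for "ctr_el0" below).  */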
3743
3744 #define C0 0
3745 #define C1 1
3746 #define C2 2
3747 #define C3 3
3748 #define C4 4
3749 #define C5 5
3750 #define C6 6
3751 #define C7 7
3752 #define C8 8
3753 #define C9 9
3754 #define C10 10
3755 #define C11 11
3756 #define C12 12
3757 #define C13 13
3758 #define C14 14
3759 #define C15 15
3760
3761 /* TODO: there is one more issue that needs to be resolved:
3762 1. handle cpu-implementation-defined system registers. */
3763 const aarch64_sys_reg aarch64_sys_regs [] =
3764 {
3765 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3766 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3767 { "elr_el1", CPEN_(0,C0,1), 0 },
3768 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3769 { "sp_el0", CPEN_(0,C1,0), 0 },
3770 { "spsel", CPEN_(0,C2,0), 0 },
3771 { "daif", CPEN_(3,C2,1), 0 },
3772 { "currentel", CPEN_(0,C2,2), F_REG_READ }, /* RO */
3773 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3774 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3775 { "nzcv", CPEN_(3,C2,0), 0 },
3776 { "ssbs", CPEN_(3,C2,6), F_ARCHEXT },
3777 { "fpcr", CPEN_(3,C4,0), 0 },
3778 { "fpsr", CPEN_(3,C4,1), 0 },
3779 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3780 { "dlr_el0", CPEN_(3,C5,1), 0 },
3781 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3782 { "elr_el2", CPEN_(4,C0,1), 0 },
3783 { "sp_el1", CPEN_(4,C1,0), 0 },
3784 { "spsr_irq", CPEN_(4,C3,0), 0 },
3785 { "spsr_abt", CPEN_(4,C3,1), 0 },
3786 { "spsr_und", CPEN_(4,C3,2), 0 },
3787 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3788 { "spsr_el3", CPEN_(6,C0,0), 0 },
3789 { "elr_el3", CPEN_(6,C0,1), 0 },
3790 { "sp_el2", CPEN_(6,C1,0), 0 },
3791 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3792 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3793 { "midr_el1", CPENC(3,0,C0,C0,0), F_REG_READ }, /* RO */
3794 { "ctr_el0", CPENC(3,3,C0,C0,1), F_REG_READ }, /* RO */
3795 { "mpidr_el1", CPENC(3,0,C0,C0,5), F_REG_READ }, /* RO */
3796 { "revidr_el1", CPENC(3,0,C0,C0,6), F_REG_READ }, /* RO */
3797 { "aidr_el1", CPENC(3,1,C0,C0,7), F_REG_READ }, /* RO */
3798 { "dczid_el0", CPENC(3,3,C0,C0,7), F_REG_READ }, /* RO */
3799 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), F_REG_READ }, /* RO */
3800 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), F_REG_READ }, /* RO */
3801 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), F_REG_READ }, /* RO */
3802 { "id_pfr2_el1", CPENC(3,0,C0,C3,4), F_ARCHEXT | F_REG_READ}, /* RO */
3803 { "id_afr0_el1", CPENC(3,0,C0,C1,3), F_REG_READ }, /* RO */
3804 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), F_REG_READ }, /* RO */
3805 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), F_REG_READ }, /* RO */
3806 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), F_REG_READ }, /* RO */
3807 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), F_REG_READ }, /* RO */
3808 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), F_REG_READ }, /* RO */
3809 { "id_isar0_el1", CPENC(3,0,C0,C2,0), F_REG_READ }, /* RO */
3810 { "id_isar1_el1", CPENC(3,0,C0,C2,1), F_REG_READ }, /* RO */
3811 { "id_isar2_el1", CPENC(3,0,C0,C2,2), F_REG_READ }, /* RO */
3812 { "id_isar3_el1", CPENC(3,0,C0,C2,3), F_REG_READ }, /* RO */
3813 { "id_isar4_el1", CPENC(3,0,C0,C2,4), F_REG_READ }, /* RO */
3814 { "id_isar5_el1", CPENC(3,0,C0,C2,5), F_REG_READ }, /* RO */
3815 { "mvfr0_el1", CPENC(3,0,C0,C3,0), F_REG_READ }, /* RO */
3816 { "mvfr1_el1", CPENC(3,0,C0,C3,1), F_REG_READ }, /* RO */
3817 { "mvfr2_el1", CPENC(3,0,C0,C3,2), F_REG_READ }, /* RO */
3818 { "ccsidr_el1", CPENC(3,1,C0,C0,0), F_REG_READ }, /* RO */
3819 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), F_REG_READ }, /* RO */
3820 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), F_REG_READ }, /* RO */
3821 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), F_REG_READ }, /* RO */
3822 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), F_REG_READ }, /* RO */
3823 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), F_REG_READ }, /* RO */
3824 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), F_REG_READ }, /* RO */
3825 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), F_REG_READ }, /* RO */
3826 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), F_REG_READ }, /* RO */
3827 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT | F_REG_READ }, /* RO */
3828 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), F_REG_READ }, /* RO */
3829 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), F_REG_READ }, /* RO */
3830 { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT | F_REG_READ }, /* RO */
3831 { "clidr_el1", CPENC(3,1,C0,C0,1), F_REG_READ }, /* RO */
3832 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 },
3833 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3834 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3835 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3836 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3837 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3838 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3839 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3840 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3841 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3842 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3843 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3844 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3845 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3846 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3847 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3848 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3849 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3850 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3851 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3852 { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3853 { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3854 { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3855 { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3856 { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3857 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3858 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3859 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3860 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3861 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3862 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3863 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3864 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3865 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3866 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3867 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3868 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3869 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3870 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3871 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3872 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3873 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3874 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3875 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3876 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3877 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3878 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3879 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3880 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3881 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3882 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3883 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3884 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3885 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3886 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3887 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3888 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3889 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3890 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3891 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3892 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT },
3893 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3894 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3895 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3896 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3897 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3898 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3899 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3900 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3901 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3902 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3903 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3904 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3905 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3906 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3907 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3908 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3909 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3910 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3911 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3912 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3913 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3914 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3915 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3916 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3917 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3918 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3919 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3920 { "rvbar_el1", CPENC(3,0,C12,C0,1), F_REG_READ }, /* RO */
3921 { "rvbar_el2", CPENC(3,4,C12,C0,1), F_REG_READ }, /* RO */
3922 { "rvbar_el3", CPENC(3,6,C12,C0,1), F_REG_READ }, /* RO */
3923 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3924 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3925 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3926 { "isr_el1", CPENC(3,0,C12,C1,0), F_REG_READ }, /* RO */
3927 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3928 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3929 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3930 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3931 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3932 { "rndr", CPENC(3,3,C2,C4,0), F_ARCHEXT | F_REG_READ }, /* RO */
3933 { "rndrrs", CPENC(3,3,C2,C4,1), F_ARCHEXT | F_REG_READ }, /* RO */
3934 { "tco", CPENC(3,3,C4,C2,7), F_ARCHEXT },
3935 { "tfsre0_el1", CPENC(3,0,C6,C6,1), F_ARCHEXT },
3936 { "tfsr_el1", CPENC(3,0,C6,C5,0), F_ARCHEXT },
3937 { "tfsr_el2", CPENC(3,4,C6,C5,0), F_ARCHEXT },
3938 { "tfsr_el3", CPENC(3,6,C6,C6,0), F_ARCHEXT },
3939 { "tfsr_el12", CPENC(3,5,C6,C6,0), F_ARCHEXT },
3940 { "rgsr_el1", CPENC(3,0,C1,C0,5), F_ARCHEXT },
3941 { "gcr_el1", CPENC(3,0,C1,C0,6), F_ARCHEXT },
3942 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3943 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RW */
3944 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3945 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3946 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3947 { "scxtnum_el0", CPENC(3,3,C13,C0,7), F_ARCHEXT },
3948 { "scxtnum_el1", CPENC(3,0,C13,C0,7), F_ARCHEXT },
3949 { "scxtnum_el2", CPENC(3,4,C13,C0,7), F_ARCHEXT },
3950 { "scxtnum_el12", CPENC(3,5,C13,C0,7), F_ARCHEXT },
3951 { "scxtnum_el3", CPENC(3,6,C13,C0,7), F_ARCHEXT },
3952 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3953 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RW */
3954 { "cntpct_el0", CPENC(3,3,C14,C0,1), F_REG_READ }, /* RO */
3955 { "cntvct_el0", CPENC(3,3,C14,C0,2), F_REG_READ }, /* RO */
3956 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3957 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3958 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3959 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3960 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3961 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3962 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3963 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3964 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3965 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3966 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3967 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3968 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3969 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3970 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3971 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3972 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3973 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3974 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3975 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3976 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3977 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3978 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3979 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3980 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3981 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3982 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3983 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3984 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3985 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3986 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), F_REG_READ }, /* r */
3987 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3988 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3989 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), F_REG_READ }, /* r */
3990 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), F_REG_WRITE }, /* w */
3991 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 },
3992 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 },
3993 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3994 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3995 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3996 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3997 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3998 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3999 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
4000 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
4001 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
4002 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
4003 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
4004 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
4005 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
4006 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
4007 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
4008 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
4009 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
4010 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
4011 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
4012 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
4013 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
4014 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
4015 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
4016 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
4017 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
4018 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
4019 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
4020 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
4021 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
4022 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
4023 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
4024 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
4025 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
4026 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
4027 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
4028 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
4029 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
4030 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
4031 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
4032 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
4033 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
4034 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
4035 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
4036 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
4037 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
4038 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
4039 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
4040 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
4041 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
4042 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
4043 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
4044 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
4045 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
4046 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
4047 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
4048 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
4049 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
4050 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
4051 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
4052 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
4053 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
4054 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
4055 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
4056 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
4057 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
4058 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
4059 { "mdrar_el1", CPENC(2,0,C1, C0, 0), F_REG_READ }, /* r */
4060 { "oslar_el1", CPENC(2,0,C1, C0, 4), F_REG_WRITE }, /* w */
4061 { "oslsr_el1", CPENC(2,0,C1, C1, 4), F_REG_READ }, /* r */
4062 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
4063 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
4064 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
4065 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
4066 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), F_REG_READ }, /* r */
4067 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
4068 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
4069 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
4070 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT | F_REG_READ }, /* ro */
4071 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
4072 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
4073 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
4074 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
4075 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
4076 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
4077 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* rw */
4078 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
4079 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
4080 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
4081 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
4082 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
4083 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
4084 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), F_REG_WRITE }, /* w */
4085 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
4086 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), F_REG_READ }, /* r */
4087 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), F_REG_READ }, /* r */
4088 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
4089 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
4090 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
4091 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
4092 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
4093 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
4094 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
4095 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
4096 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
4097 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
4098 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
4099 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
4100 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
4101 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
4102 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
4103 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
4104 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
4105 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
4106 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
4107 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
4108 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
4109 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
4110 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
4111 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
4112 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
4113 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
4114 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
4115 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
4116 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
4117 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
4118 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
4119 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
4120 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
4121 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
4122 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
4123 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
4124 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
4125 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
4126 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
4127 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
4128 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
4129 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
4130 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
4131 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
4132 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
4133 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
4134 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
4135 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
4136 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
4137 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
4138 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
4139 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
4140 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
4141 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
4142 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
4143 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
4144 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
4145 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
4146 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
4147 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
4148 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
4149 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
4150 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
4151 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
4152 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
4153 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
4154 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
4155 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
4156 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
4157 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
4158
4159 { "dit", CPEN_ (3, C2, 5), F_ARCHEXT },
4160 { "vstcr_el2", CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
4161 { "vsttbr_el2", CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
4162 { "cnthvs_tval_el2", CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
4163 { "cnthvs_cval_el2", CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
4164 { "cnthvs_ctl_el2", CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
4165 { "cnthps_tval_el2", CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
4166 { "cnthps_cval_el2", CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
4167 { "cnthps_ctl_el2", CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
4168 { "sder32_el2", CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
4169 { "vncr_el2", CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
4170 { 0, CPENC(0,0,0,0,0), 0 },
4171 };
4172
4173 bfd_boolean
4174 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4175 {
4176 return (reg->flags & F_DEPRECATED) != 0;
4177 }
4178
4179 bfd_boolean
4180 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
4181 const aarch64_sys_reg *reg)
4182 {
4183 if (!(reg->flags & F_ARCHEXT))
4184 return TRUE;
4185
4186 /* PAN. Values are from aarch64_sys_regs. */
4187 if (reg->value == CPEN_(0,C2,3)
4188 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4189 return FALSE;
4190
4191 /* SCXTNUM_ELx registers. */
4192 if ((reg->value == CPENC (3, 3, C13, C0, 7)
4193 || reg->value == CPENC (3, 0, C13, C0, 7)
4194 || reg->value == CPENC (3, 4, C13, C0, 7)
4195 || reg->value == CPENC (3, 6, C13, C0, 7)
4196 || reg->value == CPENC (3, 5, C13, C0, 7))
4197 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SCXTNUM))
4198 return FALSE;
4199
4200 /* ID_PFR2_EL1 register. */
4201 if (reg->value == CPENC(3, 0, C0, C3, 4)
4202 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_ID_PFR2))
4203 return FALSE;
4204
4205 /* SSBS. Values are from aarch64_sys_regs. */
4206 if (reg->value == CPEN_(3,C2,6)
4207 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
4208 return FALSE;
4209
4210 /* Virtualization host extensions: system registers. */
4211 if ((reg->value == CPENC (3, 4, C2, C0, 1)
4212 || reg->value == CPENC (3, 4, C13, C0, 1)
4213 || reg->value == CPENC (3, 4, C14, C3, 0)
4214 || reg->value == CPENC (3, 4, C14, C3, 1)
4215 || reg->value == CPENC (3, 4, C14, C3, 2))
4216 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4217 return FALSE;
4218
4219 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
4220 if ((reg->value == CPEN_ (5, C0, 0)
4221 || reg->value == CPEN_ (5, C0, 1)
4222 || reg->value == CPENC (3, 5, C1, C0, 0)
4223 || reg->value == CPENC (3, 5, C1, C0, 2)
4224 || reg->value == CPENC (3, 5, C2, C0, 0)
4225 || reg->value == CPENC (3, 5, C2, C0, 1)
4226 || reg->value == CPENC (3, 5, C2, C0, 2)
4227 || reg->value == CPENC (3, 5, C5, C1, 0)
4228 || reg->value == CPENC (3, 5, C5, C1, 1)
4229 || reg->value == CPENC (3, 5, C5, C2, 0)
4230 || reg->value == CPENC (3, 5, C6, C0, 0)
4231 || reg->value == CPENC (3, 5, C10, C2, 0)
4232 || reg->value == CPENC (3, 5, C10, C3, 0)
4233 || reg->value == CPENC (3, 5, C12, C0, 0)
4234 || reg->value == CPENC (3, 5, C13, C0, 1)
4235 || reg->value == CPENC (3, 5, C14, C1, 0))
4236 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4237 return FALSE;
4238
4239 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
4240 if ((reg->value == CPENC (3, 5, C14, C2, 0)
4241 || reg->value == CPENC (3, 5, C14, C2, 1)
4242 || reg->value == CPENC (3, 5, C14, C2, 2)
4243 || reg->value == CPENC (3, 5, C14, C3, 0)
4244 || reg->value == CPENC (3, 5, C14, C3, 1)
4245 || reg->value == CPENC (3, 5, C14, C3, 2))
4246 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4247 return FALSE;
4248
4249 /* ARMv8.2 features. */
4250
4251 /* ID_AA64MMFR2_EL1. */
4252 if (reg->value == CPENC (3, 0, C0, C7, 2)
4253 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4254 return FALSE;
4255
4256 /* PSTATE.UAO. */
4257 if (reg->value == CPEN_ (0, C2, 4)
4258 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4259 return FALSE;
4260
4261 /* RAS extension. */
4262
4263 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
4264 ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1. */
4265 if ((reg->value == CPENC (3, 0, C5, C3, 0)
4266 || reg->value == CPENC (3, 0, C5, C3, 1)
4267 || reg->value == CPENC (3, 0, C5, C3, 2)
4268 || reg->value == CPENC (3, 0, C5, C3, 3)
4269 || reg->value == CPENC (3, 0, C5, C4, 0)
4270 || reg->value == CPENC (3, 0, C5, C4, 1)
4271 || reg->value == CPENC (3, 0, C5, C4, 2)
4272 || reg->value == CPENC (3, 0, C5, C4, 3)
4273 || reg->value == CPENC (3, 0, C5, C5, 0)
4274 || reg->value == CPENC (3, 0, C5, C5, 1))
4275 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4276 return FALSE;
4277
4278 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
4279 if ((reg->value == CPENC (3, 4, C5, C2, 3)
4280 || reg->value == CPENC (3, 0, C12, C1, 1)
4281 || reg->value == CPENC (3, 4, C12, C1, 1))
4282 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4283 return FALSE;
4284
4285 /* Statistical Profiling extension. */
4286 if ((reg->value == CPENC (3, 0, C9, C10, 0)
4287 || reg->value == CPENC (3, 0, C9, C10, 1)
4288 || reg->value == CPENC (3, 0, C9, C10, 3)
4289 || reg->value == CPENC (3, 0, C9, C10, 7)
4290 || reg->value == CPENC (3, 0, C9, C9, 0)
4291 || reg->value == CPENC (3, 0, C9, C9, 2)
4292 || reg->value == CPENC (3, 0, C9, C9, 3)
4293 || reg->value == CPENC (3, 0, C9, C9, 4)
4294 || reg->value == CPENC (3, 0, C9, C9, 5)
4295 || reg->value == CPENC (3, 0, C9, C9, 6)
4296 || reg->value == CPENC (3, 0, C9, C9, 7)
4297 || reg->value == CPENC (3, 4, C9, C9, 0)
4298 || reg->value == CPENC (3, 5, C9, C9, 0))
4299 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
4300 return FALSE;
4301
4302 /* ARMv8.3 Pointer authentication keys. */
4303 if ((reg->value == CPENC (3, 0, C2, C1, 0)
4304 || reg->value == CPENC (3, 0, C2, C1, 1)
4305 || reg->value == CPENC (3, 0, C2, C1, 2)
4306 || reg->value == CPENC (3, 0, C2, C1, 3)
4307 || reg->value == CPENC (3, 0, C2, C2, 0)
4308 || reg->value == CPENC (3, 0, C2, C2, 1)
4309 || reg->value == CPENC (3, 0, C2, C2, 2)
4310 || reg->value == CPENC (3, 0, C2, C2, 3)
4311 || reg->value == CPENC (3, 0, C2, C3, 0)
4312 || reg->value == CPENC (3, 0, C2, C3, 1))
4313 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
4314 return FALSE;
4315
4316 /* SVE. */
4317 if ((reg->value == CPENC (3, 0, C0, C4, 4)
4318 || reg->value == CPENC (3, 0, C1, C2, 0)
4319 || reg->value == CPENC (3, 4, C1, C2, 0)
4320 || reg->value == CPENC (3, 6, C1, C2, 0)
4321 || reg->value == CPENC (3, 5, C1, C2, 0)
4322 || reg->value == CPENC (3, 0, C0, C0, 7))
4323 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
4324 return FALSE;
4325
4326 /* ARMv8.4 features. */
4327
4328 /* PSTATE.DIT. */
4329 if (reg->value == CPEN_ (3, C2, 5)
4330 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4331 return FALSE;
4332
4333 /* Virtualization extensions. */
4334 if ((reg->value == CPENC(3, 4, C2, C6, 2)
4335 || reg->value == CPENC(3, 4, C2, C6, 0)
4336 || reg->value == CPENC(3, 4, C14, C4, 0)
4337 || reg->value == CPENC(3, 4, C14, C4, 2)
4338 || reg->value == CPENC(3, 4, C14, C4, 1)
4339 || reg->value == CPENC(3, 4, C14, C5, 0)
4340 || reg->value == CPENC(3, 4, C14, C5, 2)
4341 || reg->value == CPENC(3, 4, C14, C5, 1)
4342 || reg->value == CPENC(3, 4, C1, C3, 1)
4343 || reg->value == CPENC(3, 4, C2, C2, 0))
4344 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4345 return FALSE;
4346
4347 /* ARMv8.4 TLB instructions. */
4348 if ((reg->value == CPENS (0, C8, C1, 0)
4349 || reg->value == CPENS (0, C8, C1, 1)
4350 || reg->value == CPENS (0, C8, C1, 2)
4351 || reg->value == CPENS (0, C8, C1, 3)
4352 || reg->value == CPENS (0, C8, C1, 5)
4353 || reg->value == CPENS (0, C8, C1, 7)
4354 || reg->value == CPENS (4, C8, C4, 0)
4355 || reg->value == CPENS (4, C8, C4, 4)
4356 || reg->value == CPENS (4, C8, C1, 1)
4357 || reg->value == CPENS (4, C8, C1, 5)
4358 || reg->value == CPENS (4, C8, C1, 6)
4359 || reg->value == CPENS (6, C8, C1, 1)
4360 || reg->value == CPENS (6, C8, C1, 5)
4361 || reg->value == CPENS (4, C8, C1, 0)
4362 || reg->value == CPENS (4, C8, C1, 4)
4363 || reg->value == CPENS (6, C8, C1, 0)
4364 || reg->value == CPENS (0, C8, C6, 1)
4365 || reg->value == CPENS (0, C8, C6, 3)
4366 || reg->value == CPENS (0, C8, C6, 5)
4367 || reg->value == CPENS (0, C8, C6, 7)
4368 || reg->value == CPENS (0, C8, C2, 1)
4369 || reg->value == CPENS (0, C8, C2, 3)
4370 || reg->value == CPENS (0, C8, C2, 5)
4371 || reg->value == CPENS (0, C8, C2, 7)
4372 || reg->value == CPENS (0, C8, C5, 1)
4373 || reg->value == CPENS (0, C8, C5, 3)
4374 || reg->value == CPENS (0, C8, C5, 5)
4375 || reg->value == CPENS (0, C8, C5, 7)
4376 || reg->value == CPENS (4, C8, C0, 2)
4377 || reg->value == CPENS (4, C8, C0, 6)
4378 || reg->value == CPENS (4, C8, C4, 2)
4379 || reg->value == CPENS (4, C8, C4, 6)
4380 || reg->value == CPENS (4, C8, C4, 3)
4381 || reg->value == CPENS (4, C8, C4, 7)
4382 || reg->value == CPENS (4, C8, C6, 1)
4383 || reg->value == CPENS (4, C8, C6, 5)
4384 || reg->value == CPENS (4, C8, C2, 1)
4385 || reg->value == CPENS (4, C8, C2, 5)
4386 || reg->value == CPENS (4, C8, C5, 1)
4387 || reg->value == CPENS (4, C8, C5, 5)
4388 || reg->value == CPENS (6, C8, C6, 1)
4389 || reg->value == CPENS (6, C8, C6, 5)
4390 || reg->value == CPENS (6, C8, C2, 1)
4391 || reg->value == CPENS (6, C8, C2, 5)
4392 || reg->value == CPENS (6, C8, C5, 1)
4393 || reg->value == CPENS (6, C8, C5, 5))
4394 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4395 return FALSE;
4396
4397 /* Random Number Instructions. For now they are available
4398 (and optional) only with ARMv8.5-A. */
4399 if ((reg->value == CPENC (3, 3, C2, C4, 0)
4400 || reg->value == CPENC (3, 3, C2, C4, 1))
4401 && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RNG)
4402 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_5)))
4403 return FALSE;
4404
4405 /* System Registers in ARMv8.5-A with AARCH64_FEATURE_MEMTAG. */
4406 if ((reg->value == CPENC (3, 3, C4, C2, 7)
4407 || reg->value == CPENC (3, 0, C6, C6, 1)
4408 || reg->value == CPENC (3, 0, C6, C5, 0)
4409 || reg->value == CPENC (3, 4, C6, C5, 0)
4410 || reg->value == CPENC (3, 6, C6, C6, 0)
4411 || reg->value == CPENC (3, 5, C6, C6, 0)
4412 || reg->value == CPENC (3, 0, C1, C0, 5)
4413 || reg->value == CPENC (3, 0, C1, C0, 6))
4414 && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG)))
4415 return FALSE;
4416
4417 return TRUE;
4418 }
4419
4420 /* The CPENC macro is misleading here: the values below are not in CPENC
4421 form but in op1:op2 form, i.e. (op1 << 3) | op2. The fields are encoded
4422 by ins_pstatefield, which just shifts the value by the width of each field
4423 in a loop, so if you ran them through CPENC only the first field would be
4424 set and the rest masked out to 0. As an example, for op1 = 3, op2 = 2 the
4425 CPENC shifts would give 0b110000000001000000 (0x30040) before the final
4426 shift, while what you want is simply 0b011010 (0x1a). */
4427 const aarch64_sys_reg aarch64_pstatefields [] =
4428 {
4429 { "spsel", 0x05, 0 },
4430 { "daifset", 0x1e, 0 },
4431 { "daifclr", 0x1f, 0 },
4432 { "pan", 0x04, F_ARCHEXT },
4433 { "uao", 0x03, F_ARCHEXT },
4434 { "ssbs", 0x19, F_ARCHEXT },
4435 { "dit", 0x1a, F_ARCHEXT },
4436 { "tco", 0x1c, F_ARCHEXT },
4437 { 0, CPENC(0,0,0,0,0), 0 },
4438 };
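/* Illustration only: given the op1:op2 packing described above, the fields
   can be recovered from a table value with

     unsigned op1 = (value >> 3) & 0x7;
     unsigned op2 = value & 0x7;

   e.g. the "dit" entry 0x1a yields op1 = 3, op2 = 2, the op1/op2 fields of
   the corresponding MSR (immediate) encoding.  */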
4439
4440 bfd_boolean
4441 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4442 const aarch64_sys_reg *reg)
4443 {
4444 if (!(reg->flags & F_ARCHEXT))
4445 return TRUE;
4446
4447 /* PAN. Values are from aarch64_pstatefields. */
4448 if (reg->value == 0x04
4449 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4450 return FALSE;
4451
4452 /* UAO. Values are from aarch64_pstatefields. */
4453 if (reg->value == 0x03
4454 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4455 return FALSE;
4456
4457 /* SSBS. Values are from aarch64_pstatefields. */
4458 if (reg->value == 0x19
4459 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
4460 return FALSE;
4461
4462 /* DIT. Values are from aarch64_pstatefields. */
4463 if (reg->value == 0x1a
4464 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4465 return FALSE;
4466
4467 /* TCO. Values are from aarch64_pstatefields. */
4468 if (reg->value == 0x1c
4469 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
4470 return FALSE;
4471
4472 return TRUE;
4473 }
4474
4475 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
4476 {
4477 { "ialluis", CPENS(0,C7,C1,0), 0 },
4478 { "iallu", CPENS(0,C7,C5,0), 0 },
4479 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
4480 { 0, CPENS(0,0,0,0), 0 }
4481 };
4482
4483 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
4484 {
4485 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
4486 { "gva", CPENS (3, C7, C4, 3), F_HASXT | F_ARCHEXT },
4487 { "gzva", CPENS (3, C7, C4, 4), F_HASXT | F_ARCHEXT },
4488 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
4489 { "igvac", CPENS (0, C7, C6, 3), F_HASXT | F_ARCHEXT },
4490 { "igsw", CPENS (0, C7, C6, 4), F_HASXT | F_ARCHEXT },
4491 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
4492 { "igdvac", CPENS (0, C7, C6, 5), F_HASXT | F_ARCHEXT },
4493 { "igdsw", CPENS (0, C7, C6, 6), F_HASXT | F_ARCHEXT },
4494 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
4495 { "cgvac", CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
4496 { "cgdvac", CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
4497 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
4498 { "cgsw", CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
4499 { "cgdsw", CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
4500 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
4501 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
4502 { "cgvap", CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
4503 { "cgdvap", CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
4504 { "cvadp", CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
4505 { "cgvadp", CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
4506 { "cgdvadp", CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
4507 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
4508 { "cigvac", CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
4509 { "cigdvac", CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
4510 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
4511 { "cigsw", CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
4512 { "cigdsw", CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
4513 { 0, CPENS(0,0,0,0), 0 }
4514 };
4515
4516 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
4517 {
4518 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
4519 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
4520 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
4521 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
4522 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
4523 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
4524 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
4525 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
4526 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
4527 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
4528 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
4529 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
4530 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
4531 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
4532 { 0, CPENS(0,0,0,0), 0 }
4533 };
4534
4535 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
4536 {
4537 { "vmalle1", CPENS(0,C8,C7,0), 0 },
4538 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
4539 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
4540 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
4541 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
4542 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
4543 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
4544 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
4545 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
4546 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
4547 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
4548 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
4549 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
4550 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
4551 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
4552 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
4553 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
4554 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
4555 { "alle2", CPENS(4,C8,C7,0), 0 },
4556 { "alle2is", CPENS(4,C8,C3,0), 0 },
4557 { "alle1", CPENS(4,C8,C7,4), 0 },
4558 { "alle1is", CPENS(4,C8,C3,4), 0 },
4559 { "alle3", CPENS(6,C8,C7,0), 0 },
4560 { "alle3is", CPENS(6,C8,C3,0), 0 },
4561 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
4562 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
4563 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
4564 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
4565 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
4566 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
4567 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
4568 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
4569
4570 { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
4571 { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
4572 { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
4573 { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
4574 { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
4575 { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
4576 { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
4577 { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
4578 { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
4579 { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
4580 { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
4581 { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
4582 { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
4583 { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
4584 { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
4585 { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },
4586
4587 { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
4588 { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
4589 { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
4590 { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
4591 { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
4592 { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
4593 { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
4594 { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
4595 { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
4596 { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
4597 { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
4598 { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
4599 { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
4600 { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
4601 { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
4602 { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
4603 { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
4604 { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
4605 { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
4606 { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
4607 { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
4608 { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
4609 { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
4610 { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
4611 { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
4612 { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
4613 { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
4614 { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
4615 { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
4616 { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },
4617
4618 { 0, CPENS(0,0,0,0), 0 }
4619 };
4620
4621 const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
4622 {
4623 /* RCTX is somewhat unique in that it has different values
4624 (op2) depending on the instruction in which it is used (cfp/dvp/cpp).
4625 Thus op2 is masked out and instead encoded directly in the
4626 aarch64_opcode_table entries for the respective instructions. */
4627 { "rctx", CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */
4628
4629 { 0, CPENS(0,0,0,0), 0 }
4630 };
4631
4632 bfd_boolean
4633 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4634 {
4635 return (sys_ins_reg->flags & F_HASXT) != 0;
4636 }
4637
4638 extern bfd_boolean
4639 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4640 const aarch64_sys_ins_reg *reg)
4641 {
4642 if (!(reg->flags & F_ARCHEXT))
4643 return TRUE;
4644
4645 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4646 if (reg->value == CPENS (3, C7, C12, 1)
4647 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4648 return FALSE;
4649
4650 /* DC CVADP. Values are from aarch64_sys_regs_dc. */
4651 if (reg->value == CPENS (3, C7, C13, 1)
4652 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
4653 return FALSE;
4654
4655 /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension. */
4656 if ((reg->value == CPENS (0, C7, C6, 3)
4657 || reg->value == CPENS (0, C7, C6, 4)
4658 || reg->value == CPENS (0, C7, C10, 4)
4659 || reg->value == CPENS (0, C7, C14, 4)
4660 || reg->value == CPENS (3, C7, C10, 3)
4661 || reg->value == CPENS (3, C7, C12, 3)
4662 || reg->value == CPENS (3, C7, C13, 3)
4663 || reg->value == CPENS (3, C7, C14, 3)
4664 || reg->value == CPENS (3, C7, C4, 3)
4665 || reg->value == CPENS (0, C7, C6, 5)
4666 || reg->value == CPENS (0, C7, C6, 6)
4667 || reg->value == CPENS (0, C7, C10, 6)
4668 || reg->value == CPENS (0, C7, C14, 6)
4669 || reg->value == CPENS (3, C7, C10, 5)
4670 || reg->value == CPENS (3, C7, C12, 5)
4671 || reg->value == CPENS (3, C7, C13, 5)
4672 || reg->value == CPENS (3, C7, C14, 5)
4673 || reg->value == CPENS (3, C7, C4, 4))
4674 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
4675 return FALSE;
4676
4677 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4678 if ((reg->value == CPENS (0, C7, C9, 0)
4679 || reg->value == CPENS (0, C7, C9, 1))
4680 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4681 return FALSE;
4682
4683 /* CFP/DVP/CPP RCTX: Values are from aarch64_sys_regs_sr. */
4684 if (reg->value == CPENS (3, C7, C3, 0)
4685 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
4686 return FALSE;
4687
4688 return TRUE;
4689 }
4690
4691 #undef C0
4692 #undef C1
4693 #undef C2
4694 #undef C3
4695 #undef C4
4696 #undef C5
4697 #undef C6
4698 #undef C7
4699 #undef C8
4700 #undef C9
4701 #undef C10
4702 #undef C11
4703 #undef C12
4704 #undef C13
4705 #undef C14
4706 #undef C15
4707
4708 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
4709 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
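/* For example, BITS (insn, 9, 5) extracts the five bits at [9:5] of INSN,
   used below in verify_ldpsw for the base register number N.  */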
4710
4711 static enum err_type
4712 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
4713 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
4714 bfd_boolean encoding ATTRIBUTE_UNUSED,
4715 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4716 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4717 {
4718 int t = BITS (insn, 4, 0);
4719 int n = BITS (insn, 9, 5);
4720 int t2 = BITS (insn, 14, 10);
4721
4722 if (BIT (insn, 23))
4723 {
4724 /* Write back enabled. */
4725 if ((t == n || t2 == n) && n != 31)
4726 return ERR_UND;
4727 }
4728
4729 if (BIT (insn, 22))
4730 {
4731 /* Load */
4732 if (t == t2)
4733 return ERR_UND;
4734 }
4735
4736 return ERR_OK;
4737 }
4738
4739 /* Verifier for vector-by-element instructions with 3 operands, where
4740 the condition `if sz:L == 11 then UNDEFINED` holds. */
4741
4742 static enum err_type
4743 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
4744 bfd_vma pc ATTRIBUTE_UNUSED, bfd_boolean encoding,
4745 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4746 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4747 {
4748 const aarch64_insn undef_pattern = 0x3;
4749 aarch64_insn value;
4750
4751 assert (inst->opcode);
4752 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
4753 value = encoding ? inst->value : insn;
4754 assert (value);
4755
4756 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
4757 return ERR_UND;
4758
4759 return ERR_OK;
4760 }
4761
4762 /* Initialize an instruction sequence insn_sequence with the instruction INST.
4763 If INST is NULL the given insn_sequence is cleared, leaving no constraint
4764 sequence active. */
4765
4766 void
4767 init_insn_sequence (const struct aarch64_inst *inst,
4768 aarch64_instr_sequence *insn_sequence)
4769 {
4770 int num_req_entries = 0;
4771 insn_sequence->next_insn = 0;
4772 insn_sequence->num_insns = num_req_entries;
4773 if (insn_sequence->instr)
4774 XDELETE (insn_sequence->instr);
4775 insn_sequence->instr = NULL;
4776
4777 if (inst)
4778 {
4779 insn_sequence->instr = XNEW (aarch64_inst);
4780 memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
4781 }
4782
4783 /* Handle all the cases here. May need to think of something smarter than
4784 a giant if/else chain if this grows. At that time, a lookup table may be
4785 best. */
4786 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
4787 num_req_entries = 1;
4788
4789 if (insn_sequence->current_insns)
4790 XDELETEVEC (insn_sequence->current_insns);
4791 insn_sequence->current_insns = NULL;
4792
4793 if (num_req_entries != 0)
4794 {
4795 size_t size = num_req_entries * sizeof (aarch64_inst);
4796 insn_sequence->current_insns
4797 = (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
4798 memset (insn_sequence->current_insns, 0, size);
4799 }
4800 }
4801
4802
4803 /* This function verifies that the instruction INST adheres to its specified
4804 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
4805 returned and MISMATCH_DETAIL contains the reason why verification failed.
4806
4807 The function is called both during assembly and disassembly. If assembling
4808 then ENCODING will be TRUE, else FALSE. If dissassembling PC will be set
4809 and will contain the PC of the current instruction w.r.t to the section.
4810
4811 If ENCODING and PC=0 then you are at a start of a section. The constraints
4812 are verified against the given state insn_sequence which is updated as it
4813 transitions through the verification. */
4814
4815 enum err_type
4816 verify_constraints (const struct aarch64_inst *inst,
4817 const aarch64_insn insn ATTRIBUTE_UNUSED,
4818 bfd_vma pc,
4819 bfd_boolean encoding,
4820 aarch64_operand_error *mismatch_detail,
4821 aarch64_instr_sequence *insn_sequence)
4822 {
4823 assert (inst);
4824 assert (inst->opcode);
4825
4826 const struct aarch64_opcode *opcode = inst->opcode;
4827 if (!opcode->constraints && !insn_sequence->instr)
4828 return ERR_OK;
4829
4830 assert (insn_sequence);
4831
4832 enum err_type res = ERR_OK;
4833
4834 /* This instruction puts a constraint on the insn_sequence. */
4835 if (opcode->flags & F_SCAN)
4836 {
4837 if (insn_sequence->instr)
4838 {
4839 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4840 mismatch_detail->error = _("instruction opens new dependency "
4841 "sequence without ending previous one");
4842 mismatch_detail->index = -1;
4843 mismatch_detail->non_fatal = TRUE;
4844 res = ERR_VFI;
4845 }
4846
4847 init_insn_sequence (inst, insn_sequence);
4848 return res;
4849 }
4850
4851 /* Verify constraints on an existing sequence. */
4852 if (insn_sequence->instr)
4853 {
4854 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
4855 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
4856 closed a previous one that we should have. */
4857 if (!encoding && pc == 0)
4858 {
4859 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4860 mismatch_detail->error = _("previous `movprfx' sequence not closed");
4861 mismatch_detail->index = -1;
4862 mismatch_detail->non_fatal = TRUE;
4863 res = ERR_VFI;
4864 /* Reset the sequence. */
4865 init_insn_sequence (NULL, insn_sequence);
4866 return res;
4867 }
4868
4869 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
4870 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
4871 {
4872 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4873 instruction for better error messages. */
4874 if (!opcode->avariant
4875 || !(*opcode->avariant &
4876 (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
4877 {
4878 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4879 mismatch_detail->error = _("SVE instruction expected after "
4880 "`movprfx'");
4881 mismatch_detail->index = -1;
4882 mismatch_detail->non_fatal = TRUE;
4883 res = ERR_VFI;
4884 goto done;
4885 }
4886
4887 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4888 instruction that is allowed to be used with a MOVPRFX. */
4889 if (!(opcode->constraints & C_SCAN_MOVPRFX))
4890 {
4891 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4892 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
4893 "expected");
4894 mismatch_detail->index = -1;
4895 mismatch_detail->non_fatal = TRUE;
4896 res = ERR_VFI;
4897 goto done;
4898 }
4899
4900 /* Next check for usage of the predicate register. */
4901 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
4902 aarch64_opnd_info blk_pred, inst_pred;
4903 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
4904 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
4905 bfd_boolean predicated = FALSE;
4906 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
4907
4908 /* Determine if the movprfx instruction used is predicated or not. */
4909 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
4910 {
4911 predicated = TRUE;
4912 blk_pred = insn_sequence->instr->operands[1];
4913 }
4914
4915 unsigned char max_elem_size = 0;
4916 unsigned char current_elem_size;
4917 int num_op_used = 0, last_op_usage = 0;
4918 int i, inst_pred_idx = -1;
4919 int num_ops = aarch64_num_of_operands (opcode);
4920 for (i = 0; i < num_ops; i++)
4921 {
4922 aarch64_opnd_info inst_op = inst->operands[i];
4923 switch (inst_op.type)
4924 {
4925 case AARCH64_OPND_SVE_Zd:
4926 case AARCH64_OPND_SVE_Zm_5:
4927 case AARCH64_OPND_SVE_Zm_16:
4928 case AARCH64_OPND_SVE_Zn:
4929 case AARCH64_OPND_SVE_Zt:
4930 case AARCH64_OPND_SVE_Vm:
4931 case AARCH64_OPND_SVE_Vn:
4932 case AARCH64_OPND_Va:
4933 case AARCH64_OPND_Vn:
4934 case AARCH64_OPND_Vm:
4935 case AARCH64_OPND_Sn:
4936 case AARCH64_OPND_Sm:
4937 case AARCH64_OPND_Rn:
4938 case AARCH64_OPND_Rm:
4939 case AARCH64_OPND_Rn_SP:
4940 case AARCH64_OPND_Rt_SP:
4941 case AARCH64_OPND_Rm_SP:
4942 if (inst_op.reg.regno == blk_dest.reg.regno)
4943 {
4944 num_op_used++;
4945 last_op_usage = i;
4946 }
4947 current_elem_size
4948 = aarch64_get_qualifier_esize (inst_op.qualifier);
4949 if (current_elem_size > max_elem_size)
4950 max_elem_size = current_elem_size;
4951 break;
4952 case AARCH64_OPND_SVE_Pd:
4953 case AARCH64_OPND_SVE_Pg3:
4954 case AARCH64_OPND_SVE_Pg4_5:
4955 case AARCH64_OPND_SVE_Pg4_10:
4956 case AARCH64_OPND_SVE_Pg4_16:
4957 case AARCH64_OPND_SVE_Pm:
4958 case AARCH64_OPND_SVE_Pn:
4959 case AARCH64_OPND_SVE_Pt:
4960 inst_pred = inst_op;
4961 inst_pred_idx = i;
4962 break;
4963 default:
4964 break;
4965 }
4966 }
4967
4968 assert (max_elem_size != 0);
4969 aarch64_opnd_info inst_dest = inst->operands[0];
4970 /* Determine the size that should be used to compare against the
4971 movprfx size. */
4972 current_elem_size
4973 = opcode->constraints & C_MAX_ELEM
4974 ? max_elem_size
4975 : aarch64_get_qualifier_esize (inst_dest.qualifier);
4976
4977 /* If movprfx is predicated do some extra checks. */
4978 if (predicated)
4979 {
4980 /* The instruction must be predicated. */
4981 if (inst_pred_idx < 0)
4982 {
4983 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4984 mismatch_detail->error = _("predicated instruction expected "
4985 "after `movprfx'");
4986 mismatch_detail->index = -1;
4987 mismatch_detail->non_fatal = TRUE;
4988 res = ERR_VFI;
4989 goto done;
4990 }
4991
4992 /* The instruction must have a merging predicate. */
4993 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
4994 {
4995 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4996 mismatch_detail->error = _("merging predicate expected due "
4997 "to preceding `movprfx'");
4998 mismatch_detail->index = inst_pred_idx;
4999 mismatch_detail->non_fatal = TRUE;
5000 res = ERR_VFI;
5001 goto done;
5002 }
5003
5004 /* The same predicate register must be used in the instruction. */
5005 if (blk_pred.reg.regno != inst_pred.reg.regno)
5006 {
5007 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5008 mismatch_detail->error = _("predicate register differs "
5009 "from that in preceding "
5010 "`movprfx'");
5011 mismatch_detail->index = inst_pred_idx;
5012 mismatch_detail->non_fatal = TRUE;
5013 res = ERR_VFI;
5014 goto done;
5015 }
5016 }
5017
5018 /* Destructive operations reuse the destination register as a source, so
5019 one extra use of the movprfx destination register is allowed. */
5020 int allowed_usage
5021 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
5022
5023 /* The movprfx output register is not used at all. */
5024 if (num_op_used == 0)
5025 {
5026 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5027 mismatch_detail->error = _("output register of preceding "
5028 "`movprfx' not used in current "
5029 "instruction");
5030 mismatch_detail->index = 0;
5031 mismatch_detail->non_fatal = TRUE;
5032 res = ERR_VFI;
5033 goto done;
5034 }
5035
5036 /* We now know it is used; next determine exactly where it is used. */
5037 if (blk_dest.reg.regno != inst_dest.reg.regno)
5038 {
5039 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5040 mismatch_detail->error = _("output register of preceding "
5041 "`movprfx' expected as output");
5042 mismatch_detail->index = 0;
5043 mismatch_detail->non_fatal = TRUE;
5044 res = ERR_VFI;
5045 goto done;
5046 }
5047
5048 /* The register is used more times than this opcode type allows. */
5049 if (num_op_used > allowed_usage)
5050 {
5051 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5052 mismatch_detail->error = _("output register of preceding "
5053 "`movprfx' used as input");
5054 mismatch_detail->index = last_op_usage;
5055 mismatch_detail->non_fatal = TRUE;
5056 res = ERR_VFI;
5057 goto done;
5058 }
5059
5060 /* The only remaining check is on the qualifiers: the register must
5061 have the same maximum element size. */
5062 if (inst_dest.qualifier
5063 && blk_dest.qualifier
5064 && current_elem_size
5065 != aarch64_get_qualifier_esize (blk_dest.qualifier))
5066 {
5067 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5068 mismatch_detail->error = _("register size not compatible with "
5069 "previous `movprfx'");
5070 mismatch_detail->index = 0;
5071 mismatch_detail->non_fatal = TRUE;
5072 res = ERR_VFI;
5073 goto done;
5074 }
5075 }
5076
5077 done:
5078 /* Add the new instruction to the sequence. */
5079 memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
5080 inst, sizeof (aarch64_inst));
5081
5082 /* Check if sequence is now full. */
5083 if (insn_sequence->next_insn >= insn_sequence->num_insns)
5084 {
5085 /* Sequence is full, but we don't have anything special to do for now,
5086 so clear and reset it. */
5087 init_insn_sequence (NULL, insn_sequence);
5088 }
5089 }
5090
5091 return res;
5092 }
5093
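/* Illustration (added for exposition, not part of the original source):
   one sequence the checks above accept and one they reject.  A predicated
   MOVPRFX must be followed by a predicated, destructive SVE instruction
   that writes the same register with the same merging predicate and
   element size:

     movprfx z0.s, p0/m, z1.s
     add     z0.s, p0/m, z0.s, z2.s    // accepted

   Using the prefixed register as an additional input exceeds the number
   of uses allowed for a destructive opcode and is reported as a
   non-fatal ERR_VFI:

     movprfx z0.s, p0/m, z1.s
     add     z0.s, p0/m, z0.s, z0.s    // rejected: z0 also used as input  */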
5094
5095 /* Return true if VALUE cannot be moved into an SVE register using DUP
5096 (with any element size, not just ESIZE) and if using DUPM would
5097 therefore be OK. ESIZE is the number of bytes in the immediate. */
5098
5099 bfd_boolean
5100 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
5101 {
5102 int64_t svalue = uvalue;
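/* UPPER has every bit above the low ESIZE bytes set.  The shift is split
   in two so that the shift count stays below 64 even when ESIZE is 8.  */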
5103 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
5104
5105 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
5106 return FALSE;
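/* Reduce SVALUE to the narrowest element that still describes UVALUE,
   either because ESIZE is already that narrow or because the value is
   the same pattern replicated.  A value that reduces to a single byte
   can always be encoded by DUP, so DUPM is not needed.  */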
5107 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
5108 {
5109 svalue = (int32_t) uvalue;
5110 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
5111 {
5112 svalue = (int16_t) uvalue;
5113 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
5114 return FALSE;
5115 }
5116 }
5117 if ((svalue & 0xff) == 0)
5118 svalue /= 256;
5119 return svalue < -128 || svalue >= 128;
5120 }
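/* Illustration (added for exposition; the values below are arbitrary):
   with ESIZE == 2,
     0x0056 -> FALSE  (DUP can encode the immediate directly),
     0x3000 -> FALSE  (DUP can encode it as #0x30, LSL #8),
     0x1234 -> TRUE   (no DUP encoding exists, so the caller may fall back
                       to DUPM if the value is a valid bitmask immediate).  */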
5121
5122 /* Include the opcode description table as well as the operand description
5123 table. */
5124 #define VERIFIER(x) verify_##x
5125 #include "aarch64-tbl.h"