Implement Read/Write constraints on system registers on AArch64
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
34 #ifdef DEBUG_AARCH64
35 int debug_dump = FALSE;
36 #endif /* DEBUG_AARCH64 */
37
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array[32] = {
41 /* 0-7. */
42 "pow2",
43 "vl1",
44 "vl2",
45 "vl3",
46 "vl4",
47 "vl5",
48 "vl6",
49 "vl7",
50 /* 8-15. */
51 "vl8",
52 "vl16",
53 "vl32",
54 "vl64",
55 "vl128",
56 "vl256",
57 0,
58 0,
59 /* 16-23. */
60 0,
61 0,
62 0,
63 0,
64 0,
65 0,
66 0,
67 0,
68 /* 24-31. */
69 0,
70 0,
71 0,
72 0,
73 0,
74 "mul4",
75 "mul3",
76 "all"
77 };
78
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array[16] = {
82 /* 0-7. */
83 "pldl1keep",
84 "pldl1strm",
85 "pldl2keep",
86 "pldl2strm",
87 "pldl3keep",
88 "pldl3strm",
89 0,
90 0,
91 /* 8-15. */
92 "pstl1keep",
93 "pstl1strm",
94 "pstl2keep",
95 "pstl2strm",
96 "pstl3keep",
97 "pstl3strm",
98 0,
99 0
100 };
101
102 /* Helper functions to determine which operand is used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
121 enum data_pattern
122 {
123 DP_UNKNOWN,
124 DP_VECTOR_3SAME,
125 DP_VECTOR_LONG,
126 DP_VECTOR_WIDE,
127 DP_VECTOR_ACROSS_LANES,
128 };
129
130 static const char significant_operand_index [] =
131 {
132 0, /* DP_UNKNOWN, by default using operand 0. */
133 0, /* DP_VECTOR_3SAME */
134 1, /* DP_VECTOR_LONG */
135 2, /* DP_VECTOR_WIDE */
136 1, /* DP_VECTOR_ACROSS_LANES */
137 };
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time we need to select an operand.  We could
191 either cache the calculated result or statically generate the data;
192 however, it is not obvious that the optimization would bring a
193 significant benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
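/* For example, for a lengthening operation such as
   SADDL <Vd>.8H, <Vn>.8B, <Vm>.8B, the qualifier sequence (8H, 8B, 8B)
   matches DP_VECTOR_LONG above, so index 1 is returned and the first
   source operand is the one used to encode/decode the size:Q fields.  */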
201 \f
202 const aarch64_field fields[] =
203 {
204 { 0, 0 }, /* NIL. */
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_2: in rmif instructions. */
244 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_2: in rmif instructions. */
246 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
247 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
248 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
249 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
250 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
251 { 5, 14 }, /* imm14: in test bit and branch instructions. */
252 { 5, 16 }, /* imm16: in exception instructions. */
253 { 0, 26 }, /* imm26: in unconditional branch instructions. */
254 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
255 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
256 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
257 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
258 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
259 { 22, 1 }, /* N: in logical (immediate) instructions. */
260 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
261 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
262 { 31, 1 }, /* sf: in integer data processing instructions. */
263 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
264 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
265 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
266 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
267 { 31, 1 }, /* b5: in the test bit and branch instructions. */
268 { 19, 5 }, /* b40: in the test bit and branch instructions. */
269 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
270 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
271 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
272 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
273 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
274 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
275 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
276 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
277 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
278 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
279 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
280 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
281 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
282 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
283 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
284 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
285 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
286 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
287 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
288 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
289 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
290 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
291 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
292 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
293 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
294 { 5, 1 }, /* SVE_i1: single-bit immediate. */
295 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
296 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
297 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
298 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
299 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
300 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
301 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
302 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
303 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
304 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
305 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
306 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
307 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
308 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
309 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
310 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
311 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
312 { 16, 4 }, /* SVE_tsz: triangular size select. */
313 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
314 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
315 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
316 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
317 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
318 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
319 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
320 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
321 { 12, 2 }, /* SM3: Indexed element SM3 2 bits index immediate. */
322 };
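/* Each entry above is { lsb, width }.  For example, the Rn entry { 5, 5 }
   describes a register number held in bits [9:5] of the instruction word;
   a decoder recovers it with (code >> 5) & 0x1f and an encoder places it
   with (regno & 0x1f) << 5, which is essentially what the generic
   insert/extract helpers in this library do from the lsb/width pair.  */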
323
324 enum aarch64_operand_class
325 aarch64_get_operand_class (enum aarch64_opnd type)
326 {
327 return aarch64_operands[type].op_class;
328 }
329
330 const char *
331 aarch64_get_operand_name (enum aarch64_opnd type)
332 {
333 return aarch64_operands[type].name;
334 }
335
336 /* Get operand description string.
337 This is usually for diagnostic purposes. */
338 const char *
339 aarch64_get_operand_desc (enum aarch64_opnd type)
340 {
341 return aarch64_operands[type].desc;
342 }
343
344 /* Table of all conditional affixes. */
345 const aarch64_cond aarch64_conds[16] =
346 {
347 {{"eq", "none"}, 0x0},
348 {{"ne", "any"}, 0x1},
349 {{"cs", "hs", "nlast"}, 0x2},
350 {{"cc", "lo", "ul", "last"}, 0x3},
351 {{"mi", "first"}, 0x4},
352 {{"pl", "nfrst"}, 0x5},
353 {{"vs"}, 0x6},
354 {{"vc"}, 0x7},
355 {{"hi", "pmore"}, 0x8},
356 {{"ls", "plast"}, 0x9},
357 {{"ge", "tcont"}, 0xa},
358 {{"lt", "tstop"}, 0xb},
359 {{"gt"}, 0xc},
360 {{"le"}, 0xd},
361 {{"al"}, 0xe},
362 {{"nv"}, 0xf},
363 };
364
365 const aarch64_cond *
366 get_cond_from_value (aarch64_insn value)
367 {
368 assert (value < 16);
369 return &aarch64_conds[(unsigned int) value];
370 }
371
372 const aarch64_cond *
373 get_inverted_cond (const aarch64_cond *cond)
374 {
375 return &aarch64_conds[cond->value ^ 0x1];
376 }
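/* Flipping bit 0 of the 4-bit condition value yields the opposite test,
   e.g. "eq" (0x0) <-> "ne" (0x1) and "ge" (0xa) <-> "lt" (0xb).  Note that
   "al" (0xe) and "nv" (0xf) are not a meaningful inversion of each other,
   so callers should not invert those two.  */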
377
378 /* Table describing the operand extension/shifting operators; indexed by
379 enum aarch64_modifier_kind.
380
381 The value column provides the most common values for encoding modifiers,
382 which enables table-driven encoding/decoding for the modifiers. */
383 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
384 {
385 {"none", 0x0},
386 {"msl", 0x0},
387 {"ror", 0x3},
388 {"asr", 0x2},
389 {"lsr", 0x1},
390 {"lsl", 0x0},
391 {"uxtb", 0x0},
392 {"uxth", 0x1},
393 {"uxtw", 0x2},
394 {"uxtx", 0x3},
395 {"sxtb", 0x4},
396 {"sxth", 0x5},
397 {"sxtw", 0x6},
398 {"sxtx", 0x7},
399 {"mul", 0x0},
400 {"mul vl", 0x0},
401 {NULL, 0},
402 };
403
404 enum aarch64_modifier_kind
405 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
406 {
407 return desc - aarch64_operand_modifiers;
408 }
409
410 aarch64_insn
411 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
412 {
413 return aarch64_operand_modifiers[kind].value;
414 }
415
416 enum aarch64_modifier_kind
417 aarch64_get_operand_modifier_from_value (aarch64_insn value,
418 bfd_boolean extend_p)
419 {
420 if (extend_p == TRUE)
421 return AARCH64_MOD_UXTB + value;
422 else
423 return AARCH64_MOD_LSL - value;
424 }
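/* The mapping above relies on enum aarch64_modifier_kind following the
   table order (see aarch64_get_operand_modifier above): for an extend
   (EXTEND_P == TRUE) a value of 0..7 selects UXTB..SXTX directly, while
   the shift kinds are laid out in reverse (ror, asr, lsr, lsl), so e.g.
   value 2 gives AARCH64_MOD_LSL - 2, i.e. AARCH64_MOD_ASR, matching the
   "asr" entry's value 0x2 above.  */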
425
426 bfd_boolean
427 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
428 {
429 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
430 ? TRUE : FALSE;
431 }
432
433 static inline bfd_boolean
434 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
435 {
436 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
437 ? TRUE : FALSE;
438 }
439
440 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
441 {
442 { "#0x00", 0x0 },
443 { "oshld", 0x1 },
444 { "oshst", 0x2 },
445 { "osh", 0x3 },
446 { "#0x04", 0x4 },
447 { "nshld", 0x5 },
448 { "nshst", 0x6 },
449 { "nsh", 0x7 },
450 { "#0x08", 0x8 },
451 { "ishld", 0x9 },
452 { "ishst", 0xa },
453 { "ish", 0xb },
454 { "#0x0c", 0xc },
455 { "ld", 0xd },
456 { "st", 0xe },
457 { "sy", 0xf },
458 };
459
460 /* Table describing the operands supported by the aliases of the HINT
461 instruction.
462
463 The name column is the operand that is accepted for the alias. The value
464 column is the hint number of the alias. The list of operands is terminated
465 by NULL in the name column. */
466
467 const struct aarch64_name_value_pair aarch64_hint_options[] =
468 {
469 { "csync", 0x11 }, /* PSB CSYNC. */
470 { NULL, 0x0 },
471 };
472
473 /* op -> op: load = 0, instruction = 1, store = 2
474 l -> level: 1-3
475 t -> temporal: temporal (retained) = 0, non-temporal (streaming) = 1 */
476 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
477 const struct aarch64_name_value_pair aarch64_prfops[32] =
478 {
479 { "pldl1keep", B(0, 1, 0) },
480 { "pldl1strm", B(0, 1, 1) },
481 { "pldl2keep", B(0, 2, 0) },
482 { "pldl2strm", B(0, 2, 1) },
483 { "pldl3keep", B(0, 3, 0) },
484 { "pldl3strm", B(0, 3, 1) },
485 { NULL, 0x06 },
486 { NULL, 0x07 },
487 { "plil1keep", B(1, 1, 0) },
488 { "plil1strm", B(1, 1, 1) },
489 { "plil2keep", B(1, 2, 0) },
490 { "plil2strm", B(1, 2, 1) },
491 { "plil3keep", B(1, 3, 0) },
492 { "plil3strm", B(1, 3, 1) },
493 { NULL, 0x0e },
494 { NULL, 0x0f },
495 { "pstl1keep", B(2, 1, 0) },
496 { "pstl1strm", B(2, 1, 1) },
497 { "pstl2keep", B(2, 2, 0) },
498 { "pstl2strm", B(2, 2, 1) },
499 { "pstl3keep", B(2, 3, 0) },
500 { "pstl3strm", B(2, 3, 1) },
501 { NULL, 0x16 },
502 { NULL, 0x17 },
503 { NULL, 0x18 },
504 { NULL, 0x19 },
505 { NULL, 0x1a },
506 { NULL, 0x1b },
507 { NULL, 0x1c },
508 { NULL, 0x1d },
509 { NULL, 0x1e },
510 { NULL, 0x1f },
511 };
512 #undef B
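/* For example, "pstl2strm" above is B(2, 2, 1)
   = (2 << 3) | ((2 - 1) << 1) | 1 = 0x13, the 5-bit <prfop> value carried
   in the Rt field of PRFM.  The NULL entries are the reserved encodings,
   which are written and printed as plain #immediates instead.  */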
513 \f
514 /* Utilities for value constraints. */
515
516 static inline int
517 value_in_range_p (int64_t value, int low, int high)
518 {
519 return (value >= low && value <= high) ? 1 : 0;
520 }
521
522 /* Return true if VALUE is a multiple of ALIGN. */
523 static inline int
524 value_aligned_p (int64_t value, int align)
525 {
526 return (value % align) == 0;
527 }
528
529 /* Return non-zero if a signed VALUE fits in a field of WIDTH bits. */
530 static inline int
531 value_fit_signed_field_p (int64_t value, unsigned width)
532 {
533 assert (width < 32);
534 if (width < sizeof (value) * 8)
535 {
536 int64_t lim = (int64_t)1 << (width - 1);
537 if (value >= -lim && value < lim)
538 return 1;
539 }
540 return 0;
541 }
542
543 /* Return non-zero if an unsigned VALUE fits in a field of WIDTH bits. */
544 static inline int
545 value_fit_unsigned_field_p (int64_t value, unsigned width)
546 {
547 assert (width < 32);
548 if (width < sizeof (value) * 8)
549 {
550 int64_t lim = (int64_t)1 << width;
551 if (value >= 0 && value < lim)
552 return 1;
553 }
554 return 0;
555 }
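/* For example, a signed 9-bit field (such as the imm9 load/store offset)
   accepts values in [-256, 255], while an unsigned 12-bit field (imm12)
   accepts [0, 4095]; anything outside those ranges makes the helpers
   above return 0.  */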
556
557 /* Return 1 if OPERAND is SP or WSP. */
558 int
559 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
560 {
561 return ((aarch64_get_operand_class (operand->type)
562 == AARCH64_OPND_CLASS_INT_REG)
563 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
564 && operand->reg.regno == 31);
565 }
566
567 /* Return 1 if OPERAND is XZR or WZR. */
568 int
569 aarch64_zero_register_p (const aarch64_opnd_info *operand)
570 {
571 return ((aarch64_get_operand_class (operand->type)
572 == AARCH64_OPND_CLASS_INT_REG)
573 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
574 && operand->reg.regno == 31);
575 }
576
577 /* Return true if the operand *OPERAND, which has the operand code
578 OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
579 qualified by the qualifier TARGET. */
580
581 static inline int
582 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
583 aarch64_opnd_qualifier_t target)
584 {
585 switch (operand->qualifier)
586 {
587 case AARCH64_OPND_QLF_W:
588 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
589 return 1;
590 break;
591 case AARCH64_OPND_QLF_X:
592 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
593 return 1;
594 break;
595 case AARCH64_OPND_QLF_WSP:
596 if (target == AARCH64_OPND_QLF_W
597 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
598 return 1;
599 break;
600 case AARCH64_OPND_QLF_SP:
601 if (target == AARCH64_OPND_QLF_X
602 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
603 return 1;
604 break;
605 default:
606 break;
607 }
608
609 return 0;
610 }
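/* For example, an operand currently qualified as W, whose register number
   is 31 and whose operand type allows the stack pointer, may also stand
   where the sequence expects WSP (and similarly for X and SP); any other
   pair of differing qualifiers is treated as a mismatch by the caller.  */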
611
612 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
613 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
614
615 Return NIL if more than one expected qualifier is found. */
616
617 aarch64_opnd_qualifier_t
618 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
619 int idx,
620 const aarch64_opnd_qualifier_t known_qlf,
621 int known_idx)
622 {
623 int i, saved_i;
624
625 /* Special case.
626
627 When the known qualifier is NIL, we have to assume that there is only
628 one qualifier sequence in the *QSEQ_LIST and return the corresponding
629 qualifier directly. One scenario is that for instruction
630 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
631 which has only one possible valid qualifier sequence
632 NIL, S_D
633 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
634 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
635
636 Because the qualifier NIL has dual roles in the qualifier sequence:
637 it can mean no qualifier for the operand, or the qualifier sequence is
638 not in use (when all qualifiers in the sequence are NILs), we have to
639 handle this special case here. */
640 if (known_qlf == AARCH64_OPND_NIL)
641 {
642 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
643 return qseq_list[0][idx];
644 }
645
646 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
647 {
648 if (qseq_list[i][known_idx] == known_qlf)
649 {
650 if (saved_i != -1)
651 /* More than one sequence is found to have KNOWN_QLF at
652 KNOWN_IDX. */
653 return AARCH64_OPND_NIL;
654 saved_i = i;
655 }
656 }
657
658 return qseq_list[saved_i][idx];
659 }
660
661 enum operand_qualifier_kind
662 {
663 OQK_NIL,
664 OQK_OPD_VARIANT,
665 OQK_VALUE_IN_RANGE,
666 OQK_MISC,
667 };
668
669 /* Operand qualifier description. */
670 struct operand_qualifier_data
671 {
672 /* The usage of the three data fields depends on the qualifier kind. */
673 int data0;
674 int data1;
675 int data2;
676 /* Description. */
677 const char *desc;
678 /* Kind. */
679 enum operand_qualifier_kind kind;
680 };
681
682 /* Indexed by the operand qualifier enumerators. */
683 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
684 {
685 {0, 0, 0, "NIL", OQK_NIL},
686
687 /* Operand variant qualifiers.
688 First 3 fields:
689 element size, number of elements and common value for encoding. */
690
691 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
692 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
693 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
694 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
695
696 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
697 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
698 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
699 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
700 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
701 {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
702
703 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
704 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
705 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
706 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
707 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
708 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
709 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
710 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
711 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
712 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
713 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
714
715 {0, 0, 0, "z", OQK_OPD_VARIANT},
716 {0, 0, 0, "m", OQK_OPD_VARIANT},
717
718 /* Qualifiers constraining the value range.
719 First 3 fields:
720 Lower bound, upper bound, unused. */
721
722 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
723 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
724 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
725 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
726 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
727 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
728 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
729
730 /* Qualifiers for miscellaneous purposes.
731 First 3 fields:
732 unused, unused and unused. */
733
734 {0, 0, 0, "lsl", 0},
735 {0, 0, 0, "msl", 0},
736
737 {0, 0, 0, "retrieving", 0},
738 };
739
740 static inline bfd_boolean
741 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
742 {
743 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
744 ? TRUE : FALSE;
745 }
746
747 static inline bfd_boolean
748 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
749 {
750 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
751 ? TRUE : FALSE;
752 }
753
754 const char*
755 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
756 {
757 return aarch64_opnd_qualifiers[qualifier].desc;
758 }
759
760 /* Given an operand qualifier, return the expected data element size
761 of a qualified operand. */
762 unsigned char
763 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
764 {
765 assert (operand_variant_qualifier_p (qualifier) == TRUE);
766 return aarch64_opnd_qualifiers[qualifier].data0;
767 }
768
769 unsigned char
770 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
771 {
772 assert (operand_variant_qualifier_p (qualifier) == TRUE);
773 return aarch64_opnd_qualifiers[qualifier].data1;
774 }
775
776 aarch64_insn
777 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
778 {
779 assert (operand_variant_qualifier_p (qualifier) == TRUE);
780 return aarch64_opnd_qualifiers[qualifier].data2;
781 }
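/* For example, for the "4s" arrangement above ({4, 4, 0x5}),
   aarch64_get_qualifier_esize returns 4 (bytes per element),
   aarch64_get_qualifier_nelem returns 4 (elements), i.e. 16 bytes in
   total, and aarch64_get_qualifier_standard_value returns 0x5, the value
   commonly used when encoding the size:Q fields for that arrangement.  */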
782
783 static int
784 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
785 {
786 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
787 return aarch64_opnd_qualifiers[qualifier].data0;
788 }
789
790 static int
791 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
792 {
793 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
794 return aarch64_opnd_qualifiers[qualifier].data1;
795 }
796
797 #ifdef DEBUG_AARCH64
798 void
799 aarch64_verbose (const char *str, ...)
800 {
801 va_list ap;
802 va_start (ap, str);
803 printf ("#### ");
804 vprintf (str, ap);
805 printf ("\n");
806 va_end (ap);
807 }
808
809 static inline void
810 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
811 {
812 int i;
813 printf ("#### \t");
814 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
815 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
816 printf ("\n");
817 }
818
819 static void
820 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
821 const aarch64_opnd_qualifier_t *qualifier)
822 {
823 int i;
824 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
825
826 aarch64_verbose ("dump_match_qualifiers:");
827 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
828 curr[i] = opnd[i].qualifier;
829 dump_qualifier_sequence (curr);
830 aarch64_verbose ("against");
831 dump_qualifier_sequence (qualifier);
832 }
833 #endif /* DEBUG_AARCH64 */
834
835 /* TODO: improve this; an extra field could be used to store the number
836 of operands rather than calculating it every time. */
837
838 int
839 aarch64_num_of_operands (const aarch64_opcode *opcode)
840 {
841 int i = 0;
842 const enum aarch64_opnd *opnds = opcode->operands;
843 while (opnds[i++] != AARCH64_OPND_NIL)
844 ;
845 --i;
846 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
847 return i;
848 }
849
850 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
851 If it succeeds, fill the found sequence in *RET and return 1; otherwise return 0.
852
853 N.B. on entry, it is very likely that only some operands in *INST
854 have had their qualifiers established.
855
856 If STOP_AT is not -1, the function will only try to match
857 the qualifier sequence for operands before and including the operand
858 of index STOP_AT; and on success *RET will only be filled with the first
859 (STOP_AT+1) qualifiers.
860
861 A couple of examples of the matching algorithm:
862
863 X,W,NIL should match
864 X,W,NIL
865
866 NIL,NIL should match
867 X ,NIL
868
869 Apart from serving the main encoding routine, this can also be called
870 during or after the operand decoding. */
871
872 int
873 aarch64_find_best_match (const aarch64_inst *inst,
874 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
875 int stop_at, aarch64_opnd_qualifier_t *ret)
876 {
877 int found = 0;
878 int i, num_opnds;
879 const aarch64_opnd_qualifier_t *qualifiers;
880
881 num_opnds = aarch64_num_of_operands (inst->opcode);
882 if (num_opnds == 0)
883 {
884 DEBUG_TRACE ("SUCCEED: no operand");
885 return 1;
886 }
887
888 if (stop_at < 0 || stop_at >= num_opnds)
889 stop_at = num_opnds - 1;
890
891 /* For each pattern. */
892 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
893 {
894 int j;
895 qualifiers = *qualifiers_list;
896
897 /* Start as positive. */
898 found = 1;
899
900 DEBUG_TRACE ("%d", i);
901 #ifdef DEBUG_AARCH64
902 if (debug_dump)
903 dump_match_qualifiers (inst->operands, qualifiers);
904 #endif
905
906 /* Most opcodes have far fewer patterns in the list.
907 The first NIL qualifier indicates the end of the list. */
908 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
909 {
910 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
911 if (i)
912 found = 0;
913 break;
914 }
915
916 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
917 {
918 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
919 {
920 /* Either the operand does not have a qualifier, or the qualifier
921 for the operand needs to be deduced from the qualifier
922 sequence.
923 In the latter case, any constraint checking related to
924 the obtained qualifier should be done later in
925 operand_general_constraint_met_p. */
926 continue;
927 }
928 else if (*qualifiers != inst->operands[j].qualifier)
929 {
930 /* Unless the target qualifier can also qualify the operand
931 (which has already had a non-nil qualifier), non-equal
932 qualifiers generally do not match. */
933 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
934 continue;
935 else
936 {
937 found = 0;
938 break;
939 }
940 }
941 else
942 continue; /* Equal qualifiers are certainly matched. */
943 }
944
945 /* Qualifiers established. */
946 if (found == 1)
947 break;
948 }
949
950 if (found == 1)
951 {
952 /* Fill the result in *RET. */
953 int j;
954 qualifiers = *qualifiers_list;
955
956 DEBUG_TRACE ("complete qualifiers using list %d", i);
957 #ifdef DEBUG_AARCH64
958 if (debug_dump)
959 dump_qualifier_sequence (qualifiers);
960 #endif
961
962 for (j = 0; j <= stop_at; ++j, ++qualifiers)
963 ret[j] = *qualifiers;
964 for (; j < AARCH64_MAX_OPND_NUM; ++j)
965 ret[j] = AARCH64_OPND_QLF_NIL;
966
967 DEBUG_TRACE ("SUCCESS");
968 return 1;
969 }
970
971 DEBUG_TRACE ("FAIL");
972 return 0;
973 }
974
975 /* Operand qualifier matching and resolving.
976
977 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
978 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
979
980 If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
981 succeeds. */
982
983 static int
984 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
985 {
986 int i, nops;
987 aarch64_opnd_qualifier_seq_t qualifiers;
988
989 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
990 qualifiers))
991 {
992 DEBUG_TRACE ("matching FAIL");
993 return 0;
994 }
995
996 if (inst->opcode->flags & F_STRICT)
997 {
998 /* Require an exact qualifier match, even for NIL qualifiers. */
999 nops = aarch64_num_of_operands (inst->opcode);
1000 for (i = 0; i < nops; ++i)
1001 if (inst->operands[i].qualifier != qualifiers[i])
1002 return FALSE;
1003 }
1004
1005 /* Update the qualifiers. */
1006 if (update_p == TRUE)
1007 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1008 {
1009 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1010 break;
1011 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1012 "update %s with %s for operand %d",
1013 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1014 aarch64_get_qualifier_name (qualifiers[i]), i);
1015 inst->operands[i].qualifier = qualifiers[i];
1016 }
1017
1018 DEBUG_TRACE ("matching SUCCESS");
1019 return 1;
1020 }
1021
1022 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1023 register by MOVZ.
1024
1025 IS32 indicates whether VALUE is a 32-bit immediate or not.
1026 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1027 amount will be returned in *SHIFT_AMOUNT. */
1028
1029 bfd_boolean
1030 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1031 {
1032 int amount;
1033
1034 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1035
1036 if (is32)
1037 {
1038 /* Allow all zeros or all ones in top 32-bits, so that
1039 32-bit constant expressions like ~0x80000000 are
1040 permitted. */
1041 uint64_t ext = value;
1042 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1043 /* Immediate out of range. */
1044 return FALSE;
1045 value &= (int64_t) 0xffffffff;
1046 }
1047
1048 /* first, try movz then movn */
1049 amount = -1;
1050 if ((value & ((int64_t) 0xffff << 0)) == value)
1051 amount = 0;
1052 else if ((value & ((int64_t) 0xffff << 16)) == value)
1053 amount = 16;
1054 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1055 amount = 32;
1056 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1057 amount = 48;
1058
1059 if (amount == -1)
1060 {
1061 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1062 return FALSE;
1063 }
1064
1065 if (shift_amount != NULL)
1066 *shift_amount = amount;
1067
1068 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1069
1070 return TRUE;
1071 }
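/* For example, 0x12340000 can be materialised by a single
   MOVZ <Wd>, #0x1234, LSL #16, so the function returns TRUE with
   *SHIFT_AMOUNT set to 16; a value such as 0x12345 spans two 16-bit
   chunks and therefore makes the function return FALSE.  */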
1072
1073 /* Build the accepted values for immediate logical SIMD instructions.
1074
1075 The standard encodings of the immediate value are:
1076 N imms immr SIMD size R S
1077 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1078 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1079 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1080 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1081 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1082 0 11110s 00000r 2 UInt(r) UInt(s)
1083 where all-ones value of S is reserved.
1084
1085 Let's call E the SIMD size.
1086
1087 The immediate value is: S+1 bits '1' rotated to the right by R.
1088
1089 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1090 (remember S != E - 1). */
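/* A worked example: with an 8-bit element (N = 0, imms = 110sss),
   S = 1 and R = 0 describe S + 1 = 2 consecutive ones, i.e. the element
   0x03, which replicated across all eight bytes gives
   0x0303030303030303; that value is therefore a valid logical immediate,
   whereas e.g. 0x0000000000000005 (non-consecutive ones) is not.  */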
1091
1092 #define TOTAL_IMM_NB 5334
1093
1094 typedef struct
1095 {
1096 uint64_t imm;
1097 aarch64_insn encoding;
1098 } simd_imm_encoding;
1099
1100 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1101
1102 static int
1103 simd_imm_encoding_cmp(const void *i1, const void *i2)
1104 {
1105 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1106 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1107
1108 if (imm1->imm < imm2->imm)
1109 return -1;
1110 if (imm1->imm > imm2->imm)
1111 return +1;
1112 return 0;
1113 }
1114
1115 /* immediate bitfield standard encoding
1116 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1117 1 ssssss rrrrrr 64 rrrrrr ssssss
1118 0 0sssss 0rrrrr 32 rrrrr sssss
1119 0 10ssss 00rrrr 16 rrrr ssss
1120 0 110sss 000rrr 8 rrr sss
1121 0 1110ss 0000rr 4 rr ss
1122 0 11110s 00000r 2 r s */
1123 static inline int
1124 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1125 {
1126 return (is64 << 12) | (r << 6) | s;
1127 }
1128
1129 static void
1130 build_immediate_table (void)
1131 {
1132 uint32_t log_e, e, s, r, s_mask;
1133 uint64_t mask, imm;
1134 int nb_imms;
1135 int is64;
1136
1137 nb_imms = 0;
1138 for (log_e = 1; log_e <= 6; log_e++)
1139 {
1140 /* Get element size. */
1141 e = 1u << log_e;
1142 if (log_e == 6)
1143 {
1144 is64 = 1;
1145 mask = 0xffffffffffffffffull;
1146 s_mask = 0;
1147 }
1148 else
1149 {
1150 is64 = 0;
1151 mask = (1ull << e) - 1;
1152 /* log_e s_mask
1153 1 ((1 << 4) - 1) << 2 = 111100
1154 2 ((1 << 3) - 1) << 3 = 111000
1155 3 ((1 << 2) - 1) << 4 = 110000
1156 4 ((1 << 1) - 1) << 5 = 100000
1157 5 ((1 << 0) - 1) << 6 = 000000 */
1158 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1159 }
1160 for (s = 0; s < e - 1; s++)
1161 for (r = 0; r < e; r++)
1162 {
1163 /* s+1 consecutive bits to 1 (s < 63) */
1164 imm = (1ull << (s + 1)) - 1;
1165 /* rotate right by r */
1166 if (r != 0)
1167 imm = (imm >> r) | ((imm << (e - r)) & mask);
1168 /* replicate the constant depending on SIMD size */
1169 switch (log_e)
1170 {
1171 case 1: imm = (imm << 2) | imm;
1172 /* Fall through. */
1173 case 2: imm = (imm << 4) | imm;
1174 /* Fall through. */
1175 case 3: imm = (imm << 8) | imm;
1176 /* Fall through. */
1177 case 4: imm = (imm << 16) | imm;
1178 /* Fall through. */
1179 case 5: imm = (imm << 32) | imm;
1180 /* Fall through. */
1181 case 6: break;
1182 default: abort ();
1183 }
1184 simd_immediates[nb_imms].imm = imm;
1185 simd_immediates[nb_imms].encoding =
1186 encode_immediate_bitfield(is64, s | s_mask, r);
1187 nb_imms++;
1188 }
1189 }
1190 assert (nb_imms == TOTAL_IMM_NB);
1191 qsort(simd_immediates, nb_imms,
1192 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1193 }
1194
1195 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1196 be accepted by logical (immediate) instructions
1197 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1198
1199 ESIZE is the number of bytes in the decoded immediate value.
1200 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1201 VALUE will be returned in *ENCODING. */
1202
1203 bfd_boolean
1204 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1205 {
1206 simd_imm_encoding imm_enc;
1207 const simd_imm_encoding *imm_encoding;
1208 static bfd_boolean initialized = FALSE;
1209 uint64_t upper;
1210 int i;
1211
1212 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1213 value, esize);
1214
1215 if (!initialized)
1216 {
1217 build_immediate_table ();
1218 initialized = TRUE;
1219 }
1220
1221 /* Allow all zeros or all ones in top bits, so that
1222 constant expressions like ~1 are permitted. */
1223 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1224 if ((value & ~upper) != value && (value | upper) != value)
1225 return FALSE;
1226
1227 /* Replicate to a full 64-bit value. */
1228 value &= ~upper;
1229 for (i = esize * 8; i < 64; i *= 2)
1230 value |= (value << i);
1231
1232 imm_enc.imm = value;
1233 imm_encoding = (const simd_imm_encoding *)
1234 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1235 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1236 if (imm_encoding == NULL)
1237 {
1238 DEBUG_TRACE ("exit with FALSE");
1239 return FALSE;
1240 }
1241 if (encoding != NULL)
1242 *encoding = imm_encoding->encoding;
1243 DEBUG_TRACE ("exit with TRUE");
1244 return TRUE;
1245 }
1246
1247 /* If 64-bit immediate IMM is in the format of
1248 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1249 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1250 of value "abcdefgh". Otherwise return -1. */
1251 int
1252 aarch64_shrink_expanded_imm8 (uint64_t imm)
1253 {
1254 int i, ret;
1255 uint32_t byte;
1256
1257 ret = 0;
1258 for (i = 0; i < 8; i++)
1259 {
1260 byte = (imm >> (8 * i)) & 0xff;
1261 if (byte == 0xff)
1262 ret |= 1 << i;
1263 else if (byte != 0x00)
1264 return -1;
1265 }
1266 return ret;
1267 }
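/* For example, 0xff00ff0000ff00ff has its 0xff bytes at byte positions
   0, 2, 5 and 7, so the function returns 0xa5 (0b10100101); any value
   containing a byte other than 0x00 or 0xff makes it return -1.  */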
1268
1269 /* Utility inline functions for operand_general_constraint_met_p. */
1270
1271 static inline void
1272 set_error (aarch64_operand_error *mismatch_detail,
1273 enum aarch64_operand_error_kind kind, int idx,
1274 const char* error)
1275 {
1276 if (mismatch_detail == NULL)
1277 return;
1278 mismatch_detail->kind = kind;
1279 mismatch_detail->index = idx;
1280 mismatch_detail->error = error;
1281 }
1282
1283 static inline void
1284 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1285 const char* error)
1286 {
1287 if (mismatch_detail == NULL)
1288 return;
1289 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1290 }
1291
1292 static inline void
1293 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1294 int idx, int lower_bound, int upper_bound,
1295 const char* error)
1296 {
1297 if (mismatch_detail == NULL)
1298 return;
1299 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1300 mismatch_detail->data[0] = lower_bound;
1301 mismatch_detail->data[1] = upper_bound;
1302 }
1303
1304 static inline void
1305 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1306 int idx, int lower_bound, int upper_bound)
1307 {
1308 if (mismatch_detail == NULL)
1309 return;
1310 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1311 _("immediate value"));
1312 }
1313
1314 static inline void
1315 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1316 int idx, int lower_bound, int upper_bound)
1317 {
1318 if (mismatch_detail == NULL)
1319 return;
1320 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1321 _("immediate offset"));
1322 }
1323
1324 static inline void
1325 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1326 int idx, int lower_bound, int upper_bound)
1327 {
1328 if (mismatch_detail == NULL)
1329 return;
1330 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1331 _("register number"));
1332 }
1333
1334 static inline void
1335 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1336 int idx, int lower_bound, int upper_bound)
1337 {
1338 if (mismatch_detail == NULL)
1339 return;
1340 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1341 _("register element index"));
1342 }
1343
1344 static inline void
1345 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1346 int idx, int lower_bound, int upper_bound)
1347 {
1348 if (mismatch_detail == NULL)
1349 return;
1350 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1351 _("shift amount"));
1352 }
1353
1354 /* Report that the MUL modifier in operand IDX should be in the range
1355 [LOWER_BOUND, UPPER_BOUND]. */
1356 static inline void
1357 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1358 int idx, int lower_bound, int upper_bound)
1359 {
1360 if (mismatch_detail == NULL)
1361 return;
1362 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1363 _("multiplier"));
1364 }
1365
1366 static inline void
1367 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1368 int alignment)
1369 {
1370 if (mismatch_detail == NULL)
1371 return;
1372 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1373 mismatch_detail->data[0] = alignment;
1374 }
1375
1376 static inline void
1377 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1378 int expected_num)
1379 {
1380 if (mismatch_detail == NULL)
1381 return;
1382 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1383 mismatch_detail->data[0] = expected_num;
1384 }
1385
1386 static inline void
1387 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1388 const char* error)
1389 {
1390 if (mismatch_detail == NULL)
1391 return;
1392 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1393 }
1394
1395 /* General constraint checking based on operand code.
1396
1397 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1398 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1399
1400 This function has to be called after the qualifiers for all operands
1401 have been resolved.
1402
1403 A mismatch error message is returned in *MISMATCH_DETAIL upon request,
1404 i.e. when MISMATCH_DETAIL is non-NULL.  This avoids generating error
1405 messages during disassembly, where they are not wanted.  We avoid the
1406 dynamic construction of error message strings
1407 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1408 use a combination of error code, static string and some integer data to
1409 represent an error. */
1410
1411 static int
1412 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1413 enum aarch64_opnd type,
1414 const aarch64_opcode *opcode,
1415 aarch64_operand_error *mismatch_detail)
1416 {
1417 unsigned num, modifiers, shift;
1418 unsigned char size;
1419 int64_t imm, min_value, max_value;
1420 uint64_t uvalue, mask;
1421 const aarch64_opnd_info *opnd = opnds + idx;
1422 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1423
1424 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1425
1426 switch (aarch64_operands[type].op_class)
1427 {
1428 case AARCH64_OPND_CLASS_INT_REG:
1429 /* Check pair reg constraints for cas* instructions. */
1430 if (type == AARCH64_OPND_PAIRREG)
1431 {
1432 assert (idx == 1 || idx == 3);
1433 if (opnds[idx - 1].reg.regno % 2 != 0)
1434 {
1435 set_syntax_error (mismatch_detail, idx - 1,
1436 _("reg pair must start from even reg"));
1437 return 0;
1438 }
1439 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1440 {
1441 set_syntax_error (mismatch_detail, idx,
1442 _("reg pair must be contiguous"));
1443 return 0;
1444 }
1445 break;
1446 }
1447
1448 /* <Xt> may be optional in some IC and TLBI instructions. */
1449 if (type == AARCH64_OPND_Rt_SYS)
1450 {
1451 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1452 == AARCH64_OPND_CLASS_SYSTEM));
1453 if (opnds[1].present
1454 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1455 {
1456 set_other_error (mismatch_detail, idx, _("extraneous register"));
1457 return 0;
1458 }
1459 if (!opnds[1].present
1460 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1461 {
1462 set_other_error (mismatch_detail, idx, _("missing register"));
1463 return 0;
1464 }
1465 }
1466 switch (qualifier)
1467 {
1468 case AARCH64_OPND_QLF_WSP:
1469 case AARCH64_OPND_QLF_SP:
1470 if (!aarch64_stack_pointer_p (opnd))
1471 {
1472 set_other_error (mismatch_detail, idx,
1473 _("stack pointer register expected"));
1474 return 0;
1475 }
1476 break;
1477 default:
1478 break;
1479 }
1480 break;
1481
1482 case AARCH64_OPND_CLASS_SVE_REG:
1483 switch (type)
1484 {
1485 case AARCH64_OPND_SVE_Zm3_INDEX:
1486 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1487 case AARCH64_OPND_SVE_Zm4_INDEX:
1488 size = get_operand_fields_width (get_operand_from_code (type));
1489 shift = get_operand_specific_data (&aarch64_operands[type]);
1490 mask = (1 << shift) - 1;
1491 if (opnd->reg.regno > mask)
1492 {
1493 assert (mask == 7 || mask == 15);
1494 set_other_error (mismatch_detail, idx,
1495 mask == 15
1496 ? _("z0-z15 expected")
1497 : _("z0-z7 expected"));
1498 return 0;
1499 }
1500 mask = (1 << (size - shift)) - 1;
1501 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1502 {
1503 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1504 return 0;
1505 }
1506 break;
1507
1508 case AARCH64_OPND_SVE_Zn_INDEX:
1509 size = aarch64_get_qualifier_esize (opnd->qualifier);
1510 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1511 {
1512 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1513 0, 64 / size - 1);
1514 return 0;
1515 }
1516 break;
1517
1518 case AARCH64_OPND_SVE_ZnxN:
1519 case AARCH64_OPND_SVE_ZtxN:
1520 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1521 {
1522 set_other_error (mismatch_detail, idx,
1523 _("invalid register list"));
1524 return 0;
1525 }
1526 break;
1527
1528 default:
1529 break;
1530 }
1531 break;
1532
1533 case AARCH64_OPND_CLASS_PRED_REG:
1534 if (opnd->reg.regno >= 8
1535 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1536 {
1537 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1538 return 0;
1539 }
1540 break;
1541
1542 case AARCH64_OPND_CLASS_COND:
1543 if (type == AARCH64_OPND_COND1
1544 && (opnds[idx].cond->value & 0xe) == 0xe)
1545 {
1546 /* Do not allow AL or NV. */
1547 set_syntax_error (mismatch_detail, idx, NULL);
1548 }
1549 break;
1550
1551 case AARCH64_OPND_CLASS_ADDRESS:
1552 /* Check writeback. */
1553 switch (opcode->iclass)
1554 {
1555 case ldst_pos:
1556 case ldst_unscaled:
1557 case ldstnapair_offs:
1558 case ldstpair_off:
1559 case ldst_unpriv:
1560 if (opnd->addr.writeback == 1)
1561 {
1562 set_syntax_error (mismatch_detail, idx,
1563 _("unexpected address writeback"));
1564 return 0;
1565 }
1566 break;
1567 case ldst_imm10:
1568 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1569 {
1570 set_syntax_error (mismatch_detail, idx,
1571 _("unexpected address writeback"));
1572 return 0;
1573 }
1574 break;
1575 case ldst_imm9:
1576 case ldstpair_indexed:
1577 case asisdlsep:
1578 case asisdlsop:
1579 if (opnd->addr.writeback == 0)
1580 {
1581 set_syntax_error (mismatch_detail, idx,
1582 _("address writeback expected"));
1583 return 0;
1584 }
1585 break;
1586 default:
1587 assert (opnd->addr.writeback == 0);
1588 break;
1589 }
1590 switch (type)
1591 {
1592 case AARCH64_OPND_ADDR_SIMM7:
1593 /* Scaled signed 7-bit immediate offset. */
1594 /* Get the size of the data element that is accessed, which may be
1595 different from the source register size,
1596 e.g. in strb/ldrb. */
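/* For example, for LDP <Xt1>, <Xt2>, [<Xn|SP>, #<imm>] the element size
   is 8, so the byte offset below must be a multiple of 8 in the range
   [-512, 504].  */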
1597 size = aarch64_get_qualifier_esize (opnd->qualifier);
1598 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1599 {
1600 set_offset_out_of_range_error (mismatch_detail, idx,
1601 -64 * size, 63 * size);
1602 return 0;
1603 }
1604 if (!value_aligned_p (opnd->addr.offset.imm, size))
1605 {
1606 set_unaligned_error (mismatch_detail, idx, size);
1607 return 0;
1608 }
1609 break;
1610 case AARCH64_OPND_ADDR_OFFSET:
1611 case AARCH64_OPND_ADDR_SIMM9:
1612 /* Unscaled signed 9-bit immediate offset. */
1613 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1614 {
1615 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1616 return 0;
1617 }
1618 break;
1619
1620 case AARCH64_OPND_ADDR_SIMM9_2:
1621 /* Unscaled signed 9-bit immediate offset, which has to be negative
1622 or unaligned. */
1623 size = aarch64_get_qualifier_esize (qualifier);
1624 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1625 && !value_aligned_p (opnd->addr.offset.imm, size))
1626 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1627 return 1;
1628 set_other_error (mismatch_detail, idx,
1629 _("negative or unaligned offset expected"));
1630 return 0;
1631
1632 case AARCH64_OPND_ADDR_SIMM10:
1633 /* Scaled signed 10-bit immediate offset. */
1634 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1635 {
1636 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1637 return 0;
1638 }
1639 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1640 {
1641 set_unaligned_error (mismatch_detail, idx, 8);
1642 return 0;
1643 }
1644 break;
1645
1646 case AARCH64_OPND_SIMD_ADDR_POST:
1647 /* AdvSIMD load/store multiple structures, post-index. */
1648 assert (idx == 1);
1649 if (opnd->addr.offset.is_reg)
1650 {
1651 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1652 return 1;
1653 else
1654 {
1655 set_other_error (mismatch_detail, idx,
1656 _("invalid register offset"));
1657 return 0;
1658 }
1659 }
1660 else
1661 {
1662 const aarch64_opnd_info *prev = &opnds[idx-1];
1663 unsigned num_bytes; /* total number of bytes transferred. */
1664 /* The opcode dependent area stores the number of elements in
1665 each structure to be loaded/stored. */
1666 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1667 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1668 /* Special handling of loading a single structure to all lanes. */
1669 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1670 * aarch64_get_qualifier_esize (prev->qualifier);
1671 else
1672 num_bytes = prev->reglist.num_regs
1673 * aarch64_get_qualifier_esize (prev->qualifier)
1674 * aarch64_get_qualifier_nelem (prev->qualifier);
1675 if ((int) num_bytes != opnd->addr.offset.imm)
1676 {
1677 set_other_error (mismatch_detail, idx,
1678 _("invalid post-increment amount"));
1679 return 0;
1680 }
1681 }
1682 break;
1683
1684 case AARCH64_OPND_ADDR_REGOFF:
1685 /* Get the size of the data element that is accessed, which may be
1686 different from the source register size,
1687 e.g. in strb/ldrb. */
1688 size = aarch64_get_qualifier_esize (opnd->qualifier);
1689 /* It is either no shift or shift by the binary logarithm of SIZE. */
1690 if (opnd->shifter.amount != 0
1691 && opnd->shifter.amount != (int)get_logsz (size))
1692 {
1693 set_other_error (mismatch_detail, idx,
1694 _("invalid shift amount"));
1695 return 0;
1696 }
1697 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1698 operators. */
1699 switch (opnd->shifter.kind)
1700 {
1701 case AARCH64_MOD_UXTW:
1702 case AARCH64_MOD_LSL:
1703 case AARCH64_MOD_SXTW:
1704 case AARCH64_MOD_SXTX: break;
1705 default:
1706 set_other_error (mismatch_detail, idx,
1707 _("invalid extend/shift operator"));
1708 return 0;
1709 }
1710 break;
1711
1712 case AARCH64_OPND_ADDR_UIMM12:
1713 imm = opnd->addr.offset.imm;
1714 /* Get the size of the data element that is accessed, which may be
1715 different from the source register size,
1716 e.g. in strb/ldrb. */
1717 size = aarch64_get_qualifier_esize (qualifier);
1718 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1719 {
1720 set_offset_out_of_range_error (mismatch_detail, idx,
1721 0, 4095 * size);
1722 return 0;
1723 }
1724 if (!value_aligned_p (opnd->addr.offset.imm, size))
1725 {
1726 set_unaligned_error (mismatch_detail, idx, size);
1727 return 0;
1728 }
1729 break;
1730
1731 case AARCH64_OPND_ADDR_PCREL14:
1732 case AARCH64_OPND_ADDR_PCREL19:
1733 case AARCH64_OPND_ADDR_PCREL21:
1734 case AARCH64_OPND_ADDR_PCREL26:
1735 imm = opnd->imm.value;
1736 if (operand_need_shift_by_two (get_operand_from_code (type)))
1737 {
1738 /* The offset value in a PC-relative branch instruction is always
1739 4-byte aligned and is encoded without the lowest 2 bits. */
1740 if (!value_aligned_p (imm, 4))
1741 {
1742 set_unaligned_error (mismatch_detail, idx, 4);
1743 return 0;
1744 }
1745 /* Right shift by 2 so that we can carry out the following check
1746 canonically. */
1747 imm >>= 2;
1748 }
1749 size = get_operand_fields_width (get_operand_from_code (type));
1750 if (!value_fit_signed_field_p (imm, size))
1751 {
1752 set_other_error (mismatch_detail, idx,
1753 _("immediate out of range"));
1754 return 0;
1755 }
1756 break;
1757
1758 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1759 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1760 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1761 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1762 min_value = -8;
1763 max_value = 7;
1764 sve_imm_offset_vl:
1765 assert (!opnd->addr.offset.is_reg);
1766 assert (opnd->addr.preind);
1767 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1768 min_value *= num;
1769 max_value *= num;
1770 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1771 || (opnd->shifter.operator_present
1772 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1773 {
1774 set_other_error (mismatch_detail, idx,
1775 _("invalid addressing mode"));
1776 return 0;
1777 }
1778 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1779 {
1780 set_offset_out_of_range_error (mismatch_detail, idx,
1781 min_value, max_value);
1782 return 0;
1783 }
1784 if (!value_aligned_p (opnd->addr.offset.imm, num))
1785 {
1786 set_unaligned_error (mismatch_detail, idx, num);
1787 return 0;
1788 }
1789 break;
1790
1791 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1792 min_value = -32;
1793 max_value = 31;
1794 goto sve_imm_offset_vl;
1795
1796 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1797 min_value = -256;
1798 max_value = 255;
1799 goto sve_imm_offset_vl;
1800
1801 case AARCH64_OPND_SVE_ADDR_RI_U6:
1802 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1803 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1804 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1805 min_value = 0;
1806 max_value = 63;
1807 sve_imm_offset:
1808 assert (!opnd->addr.offset.is_reg);
1809 assert (opnd->addr.preind);
1810 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1811 min_value *= num;
1812 max_value *= num;
1813 if (opnd->shifter.operator_present
1814 || opnd->shifter.amount_present)
1815 {
1816 set_other_error (mismatch_detail, idx,
1817 _("invalid addressing mode"));
1818 return 0;
1819 }
1820 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1821 {
1822 set_offset_out_of_range_error (mismatch_detail, idx,
1823 min_value, max_value);
1824 return 0;
1825 }
1826 if (!value_aligned_p (opnd->addr.offset.imm, num))
1827 {
1828 set_unaligned_error (mismatch_detail, idx, num);
1829 return 0;
1830 }
1831 break;
1832
1833 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1834 min_value = -8;
1835 max_value = 7;
1836 goto sve_imm_offset;
1837
1838 case AARCH64_OPND_SVE_ADDR_R:
1839 case AARCH64_OPND_SVE_ADDR_RR:
1840 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1841 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1842 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1843 case AARCH64_OPND_SVE_ADDR_RX:
1844 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1845 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1846 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1847 case AARCH64_OPND_SVE_ADDR_RZ:
1848 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1849 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1850 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1851 modifiers = 1 << AARCH64_MOD_LSL;
1852 sve_rr_operand:
1853 assert (opnd->addr.offset.is_reg);
1854 assert (opnd->addr.preind);
1855 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1856 && opnd->addr.offset.regno == 31)
1857 {
1858 set_other_error (mismatch_detail, idx,
1859 _("index register xzr is not allowed"));
1860 return 0;
1861 }
1862 if (((1 << opnd->shifter.kind) & modifiers) == 0
1863 || (opnd->shifter.amount
1864 != get_operand_specific_data (&aarch64_operands[type])))
1865 {
1866 set_other_error (mismatch_detail, idx,
1867 _("invalid addressing mode"));
1868 return 0;
1869 }
1870 break;
1871
1872 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1873 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1874 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1875 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1876 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1877 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1878 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1879 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1880 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1881 goto sve_rr_operand;
1882
1883 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1884 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1885 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1886 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1887 min_value = 0;
1888 max_value = 31;
1889 goto sve_imm_offset;
1890
1891 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1892 modifiers = 1 << AARCH64_MOD_LSL;
1893 sve_zz_operand:
1894 assert (opnd->addr.offset.is_reg);
1895 assert (opnd->addr.preind);
1896 if (((1 << opnd->shifter.kind) & modifiers) == 0
1897 || opnd->shifter.amount < 0
1898 || opnd->shifter.amount > 3)
1899 {
1900 set_other_error (mismatch_detail, idx,
1901 _("invalid addressing mode"));
1902 return 0;
1903 }
1904 break;
1905
1906 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1907 modifiers = (1 << AARCH64_MOD_SXTW);
1908 goto sve_zz_operand;
1909
1910 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1911 modifiers = 1 << AARCH64_MOD_UXTW;
1912 goto sve_zz_operand;
1913
1914 default:
1915 break;
1916 }
1917 break;
1918
1919 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1920 if (type == AARCH64_OPND_LEt)
1921 {
1922 /* Get the upper bound for the element index. */
1923 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1924 if (!value_in_range_p (opnd->reglist.index, 0, num))
1925 {
1926 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1927 return 0;
1928 }
1929 }
1930 /* The opcode dependent area stores the number of elements in
1931 each structure to be loaded/stored. */
1932 num = get_opcode_dependent_value (opcode);
1933 switch (type)
1934 {
1935 case AARCH64_OPND_LVt:
1936 assert (num >= 1 && num <= 4);
1937 /* Unless LD1/ST1, the number of registers should be equal to that
1938 of the structure elements. */
1939 if (num != 1 && opnd->reglist.num_regs != num)
1940 {
1941 set_reg_list_error (mismatch_detail, idx, num);
1942 return 0;
1943 }
1944 break;
1945 case AARCH64_OPND_LVt_AL:
1946 case AARCH64_OPND_LEt:
1947 assert (num >= 1 && num <= 4);
1948 /* The number of registers should be equal to that of the structure
1949 elements. */
1950 if (opnd->reglist.num_regs != num)
1951 {
1952 set_reg_list_error (mismatch_detail, idx, num);
1953 return 0;
1954 }
1955 break;
1956 default:
1957 break;
1958 }
1959 break;
1960
1961 case AARCH64_OPND_CLASS_IMMEDIATE:
1962 /* Constraint check on immediate operand. */
1963 imm = opnd->imm.value;
1964 /* E.g. imm_0_31 constrains value to be 0..31. */
1965 if (qualifier_value_in_range_constraint_p (qualifier)
1966 && !value_in_range_p (imm, get_lower_bound (qualifier),
1967 get_upper_bound (qualifier)))
1968 {
1969 set_imm_out_of_range_error (mismatch_detail, idx,
1970 get_lower_bound (qualifier),
1971 get_upper_bound (qualifier));
1972 return 0;
1973 }
1974
1975 switch (type)
1976 {
1977 case AARCH64_OPND_AIMM:
1978 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1979 {
1980 set_other_error (mismatch_detail, idx,
1981 _("invalid shift operator"));
1982 return 0;
1983 }
1984 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1985 {
1986 set_other_error (mismatch_detail, idx,
1987 _("shift amount must be 0 or 12"));
1988 return 0;
1989 }
1990 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1991 {
1992 set_other_error (mismatch_detail, idx,
1993 _("immediate out of range"));
1994 return 0;
1995 }
1996 break;
1997
1998 case AARCH64_OPND_HALF:
1999 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2000 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2001 {
2002 set_other_error (mismatch_detail, idx,
2003 _("invalid shift operator"));
2004 return 0;
2005 }
2006 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
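/* E.g. (illustration) for MOVZ <Xd>, #<imm16>, LSL #<shift>, SIZE is 8,
   so the checks below allow shift amounts of 0, 16, 32 or 48.  */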
2007 if (!value_aligned_p (opnd->shifter.amount, 16))
2008 {
2009 set_other_error (mismatch_detail, idx,
2010 _("shift amount must be a multiple of 16"));
2011 return 0;
2012 }
2013 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2014 {
2015 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2016 0, size * 8 - 16);
2017 return 0;
2018 }
2019 if (opnd->imm.value < 0)
2020 {
2021 set_other_error (mismatch_detail, idx,
2022 _("negative immediate value not allowed"));
2023 return 0;
2024 }
2025 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2026 {
2027 set_other_error (mismatch_detail, idx,
2028 _("immediate out of range"));
2029 return 0;
2030 }
2031 break;
2032
2033 case AARCH64_OPND_IMM_MOV:
2034 {
2035 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2036 imm = opnd->imm.value;
2037 assert (idx == 1);
2038 switch (opcode->op)
2039 {
2040 case OP_MOV_IMM_WIDEN:
2041 imm = ~imm;
2042 /* Fall through. */
2043 case OP_MOV_IMM_WIDE:
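/* E.g. (illustration) MOV W0, #0x12340000 is accepted here because it
   can be encoded as MOVZ W0, #0x1234, LSL #16.  */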
2044 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2045 {
2046 set_other_error (mismatch_detail, idx,
2047 _("immediate out of range"));
2048 return 0;
2049 }
2050 break;
2051 case OP_MOV_IMM_LOG:
2052 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2053 {
2054 set_other_error (mismatch_detail, idx,
2055 _("immediate out of range"));
2056 return 0;
2057 }
2058 break;
2059 default:
2060 assert (0);
2061 return 0;
2062 }
2063 }
2064 break;
2065
2066 case AARCH64_OPND_NZCV:
2067 case AARCH64_OPND_CCMP_IMM:
2068 case AARCH64_OPND_EXCEPTION:
2069 case AARCH64_OPND_UIMM4:
2070 case AARCH64_OPND_UIMM7:
2071 case AARCH64_OPND_UIMM3_OP1:
2072 case AARCH64_OPND_UIMM3_OP2:
2073 case AARCH64_OPND_SVE_UIMM3:
2074 case AARCH64_OPND_SVE_UIMM7:
2075 case AARCH64_OPND_SVE_UIMM8:
2076 case AARCH64_OPND_SVE_UIMM8_53:
2077 size = get_operand_fields_width (get_operand_from_code (type));
2078 assert (size < 32);
2079 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2080 {
2081 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2082 (1 << size) - 1);
2083 return 0;
2084 }
2085 break;
2086
2087 case AARCH64_OPND_SIMM5:
2088 case AARCH64_OPND_SVE_SIMM5:
2089 case AARCH64_OPND_SVE_SIMM5B:
2090 case AARCH64_OPND_SVE_SIMM6:
2091 case AARCH64_OPND_SVE_SIMM8:
2092 size = get_operand_fields_width (get_operand_from_code (type));
2093 assert (size < 32);
2094 if (!value_fit_signed_field_p (opnd->imm.value, size))
2095 {
2096 set_imm_out_of_range_error (mismatch_detail, idx,
2097 -(1 << (size - 1)),
2098 (1 << (size - 1)) - 1);
2099 return 0;
2100 }
2101 break;
2102
2103 case AARCH64_OPND_WIDTH:
2104 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2105 && opnds[0].type == AARCH64_OPND_Rd);
2106 size = get_upper_bound (qualifier);
2107 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2108 /* lsb+width <= reg.size */
2109 {
2110 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2111 size - opnds[idx-1].imm.value);
2112 return 0;
2113 }
2114 break;
2115
2116 case AARCH64_OPND_LIMM:
2117 case AARCH64_OPND_SVE_LIMM:
2118 {
2119 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
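/* A logical immediate is a repeating pattern built from a rotated run
   of ones; e.g. (illustration) 0x00ff00ff00ff00ff is accepted for a
   64-bit AND, while 0x1234 is rejected.  */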
2120 uint64_t uimm = opnd->imm.value;
2121 if (opcode->op == OP_BIC)
2122 uimm = ~uimm;
2123 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2124 {
2125 set_other_error (mismatch_detail, idx,
2126 _("immediate out of range"));
2127 return 0;
2128 }
2129 }
2130 break;
2131
2132 case AARCH64_OPND_IMM0:
2133 case AARCH64_OPND_FPIMM0:
2134 if (opnd->imm.value != 0)
2135 {
2136 set_other_error (mismatch_detail, idx,
2137 _("immediate zero expected"));
2138 return 0;
2139 }
2140 break;
2141
2142 case AARCH64_OPND_IMM_ROT1:
2143 case AARCH64_OPND_IMM_ROT2:
2144 case AARCH64_OPND_SVE_IMM_ROT2:
2145 if (opnd->imm.value != 0
2146 && opnd->imm.value != 90
2147 && opnd->imm.value != 180
2148 && opnd->imm.value != 270)
2149 {
2150 set_other_error (mismatch_detail, idx,
2151 _("rotate expected to be 0, 90, 180 or 270"));
2152 return 0;
2153 }
2154 break;
2155
2156 case AARCH64_OPND_IMM_ROT3:
2157 case AARCH64_OPND_SVE_IMM_ROT1:
2158 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2159 {
2160 set_other_error (mismatch_detail, idx,
2161 _("rotate expected to be 90 or 270"));
2162 return 0;
2163 }
2164 break;
2165
2166 case AARCH64_OPND_SHLL_IMM:
2167 assert (idx == 2);
2168 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2169 if (opnd->imm.value != size)
2170 {
2171 set_other_error (mismatch_detail, idx,
2172 _("invalid shift amount"));
2173 return 0;
2174 }
2175 break;
2176
2177 case AARCH64_OPND_IMM_VLSL:
2178 size = aarch64_get_qualifier_esize (qualifier);
2179 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2180 {
2181 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2182 size * 8 - 1);
2183 return 0;
2184 }
2185 break;
2186
2187 case AARCH64_OPND_IMM_VLSR:
2188 size = aarch64_get_qualifier_esize (qualifier);
2189 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2190 {
2191 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2192 return 0;
2193 }
2194 break;
2195
2196 case AARCH64_OPND_SIMD_IMM:
2197 case AARCH64_OPND_SIMD_IMM_SFT:
2198 /* Qualifier check. */
2199 switch (qualifier)
2200 {
2201 case AARCH64_OPND_QLF_LSL:
2202 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2203 {
2204 set_other_error (mismatch_detail, idx,
2205 _("invalid shift operator"));
2206 return 0;
2207 }
2208 break;
2209 case AARCH64_OPND_QLF_MSL:
2210 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2211 {
2212 set_other_error (mismatch_detail, idx,
2213 _("invalid shift operator"));
2214 return 0;
2215 }
2216 break;
2217 case AARCH64_OPND_QLF_NIL:
2218 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2219 {
2220 set_other_error (mismatch_detail, idx,
2221 _("shift is not permitted"));
2222 return 0;
2223 }
2224 break;
2225 default:
2226 assert (0);
2227 return 0;
2228 }
2229 /* Is the immediate valid? */
2230 assert (idx == 1);
2231 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2232 {
2233 /* uimm8 or simm8 */
2234 if (!value_in_range_p (opnd->imm.value, -128, 255))
2235 {
2236 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2237 return 0;
2238 }
2239 }
2240 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2241 {
2242 /* uimm64 is not
2243 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2244 ffffffffgggggggghhhhhhhh'. */
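/* i.e. (illustration) each byte of the 64-bit immediate must be either
   0x00 or 0xff, so 0xff00ff00ff00ff00 is representable here while
   0x12ff00ff00ff00ff is not.  */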
2245 set_other_error (mismatch_detail, idx,
2246 _("invalid value for immediate"));
2247 return 0;
2248 }
2249 /* Is the shift amount valid? */
2250 switch (opnd->shifter.kind)
2251 {
2252 case AARCH64_MOD_LSL:
2253 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2254 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2255 {
2256 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2257 (size - 1) * 8);
2258 return 0;
2259 }
2260 if (!value_aligned_p (opnd->shifter.amount, 8))
2261 {
2262 set_unaligned_error (mismatch_detail, idx, 8);
2263 return 0;
2264 }
2265 break;
2266 case AARCH64_MOD_MSL:
2267 /* Only 8 and 16 are valid shift amounts. */
2268 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2269 {
2270 set_other_error (mismatch_detail, idx,
2271 _("shift amount must be 8 or 16"));
2272 return 0;
2273 }
2274 break;
2275 default:
2276 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2277 {
2278 set_other_error (mismatch_detail, idx,
2279 _("invalid shift operator"));
2280 return 0;
2281 }
2282 break;
2283 }
2284 break;
2285
2286 case AARCH64_OPND_FPIMM:
2287 case AARCH64_OPND_SIMD_FPIMM:
2288 case AARCH64_OPND_SVE_FPIMM8:
2289 if (opnd->imm.is_fp == 0)
2290 {
2291 set_other_error (mismatch_detail, idx,
2292 _("floating-point immediate expected"));
2293 return 0;
2294 }
2295 /* The value is expected to be an 8-bit floating-point constant with
2296 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2297 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2298 instruction). */
2299 if (!value_in_range_p (opnd->imm.value, 0, 255))
2300 {
2301 set_other_error (mismatch_detail, idx,
2302 _("immediate out of range"));
2303 return 0;
2304 }
2305 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2306 {
2307 set_other_error (mismatch_detail, idx,
2308 _("invalid shift operator"));
2309 return 0;
2310 }
2311 break;
2312
2313 case AARCH64_OPND_SVE_AIMM:
2314 min_value = 0;
2315 sve_aimm:
2316 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2317 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2318 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
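/* MASK covers SIZE * 8 bits (the double shift avoids undefined
   behaviour when SIZE is 8).  E.g. (illustration) for .h elements MASK
   is 0xffff and #0x2a00 is normalised below to #0x2a, LSL #8.  */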
2319 uvalue = opnd->imm.value;
2320 shift = opnd->shifter.amount;
2321 if (size == 1)
2322 {
2323 if (shift != 0)
2324 {
2325 set_other_error (mismatch_detail, idx,
2326 _("no shift amount allowed for"
2327 " 8-bit constants"));
2328 return 0;
2329 }
2330 }
2331 else
2332 {
2333 if (shift != 0 && shift != 8)
2334 {
2335 set_other_error (mismatch_detail, idx,
2336 _("shift amount must be 0 or 8"));
2337 return 0;
2338 }
2339 if (shift == 0 && (uvalue & 0xff) == 0)
2340 {
2341 shift = 8;
2342 uvalue = (int64_t) uvalue / 256;
2343 }
2344 }
2345 mask >>= shift;
2346 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2347 {
2348 set_other_error (mismatch_detail, idx,
2349 _("immediate too big for element size"));
2350 return 0;
2351 }
2352 uvalue = (uvalue - min_value) & mask;
2353 if (uvalue > 0xff)
2354 {
2355 set_other_error (mismatch_detail, idx,
2356 _("invalid arithmetic immediate"));
2357 return 0;
2358 }
2359 break;
2360
2361 case AARCH64_OPND_SVE_ASIMM:
2362 min_value = -128;
2363 goto sve_aimm;
2364
2365 case AARCH64_OPND_SVE_I1_HALF_ONE:
2366 assert (opnd->imm.is_fp);
2367 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2368 {
2369 set_other_error (mismatch_detail, idx,
2370 _("floating-point value must be 0.5 or 1.0"));
2371 return 0;
2372 }
2373 break;
2374
2375 case AARCH64_OPND_SVE_I1_HALF_TWO:
2376 assert (opnd->imm.is_fp);
2377 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2378 {
2379 set_other_error (mismatch_detail, idx,
2380 _("floating-point value must be 0.5 or 2.0"));
2381 return 0;
2382 }
2383 break;
2384
2385 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2386 assert (opnd->imm.is_fp);
2387 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2388 {
2389 set_other_error (mismatch_detail, idx,
2390 _("floating-point value must be 0.0 or 1.0"));
2391 return 0;
2392 }
2393 break;
2394
2395 case AARCH64_OPND_SVE_INV_LIMM:
2396 {
2397 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2398 uint64_t uimm = ~opnd->imm.value;
2399 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2400 {
2401 set_other_error (mismatch_detail, idx,
2402 _("immediate out of range"));
2403 return 0;
2404 }
2405 }
2406 break;
2407
2408 case AARCH64_OPND_SVE_LIMM_MOV:
2409 {
2410 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2411 uint64_t uimm = opnd->imm.value;
2412 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2413 {
2414 set_other_error (mismatch_detail, idx,
2415 _("immediate out of range"));
2416 return 0;
2417 }
2418 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2419 {
2420 set_other_error (mismatch_detail, idx,
2421 _("invalid replicated MOV immediate"));
2422 return 0;
2423 }
2424 }
2425 break;
2426
2427 case AARCH64_OPND_SVE_PATTERN_SCALED:
2428 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2429 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2430 {
2431 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2432 return 0;
2433 }
2434 break;
2435
2436 case AARCH64_OPND_SVE_SHLIMM_PRED:
2437 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2438 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2439 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2440 {
2441 set_imm_out_of_range_error (mismatch_detail, idx,
2442 0, 8 * size - 1);
2443 return 0;
2444 }
2445 break;
2446
2447 case AARCH64_OPND_SVE_SHRIMM_PRED:
2448 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2449 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2450 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2451 {
2452 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2453 return 0;
2454 }
2455 break;
2456
2457 default:
2458 break;
2459 }
2460 break;
2461
2462 case AARCH64_OPND_CLASS_SYSTEM:
2463 switch (type)
2464 {
2465 case AARCH64_OPND_PSTATEFIELD:
2466 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2467 /* MSR UAO, #uimm4
2468 MSR PAN, #uimm4 (and likewise MSR DIT, #uimm4)
2469 The immediate must be #0 or #1. */
2470 if ((opnd->pstatefield == 0x03 /* UAO. */
2471 || opnd->pstatefield == 0x04 /* PAN. */
2472 || opnd->pstatefield == 0x1a) /* DIT. */
2473 && opnds[1].imm.value > 1)
2474 {
2475 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2476 return 0;
2477 }
2478 /* MSR SPSel, #uimm4
2479 Uses uimm4 as a control value to select the stack pointer: if
2480 bit 0 is set it selects the current exception level's stack
2481 pointer, if bit 0 is clear it selects the shared EL0 stack pointer.
2482 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2483 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2484 {
2485 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2486 return 0;
2487 }
2488 break;
2489 default:
2490 break;
2491 }
2492 break;
2493
2494 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2495 /* Get the upper bound for the element index. */
2496 if (opcode->op == OP_FCMLA_ELEM)
2497 /* FCMLA index range depends on the vector size of other operands
2498 and is halved because complex numbers take two elements. */
2499 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2500 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2501 else
2502 num = 16;
2503 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2504
2505 /* Index out-of-range. */
2506 if (!value_in_range_p (opnd->reglane.index, 0, num))
2507 {
2508 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2509 return 0;
2510 }
2511 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2512 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2513 number is encoded in "size:M:Rm":
2514 size <Vm>
2515 00 RESERVED
2516 01 0:Rm
2517 10 M:Rm
2518 11 RESERVED */
2519 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2520 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2521 {
2522 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2523 return 0;
2524 }
2525 break;
2526
2527 case AARCH64_OPND_CLASS_MODIFIED_REG:
2528 assert (idx == 1 || idx == 2);
2529 switch (type)
2530 {
2531 case AARCH64_OPND_Rm_EXT:
2532 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2533 && opnd->shifter.kind != AARCH64_MOD_LSL)
2534 {
2535 set_other_error (mismatch_detail, idx,
2536 _("extend operator expected"));
2537 return 0;
2538 }
2539 /* The extend operator is not optional unless at least one of "Rd" or "Rn" is '11111'
2540 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2541 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2542 case. */
2543 if (!aarch64_stack_pointer_p (opnds + 0)
2544 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2545 {
2546 if (!opnd->shifter.operator_present)
2547 {
2548 set_other_error (mismatch_detail, idx,
2549 _("missing extend operator"));
2550 return 0;
2551 }
2552 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2553 {
2554 set_other_error (mismatch_detail, idx,
2555 _("'LSL' operator not allowed"));
2556 return 0;
2557 }
2558 }
2559 assert (opnd->shifter.operator_present /* Default to LSL. */
2560 || opnd->shifter.kind == AARCH64_MOD_LSL);
2561 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2562 {
2563 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2564 return 0;
2565 }
2566 /* In the 64-bit form, the final register operand is written as Wm
2567 for all but the (possibly omitted) UXTX/LSL and SXTX
2568 operators.
2569 N.B. GAS allows X register to be used with any operator as a
2570 programming convenience. */
2571 if (qualifier == AARCH64_OPND_QLF_X
2572 && opnd->shifter.kind != AARCH64_MOD_LSL
2573 && opnd->shifter.kind != AARCH64_MOD_UXTX
2574 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2575 {
2576 set_other_error (mismatch_detail, idx, _("W register expected"));
2577 return 0;
2578 }
2579 break;
2580
2581 case AARCH64_OPND_Rm_SFT:
2582 /* ROR is not available to the shifted register operand in
2583 arithmetic instructions. */
2584 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2585 {
2586 set_other_error (mismatch_detail, idx,
2587 _("shift operator expected"));
2588 return 0;
2589 }
2590 if (opnd->shifter.kind == AARCH64_MOD_ROR
2591 && opcode->iclass != log_shift)
2592 {
2593 set_other_error (mismatch_detail, idx,
2594 _("'ROR' operator not allowed"));
2595 return 0;
2596 }
2597 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2598 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2599 {
2600 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2601 return 0;
2602 }
2603 break;
2604
2605 default:
2606 break;
2607 }
2608 break;
2609
2610 default:
2611 break;
2612 }
2613
2614 return 1;
2615 }
2616
2617 /* Main entrypoint for the operand constraint checking.
2618
2619 Return 1 if operands of *INST meet the constraints applied by the operand
2620 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2621 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2622 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2623 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2624 error kind when it is notified that an instruction does not pass the check).
2625
2626 Un-determined operand qualifiers may get established during the process. */
2627
2628 int
2629 aarch64_match_operands_constraint (aarch64_inst *inst,
2630 aarch64_operand_error *mismatch_detail)
2631 {
2632 int i;
2633
2634 DEBUG_TRACE ("enter");
2635
2636 /* Check for cases where a source register needs to be the same as the
2637 destination register. Do this before matching qualifiers since if
2638 an instruction has both invalid tying and invalid qualifiers,
2639 the error about qualifiers would suggest several alternative
2640 instructions that also have invalid tying. */
2641 i = inst->opcode->tied_operand;
2642 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2643 {
2644 if (mismatch_detail)
2645 {
2646 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2647 mismatch_detail->index = i;
2648 mismatch_detail->error = NULL;
2649 }
2650 return 0;
2651 }
2652
2653 /* Match operands' qualifiers.
2654 *INST has already had qualifiers established for some, if not all, of
2655 its operands; we need to find out whether these established
2656 qualifiers match one of the qualifier sequences in
2657 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2658 the corresponding qualifier in that sequence.
2659 Only basic operand constraint checking is done here; the more thorough
2660 constraint checking will be carried out by operand_general_constraint_met_p,
2661 which has to be called after this in order to get all of the operands'
2662 qualifiers established. */
2663 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2664 {
2665 DEBUG_TRACE ("FAIL on operand qualifier matching");
2666 if (mismatch_detail)
2667 {
2668 /* Return an error type to indicate that it is a qualifier
2669 matching failure; we don't care about which operand, as there
2670 is enough information in the opcode table to reproduce it. */
2671 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2672 mismatch_detail->index = -1;
2673 mismatch_detail->error = NULL;
2674 }
2675 return 0;
2676 }
2677
2678 /* Match operands' constraint. */
2679 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2680 {
2681 enum aarch64_opnd type = inst->opcode->operands[i];
2682 if (type == AARCH64_OPND_NIL)
2683 break;
2684 if (inst->operands[i].skip)
2685 {
2686 DEBUG_TRACE ("skip the incomplete operand %d", i);
2687 continue;
2688 }
2689 if (operand_general_constraint_met_p (inst->operands, i, type,
2690 inst->opcode, mismatch_detail) == 0)
2691 {
2692 DEBUG_TRACE ("FAIL on operand %d", i);
2693 return 0;
2694 }
2695 }
2696
2697 DEBUG_TRACE ("PASS");
2698
2699 return 1;
2700 }
2701
2702 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2703 Also updates the TYPE of each INST->OPERANDS with the corresponding
2704 value of OPCODE->OPERANDS.
2705
2706 Note that some operand qualifiers may need to be manually cleared by
2707 the caller before it goes on to call aarch64_opcode_encode; doing
2708 so helps the qualifier matching facilities work
2709 properly. */
2710
2711 const aarch64_opcode*
2712 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2713 {
2714 int i;
2715 const aarch64_opcode *old = inst->opcode;
2716
2717 inst->opcode = opcode;
2718
2719 /* Update the operand types. */
2720 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2721 {
2722 inst->operands[i].type = opcode->operands[i];
2723 if (opcode->operands[i] == AARCH64_OPND_NIL)
2724 break;
2725 }
2726
2727 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2728
2729 return old;
2730 }
2731
2732 int
2733 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2734 {
2735 int i;
2736 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2737 if (operands[i] == operand)
2738 return i;
2739 else if (operands[i] == AARCH64_OPND_NIL)
2740 break;
2741 return -1;
2742 }
2743 \f
2744 /* R0...R30, followed by FOR31. */
2745 #define BANK(R, FOR31) \
2746 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
2747 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2748 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2749 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
2750 /* [0][0] 32-bit integer regs with sp Wn
2751 [0][1] 64-bit integer regs with sp Xn sf=1
2752 [1][0] 32-bit integer regs with #0 Wn
2753 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2754 static const char *int_reg[2][2][32] = {
2755 #define R32(X) "w" #X
2756 #define R64(X) "x" #X
2757 { BANK (R32, "wsp"), BANK (R64, "sp") },
2758 { BANK (R32, "wzr"), BANK (R64, "xzr") }
2759 #undef R64
2760 #undef R32
2761 };
2762
2763 /* Names of the SVE vector registers, first with .S suffixes,
2764 then with .D suffixes. */
2765
2766 static const char *sve_reg[2][32] = {
2767 #define ZS(X) "z" #X ".s"
2768 #define ZD(X) "z" #X ".d"
2769 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2770 #undef ZD
2771 #undef ZS
2772 };
2773 #undef BANK
2774
2775 /* Return the integer register name.
2776 If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg. */
2777
2778 static inline const char *
2779 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2780 {
2781 const int has_zr = sp_reg_p ? 0 : 1;
2782 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2783 return int_reg[has_zr][is_64][regno];
2784 }
2785
2786 /* Like get_int_reg_name, but IS_64 is always 1. */
2787
2788 static inline const char *
2789 get_64bit_int_reg_name (int regno, int sp_reg_p)
2790 {
2791 const int has_zr = sp_reg_p ? 0 : 1;
2792 return int_reg[has_zr][1][regno];
2793 }
2794
2795 /* Get the name of the integer offset register in OPND, using the shift type
2796 to decide whether it's a word or doubleword. */
2797
2798 static inline const char *
2799 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2800 {
2801 switch (opnd->shifter.kind)
2802 {
2803 case AARCH64_MOD_UXTW:
2804 case AARCH64_MOD_SXTW:
2805 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2806
2807 case AARCH64_MOD_LSL:
2808 case AARCH64_MOD_SXTX:
2809 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2810
2811 default:
2812 abort ();
2813 }
2814 }
2815
2816 /* Get the name of the SVE vector offset register in OPND, using the operand
2817 qualifier to decide whether the suffix should be .S or .D. */
2818
2819 static inline const char *
2820 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2821 {
2822 assert (qualifier == AARCH64_OPND_QLF_S_S
2823 || qualifier == AARCH64_OPND_QLF_S_D);
2824 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2825 }
2826
2827 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2828
2829 typedef union
2830 {
2831 uint64_t i;
2832 double d;
2833 } double_conv_t;
2834
2835 typedef union
2836 {
2837 uint32_t i;
2838 float f;
2839 } single_conv_t;
2840
2841 typedef union
2842 {
2843 uint32_t i;
2844 float f;
2845 } half_conv_t;
2846
2847 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2848 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2849 (depending on the type of the instruction). IMM8 will be expanded to a
2850 single-precision floating-point value (SIZE == 4) or a double-precision
2851 floating-point value (SIZE == 8). A half-precision floating-point value
2852 (SIZE == 2) is expanded to a single-precision floating-point value. The
2853 expanded value is returned. */
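/* For example (illustration): IMM8 == 0x70 has sign 0, imm8<6> == 1 and
   imm8<5:0> == 0x30, and expands to 0x3f800000 (i.e. 1.0) when
   SIZE == 4.  */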
2854
2855 static uint64_t
2856 expand_fp_imm (int size, uint32_t imm8)
2857 {
2858 uint64_t imm = 0;
2859 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2860
2861 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2862 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2863 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2864 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2865 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
2866 if (size == 8)
2867 {
2868 imm = (imm8_7 << (63-32)) /* imm8<7> */
2869 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
2870 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2871 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2872 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2873 imm <<= 32;
2874 }
2875 else if (size == 4 || size == 2)
2876 {
2877 imm = (imm8_7 << 31) /* imm8<7> */
2878 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2879 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2880 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2881 }
2882 else
2883 {
2884 /* An unsupported size. */
2885 assert (0);
2886 }
2887
2888 return imm;
2889 }
2890
2891 /* Produce the string representation of the register list operand *OPND
2892 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2893 the register name that comes before the register number, such as "v". */
2894 static void
2895 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2896 const char *prefix)
2897 {
2898 const int num_regs = opnd->reglist.num_regs;
2899 const int first_reg = opnd->reglist.first_regno;
2900 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2901 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2902 char tb[8]; /* Temporary buffer. */
2903
2904 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2905 assert (num_regs >= 1 && num_regs <= 4);
2906
2907 /* Prepare the index if any. */
2908 if (opnd->reglist.has_index)
2909 /* PR 21096: The %100 is to silence a warning about possible truncation. */
2910 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
2911 else
2912 tb[0] = '\0';
2913
2914 /* The hyphenated form is preferred for disassembly if there are
2915 more than two registers in the list, and the register numbers
2916 are monotonically increasing in increments of one. */
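/* E.g. (illustration) a four-register LD4 list prints as
   {v0.4s-v3.4s}, while a wrapping list such as
   {v31.4s, v0.4s, v1.4s, v2.4s} keeps the expanded form.  */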
2917 if (num_regs > 2 && last_reg > first_reg)
2918 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2919 prefix, last_reg, qlf_name, tb);
2920 else
2921 {
2922 const int reg0 = first_reg;
2923 const int reg1 = (first_reg + 1) & 0x1f;
2924 const int reg2 = (first_reg + 2) & 0x1f;
2925 const int reg3 = (first_reg + 3) & 0x1f;
2926
2927 switch (num_regs)
2928 {
2929 case 1:
2930 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2931 break;
2932 case 2:
2933 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2934 prefix, reg1, qlf_name, tb);
2935 break;
2936 case 3:
2937 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2938 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2939 prefix, reg2, qlf_name, tb);
2940 break;
2941 case 4:
2942 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2943 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2944 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2945 break;
2946 }
2947 }
2948 }
2949
2950 /* Print the register+immediate address in OPND to BUF, which has SIZE
2951 characters. BASE is the name of the base register. */
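/* Typical results (illustration): "[x0, #16]!" for pre-index writeback,
   "[x0], #16" for post-index, "[x0, #3, mul vl]" for the SVE scaled
   form and "[x0]" when the offset is zero.  */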
2952
2953 static void
2954 print_immediate_offset_address (char *buf, size_t size,
2955 const aarch64_opnd_info *opnd,
2956 const char *base)
2957 {
2958 if (opnd->addr.writeback)
2959 {
2960 if (opnd->addr.preind)
2961 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
2962 else
2963 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
2964 }
2965 else
2966 {
2967 if (opnd->shifter.operator_present)
2968 {
2969 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2970 snprintf (buf, size, "[%s, #%d, mul vl]",
2971 base, opnd->addr.offset.imm);
2972 }
2973 else if (opnd->addr.offset.imm)
2974 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
2975 else
2976 snprintf (buf, size, "[%s]", base);
2977 }
2978 }
2979
2980 /* Produce the string representation of the register offset address operand
2981 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2982 the names of the base and offset registers. */
2983 static void
2984 print_register_offset_address (char *buf, size_t size,
2985 const aarch64_opnd_info *opnd,
2986 const char *base, const char *offset)
2987 {
2988 char tb[16]; /* Temporary buffer. */
2989 bfd_boolean print_extend_p = TRUE;
2990 bfd_boolean print_amount_p = TRUE;
2991 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2992
2993 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2994 || !opnd->shifter.amount_present))
2995 {
2996 /* Don't print the shift/extend amount when the amount is zero and
2997 this is not the special case of an 8-bit load/store instruction. */
2998 print_amount_p = FALSE;
2999 /* Likewise, no need to print the shift operator LSL in such a
3000 situation. */
3001 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3002 print_extend_p = FALSE;
3003 }
3004
3005 /* Prepare for the extend/shift. */
3006 if (print_extend_p)
3007 {
3008 if (print_amount_p)
3009 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3010 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3011 (opnd->shifter.amount % 100));
3012 else
3013 snprintf (tb, sizeof (tb), ", %s", shift_name);
3014 }
3015 else
3016 tb[0] = '\0';
3017
3018 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3019 }
3020
3021 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3022 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3023 PC, PCREL_P and ADDRESS are used to pass in and return information about
3024 the PC-relative address calculation, where the PC value is passed in
3025 PC. If the operand is PC-relative, *PCREL_P (if PCREL_P is non-NULL)
3026 will return 1 and *ADDRESS (if ADDRESS is non-NULL) will return the
3027 calculated address; otherwise, *PCREL_P (if PCREL_P is non-NULL) returns 0.
3028
3029 The function serves both the disassembler and the assembler diagnostics
3030 issuer, which is the reason why it lives in this file. */
3031
3032 void
3033 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3034 const aarch64_opcode *opcode,
3035 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3036 bfd_vma *address, char** notes ATTRIBUTE_UNUSED)
3037 {
3038 unsigned int i, num_conds;
3039 const char *name = NULL;
3040 const aarch64_opnd_info *opnd = opnds + idx;
3041 enum aarch64_modifier_kind kind;
3042 uint64_t addr, enum_value;
3043
3044 buf[0] = '\0';
3045 if (pcrel_p)
3046 *pcrel_p = 0;
3047
3048 switch (opnd->type)
3049 {
3050 case AARCH64_OPND_Rd:
3051 case AARCH64_OPND_Rn:
3052 case AARCH64_OPND_Rm:
3053 case AARCH64_OPND_Rt:
3054 case AARCH64_OPND_Rt2:
3055 case AARCH64_OPND_Rs:
3056 case AARCH64_OPND_Ra:
3057 case AARCH64_OPND_Rt_SYS:
3058 case AARCH64_OPND_PAIRREG:
3059 case AARCH64_OPND_SVE_Rm:
3060 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3061 the <ic_op>, therefore we use opnd->present to override the
3062 generic optional-ness information. */
3063 if (opnd->type == AARCH64_OPND_Rt_SYS)
3064 {
3065 if (!opnd->present)
3066 break;
3067 }
3068 /* Omit the operand, e.g. RET. */
3069 else if (optional_operand_p (opcode, idx)
3070 && (opnd->reg.regno
3071 == get_optional_operand_default_value (opcode)))
3072 break;
3073 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3074 || opnd->qualifier == AARCH64_OPND_QLF_X);
3075 snprintf (buf, size, "%s",
3076 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3077 break;
3078
3079 case AARCH64_OPND_Rd_SP:
3080 case AARCH64_OPND_Rn_SP:
3081 case AARCH64_OPND_SVE_Rn_SP:
3082 case AARCH64_OPND_Rm_SP:
3083 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3084 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3085 || opnd->qualifier == AARCH64_OPND_QLF_X
3086 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3087 snprintf (buf, size, "%s",
3088 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3089 break;
3090
3091 case AARCH64_OPND_Rm_EXT:
3092 kind = opnd->shifter.kind;
3093 assert (idx == 1 || idx == 2);
3094 if ((aarch64_stack_pointer_p (opnds)
3095 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3096 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3097 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3098 && kind == AARCH64_MOD_UXTW)
3099 || (opnd->qualifier == AARCH64_OPND_QLF_X
3100 && kind == AARCH64_MOD_UXTX)))
3101 {
3102 /* 'LSL' is the preferred form in this case. */
3103 kind = AARCH64_MOD_LSL;
3104 if (opnd->shifter.amount == 0)
3105 {
3106 /* Shifter omitted. */
3107 snprintf (buf, size, "%s",
3108 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3109 break;
3110 }
3111 }
3112 if (opnd->shifter.amount)
3113 snprintf (buf, size, "%s, %s #%" PRIi64,
3114 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3115 aarch64_operand_modifiers[kind].name,
3116 opnd->shifter.amount);
3117 else
3118 snprintf (buf, size, "%s, %s",
3119 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3120 aarch64_operand_modifiers[kind].name);
3121 break;
3122
3123 case AARCH64_OPND_Rm_SFT:
3124 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3125 || opnd->qualifier == AARCH64_OPND_QLF_X);
3126 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3127 snprintf (buf, size, "%s",
3128 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3129 else
3130 snprintf (buf, size, "%s, %s #%" PRIi64,
3131 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3132 aarch64_operand_modifiers[opnd->shifter.kind].name,
3133 opnd->shifter.amount);
3134 break;
3135
3136 case AARCH64_OPND_Fd:
3137 case AARCH64_OPND_Fn:
3138 case AARCH64_OPND_Fm:
3139 case AARCH64_OPND_Fa:
3140 case AARCH64_OPND_Ft:
3141 case AARCH64_OPND_Ft2:
3142 case AARCH64_OPND_Sd:
3143 case AARCH64_OPND_Sn:
3144 case AARCH64_OPND_Sm:
3145 case AARCH64_OPND_SVE_VZn:
3146 case AARCH64_OPND_SVE_Vd:
3147 case AARCH64_OPND_SVE_Vm:
3148 case AARCH64_OPND_SVE_Vn:
3149 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3150 opnd->reg.regno);
3151 break;
3152
3153 case AARCH64_OPND_Va:
3154 case AARCH64_OPND_Vd:
3155 case AARCH64_OPND_Vn:
3156 case AARCH64_OPND_Vm:
3157 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3158 aarch64_get_qualifier_name (opnd->qualifier));
3159 break;
3160
3161 case AARCH64_OPND_Ed:
3162 case AARCH64_OPND_En:
3163 case AARCH64_OPND_Em:
3164 case AARCH64_OPND_SM3_IMM2:
3165 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3166 aarch64_get_qualifier_name (opnd->qualifier),
3167 opnd->reglane.index);
3168 break;
3169
3170 case AARCH64_OPND_VdD1:
3171 case AARCH64_OPND_VnD1:
3172 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3173 break;
3174
3175 case AARCH64_OPND_LVn:
3176 case AARCH64_OPND_LVt:
3177 case AARCH64_OPND_LVt_AL:
3178 case AARCH64_OPND_LEt:
3179 print_register_list (buf, size, opnd, "v");
3180 break;
3181
3182 case AARCH64_OPND_SVE_Pd:
3183 case AARCH64_OPND_SVE_Pg3:
3184 case AARCH64_OPND_SVE_Pg4_5:
3185 case AARCH64_OPND_SVE_Pg4_10:
3186 case AARCH64_OPND_SVE_Pg4_16:
3187 case AARCH64_OPND_SVE_Pm:
3188 case AARCH64_OPND_SVE_Pn:
3189 case AARCH64_OPND_SVE_Pt:
3190 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3191 snprintf (buf, size, "p%d", opnd->reg.regno);
3192 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3193 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3194 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3195 aarch64_get_qualifier_name (opnd->qualifier));
3196 else
3197 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3198 aarch64_get_qualifier_name (opnd->qualifier));
3199 break;
3200
3201 case AARCH64_OPND_SVE_Za_5:
3202 case AARCH64_OPND_SVE_Za_16:
3203 case AARCH64_OPND_SVE_Zd:
3204 case AARCH64_OPND_SVE_Zm_5:
3205 case AARCH64_OPND_SVE_Zm_16:
3206 case AARCH64_OPND_SVE_Zn:
3207 case AARCH64_OPND_SVE_Zt:
3208 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3209 snprintf (buf, size, "z%d", opnd->reg.regno);
3210 else
3211 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3212 aarch64_get_qualifier_name (opnd->qualifier));
3213 break;
3214
3215 case AARCH64_OPND_SVE_ZnxN:
3216 case AARCH64_OPND_SVE_ZtxN:
3217 print_register_list (buf, size, opnd, "z");
3218 break;
3219
3220 case AARCH64_OPND_SVE_Zm3_INDEX:
3221 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3222 case AARCH64_OPND_SVE_Zm4_INDEX:
3223 case AARCH64_OPND_SVE_Zn_INDEX:
3224 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3225 aarch64_get_qualifier_name (opnd->qualifier),
3226 opnd->reglane.index);
3227 break;
3228
3229 case AARCH64_OPND_CRn:
3230 case AARCH64_OPND_CRm:
3231 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3232 break;
3233
3234 case AARCH64_OPND_IDX:
3235 case AARCH64_OPND_MASK:
3236 case AARCH64_OPND_IMM:
3237 case AARCH64_OPND_IMM_2:
3238 case AARCH64_OPND_WIDTH:
3239 case AARCH64_OPND_UIMM3_OP1:
3240 case AARCH64_OPND_UIMM3_OP2:
3241 case AARCH64_OPND_BIT_NUM:
3242 case AARCH64_OPND_IMM_VLSL:
3243 case AARCH64_OPND_IMM_VLSR:
3244 case AARCH64_OPND_SHLL_IMM:
3245 case AARCH64_OPND_IMM0:
3246 case AARCH64_OPND_IMMR:
3247 case AARCH64_OPND_IMMS:
3248 case AARCH64_OPND_FBITS:
3249 case AARCH64_OPND_SIMM5:
3250 case AARCH64_OPND_SVE_SHLIMM_PRED:
3251 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3252 case AARCH64_OPND_SVE_SHRIMM_PRED:
3253 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3254 case AARCH64_OPND_SVE_SIMM5:
3255 case AARCH64_OPND_SVE_SIMM5B:
3256 case AARCH64_OPND_SVE_SIMM6:
3257 case AARCH64_OPND_SVE_SIMM8:
3258 case AARCH64_OPND_SVE_UIMM3:
3259 case AARCH64_OPND_SVE_UIMM7:
3260 case AARCH64_OPND_SVE_UIMM8:
3261 case AARCH64_OPND_SVE_UIMM8_53:
3262 case AARCH64_OPND_IMM_ROT1:
3263 case AARCH64_OPND_IMM_ROT2:
3264 case AARCH64_OPND_IMM_ROT3:
3265 case AARCH64_OPND_SVE_IMM_ROT1:
3266 case AARCH64_OPND_SVE_IMM_ROT2:
3267 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3268 break;
3269
3270 case AARCH64_OPND_SVE_I1_HALF_ONE:
3271 case AARCH64_OPND_SVE_I1_HALF_TWO:
3272 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3273 {
3274 single_conv_t c;
3275 c.i = opnd->imm.value;
3276 snprintf (buf, size, "#%.1f", c.f);
3277 break;
3278 }
3279
3280 case AARCH64_OPND_SVE_PATTERN:
3281 if (optional_operand_p (opcode, idx)
3282 && opnd->imm.value == get_optional_operand_default_value (opcode))
3283 break;
3284 enum_value = opnd->imm.value;
3285 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3286 if (aarch64_sve_pattern_array[enum_value])
3287 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3288 else
3289 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3290 break;
3291
3292 case AARCH64_OPND_SVE_PATTERN_SCALED:
3293 if (optional_operand_p (opcode, idx)
3294 && !opnd->shifter.operator_present
3295 && opnd->imm.value == get_optional_operand_default_value (opcode))
3296 break;
3297 enum_value = opnd->imm.value;
3298 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3299 if (aarch64_sve_pattern_array[opnd->imm.value])
3300 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3301 else
3302 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3303 if (opnd->shifter.operator_present)
3304 {
3305 size_t len = strlen (buf);
3306 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3307 aarch64_operand_modifiers[opnd->shifter.kind].name,
3308 opnd->shifter.amount);
3309 }
3310 break;
3311
3312 case AARCH64_OPND_SVE_PRFOP:
3313 enum_value = opnd->imm.value;
3314 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3315 if (aarch64_sve_prfop_array[enum_value])
3316 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3317 else
3318 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3319 break;
3320
3321 case AARCH64_OPND_IMM_MOV:
3322 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3323 {
3324 case 4: /* e.g. MOV Wd, #<imm32>. */
3325 {
3326 int imm32 = opnd->imm.value;
3327 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3328 }
3329 break;
3330 case 8: /* e.g. MOV Xd, #<imm64>. */
3331 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3332 opnd->imm.value, opnd->imm.value);
3333 break;
3334 default: assert (0);
3335 }
3336 break;
3337
3338 case AARCH64_OPND_FPIMM0:
3339 snprintf (buf, size, "#0.0");
3340 break;
3341
3342 case AARCH64_OPND_LIMM:
3343 case AARCH64_OPND_AIMM:
3344 case AARCH64_OPND_HALF:
3345 case AARCH64_OPND_SVE_INV_LIMM:
3346 case AARCH64_OPND_SVE_LIMM:
3347 case AARCH64_OPND_SVE_LIMM_MOV:
3348 if (opnd->shifter.amount)
3349 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3350 opnd->shifter.amount);
3351 else
3352 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3353 break;
3354
3355 case AARCH64_OPND_SIMD_IMM:
3356 case AARCH64_OPND_SIMD_IMM_SFT:
3357 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3358 || opnd->shifter.kind == AARCH64_MOD_NONE)
3359 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3360 else
3361 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3362 aarch64_operand_modifiers[opnd->shifter.kind].name,
3363 opnd->shifter.amount);
3364 break;
3365
3366 case AARCH64_OPND_SVE_AIMM:
3367 case AARCH64_OPND_SVE_ASIMM:
3368 if (opnd->shifter.amount)
3369 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3370 opnd->shifter.amount);
3371 else
3372 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3373 break;
3374
3375 case AARCH64_OPND_FPIMM:
3376 case AARCH64_OPND_SIMD_FPIMM:
3377 case AARCH64_OPND_SVE_FPIMM8:
3378 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3379 {
3380 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3381 {
3382 half_conv_t c;
3383 c.i = expand_fp_imm (2, opnd->imm.value);
3384 snprintf (buf, size, "#%.18e", c.f);
3385 }
3386 break;
3387 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3388 {
3389 single_conv_t c;
3390 c.i = expand_fp_imm (4, opnd->imm.value);
3391 snprintf (buf, size, "#%.18e", c.f);
3392 }
3393 break;
3394 case 8: /* e.g. FMOV <Dd>, #<imm>. */
3395 {
3396 double_conv_t c;
3397 c.i = expand_fp_imm (8, opnd->imm.value);
3398 snprintf (buf, size, "#%.18e", c.d);
3399 }
3400 break;
3401 default: assert (0);
3402 }
3403 break;
3404
3405 case AARCH64_OPND_CCMP_IMM:
3406 case AARCH64_OPND_NZCV:
3407 case AARCH64_OPND_EXCEPTION:
3408 case AARCH64_OPND_UIMM4:
3409 case AARCH64_OPND_UIMM7:
3410 if (optional_operand_p (opcode, idx) == TRUE
3411 && (opnd->imm.value ==
3412 (int64_t) get_optional_operand_default_value (opcode)))
3413 /* Omit the operand, e.g. DCPS1. */
3414 break;
3415 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3416 break;
3417
3418 case AARCH64_OPND_COND:
3419 case AARCH64_OPND_COND1:
3420 snprintf (buf, size, "%s", opnd->cond->names[0]);
3421 num_conds = ARRAY_SIZE (opnd->cond->names);
3422 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3423 {
3424 size_t len = strlen (buf);
3425 if (i == 1)
3426 snprintf (buf + len, size - len, " // %s = %s",
3427 opnd->cond->names[0], opnd->cond->names[i]);
3428 else
3429 snprintf (buf + len, size - len, ", %s",
3430 opnd->cond->names[i]);
3431 }
3432 break;
3433
3434 case AARCH64_OPND_ADDR_ADRP:
3435 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3436 + opnd->imm.value;
3437 if (pcrel_p)
3438 *pcrel_p = 1;
3439 if (address)
3440 *address = addr;
3441 /* This is not necessary during disassembly, as print_address_func
3442 in the disassemble_info will take care of the printing. But some
3443 other callers may still be interested in getting the string in *BUF,
3444 so we do the snprintf here regardless. */
3445 snprintf (buf, size, "#0x%" PRIx64, addr);
3446 break;
3447
3448 case AARCH64_OPND_ADDR_PCREL14:
3449 case AARCH64_OPND_ADDR_PCREL19:
3450 case AARCH64_OPND_ADDR_PCREL21:
3451 case AARCH64_OPND_ADDR_PCREL26:
3452 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3453 if (pcrel_p)
3454 *pcrel_p = 1;
3455 if (address)
3456 *address = addr;
3457 /* This is not necessary during disassembly, as print_address_func
3458 in the disassemble_info will take care of the printing. But some
3459 other callers may still be interested in getting the string in *BUF,
3460 so we do the snprintf here regardless. */
3461 snprintf (buf, size, "#0x%" PRIx64, addr);
3462 break;
3463
3464 case AARCH64_OPND_ADDR_SIMPLE:
3465 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3466 case AARCH64_OPND_SIMD_ADDR_POST:
3467 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3468 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3469 {
3470 if (opnd->addr.offset.is_reg)
3471 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3472 else
3473 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3474 }
3475 else
3476 snprintf (buf, size, "[%s]", name);
3477 break;
3478
3479 case AARCH64_OPND_ADDR_REGOFF:
3480 case AARCH64_OPND_SVE_ADDR_R:
3481 case AARCH64_OPND_SVE_ADDR_RR:
3482 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3483 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3484 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3485 case AARCH64_OPND_SVE_ADDR_RX:
3486 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3487 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3488 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3489 print_register_offset_address
3490 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3491 get_offset_int_reg_name (opnd));
3492 break;
3493
3494 case AARCH64_OPND_SVE_ADDR_RZ:
3495 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3496 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3497 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3498 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3499 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3500 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3501 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3502 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3503 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3504 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3505 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3506 print_register_offset_address
3507 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3508 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3509 break;
3510
3511 case AARCH64_OPND_ADDR_SIMM7:
3512 case AARCH64_OPND_ADDR_SIMM9:
3513 case AARCH64_OPND_ADDR_SIMM9_2:
3514 case AARCH64_OPND_ADDR_SIMM10:
3515 case AARCH64_OPND_ADDR_OFFSET:
3516 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3517 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3518 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3519 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3520 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3521 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3522 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3523 case AARCH64_OPND_SVE_ADDR_RI_U6:
3524 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3525 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3526 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3527 print_immediate_offset_address
3528 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3529 break;
3530
3531 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3532 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3533 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3534 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3535 print_immediate_offset_address
3536 (buf, size, opnd,
3537 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3538 break;
3539
3540 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3541 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3542 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3543 print_register_offset_address
3544 (buf, size, opnd,
3545 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3546 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3547 break;
3548
3549 case AARCH64_OPND_ADDR_UIMM12:
3550 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3551 if (opnd->addr.offset.imm)
3552 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3553 else
3554 snprintf (buf, size, "[%s]", name);
3555 break;
3556
3557 case AARCH64_OPND_SYSREG:
3558 for (i = 0; aarch64_sys_regs[i].name; ++i)
3559 {
3560 bfd_boolean exact_match
3561 = (aarch64_sys_regs[i].flags & opnd->sysreg.flags)
3562 == opnd->sysreg.flags;
3563
3564 /* Try to find an exact match, but if that fails, return the first
3565 partial match that was found. */
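/* E.g. (illustration) dbgdtrrx_el0 (read-only) and dbgdtrtx_el0
   (write-only) share an encoding, so the flags decide which name a
   disassembled MRS or MSR gets.  */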
3566 if (aarch64_sys_regs[i].value == opnd->sysreg.value
3567 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i])
3568 && (name == NULL || exact_match))
3569 {
3570 name = aarch64_sys_regs[i].name;
3571 if (exact_match)
3572 {
3573 if (notes)
3574 *notes = NULL;
3575 break;
3576 }
3577
3578 /* If we didn't match exactly, that means the presence of a flag
3579 indicates what we didn't want for this instruction. E.g. if
3580 F_REG_READ is there, that means we were looking for a write
3581 register. See aarch64_ext_sysreg. */
3582 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
3583 *notes = _("reading from a write-only register.");
3584 else if (aarch64_sys_regs[i].flags & F_REG_READ)
3585 *notes = _("writing to a read-only register.");
3586 }
3587 }
3588
3589 if (name)
3590 snprintf (buf, size, "%s", name);
3591 else
3592 {
3593 /* Implementation defined system register. */
3594 unsigned int value = opnd->sysreg.value;
3595 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3596 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3597 value & 0x7);
3598 }
3599 break;
3600
3601 case AARCH64_OPND_PSTATEFIELD:
3602 for (i = 0; aarch64_pstatefields[i].name; ++i)
3603 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3604 break;
3605 assert (aarch64_pstatefields[i].name);
3606 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3607 break;
3608
3609 case AARCH64_OPND_SYSREG_AT:
3610 case AARCH64_OPND_SYSREG_DC:
3611 case AARCH64_OPND_SYSREG_IC:
3612 case AARCH64_OPND_SYSREG_TLBI:
3613 snprintf (buf, size, "%s", opnd->sysins_op->name);
3614 break;
3615
3616 case AARCH64_OPND_BARRIER:
3617 snprintf (buf, size, "%s", opnd->barrier->name);
3618 break;
3619
3620 case AARCH64_OPND_BARRIER_ISB:
3621 /* Operand can be omitted, e.g. in DCPS1. */
3622 if (! optional_operand_p (opcode, idx)
3623 || (opnd->barrier->value
3624 != get_optional_operand_default_value (opcode)))
3625 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3626 break;
3627
3628 case AARCH64_OPND_PRFOP:
3629 if (opnd->prfop->name != NULL)
3630 snprintf (buf, size, "%s", opnd->prfop->name);
3631 else
3632 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3633 break;
3634
3635 case AARCH64_OPND_BARRIER_PSB:
3636 snprintf (buf, size, "%s", opnd->hint_option->name);
3637 break;
3638
3639 default:
3640 assert (0);
3641 }
3642 }
3643 \f
3644 #define CPENC(op0,op1,crn,crm,op2) \
3645 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3646 /* For 3.9.3 Instructions for Accessing Special Purpose Registers.  */
3647 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3648 /* For 3.9.10 System Instructions.  */
3649 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
3650
3651 #define C0 0
3652 #define C1 1
3653 #define C2 2
3654 #define C3 3
3655 #define C4 4
3656 #define C5 5
3657 #define C6 6
3658 #define C7 7
3659 #define C8 8
3660 #define C9 9
3661 #define C10 10
3662 #define C11 11
3663 #define C12 12
3664 #define C13 13
3665 #define C14 14
3666 #define C15 15
3667
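/* A minimal worked example of the packing above (illustrative only; never
   compiled in, and the function name is made up for this sketch).  CPENC
   keeps the op0:op1:CRn:CRm:op2 fields of an MRS/MSR encoding contiguous,
   so the value equals
   (op0 << 14) | (op1 << 11) | (CRn << 7) | (CRm << 3) | op2,
   which is exactly the layout unpacked by the AARCH64_OPND_SYSREG
   fallback printer above.  */
#if 0
static void
cpenc_example (void)
{
  unsigned int value = CPENC (3, 3, C13, C0, 2);	/* tpidr_el0.  */

  assert (value == 0xde82);
  assert (((value >> 14) & 0x3) == 3);	/* op0  */
  assert (((value >> 11) & 0x7) == 3);	/* op1  */
  assert (((value >> 7) & 0xf) == 13);	/* CRn  */
  assert (((value >> 3) & 0xf) == 0);	/* CRm  */
  assert ((value & 0x7) == 2);		/* op2  */
  /* An unknown register with this encoding would be printed with the
     generic spelling "s3_3_c13_c0_2".  */
}
#endif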
3668 /* TODO: there is one more issue that needs to be resolved:
3669 1. handle cpu-implementation-defined system registers.  */
3670 const aarch64_sys_reg aarch64_sys_regs [] =
3671 {
3672 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3673 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3674 { "elr_el1", CPEN_(0,C0,1), 0 },
3675 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3676 { "sp_el0", CPEN_(0,C1,0), 0 },
3677 { "spsel", CPEN_(0,C2,0), 0 },
3678 { "daif", CPEN_(3,C2,1), 0 },
3679 { "currentel", CPEN_(0,C2,2), F_REG_READ }, /* RO */
3680 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3681 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3682 { "nzcv", CPEN_(3,C2,0), 0 },
3683 { "fpcr", CPEN_(3,C4,0), 0 },
3684 { "fpsr", CPEN_(3,C4,1), 0 },
3685 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3686 { "dlr_el0", CPEN_(3,C5,1), 0 },
3687 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3688 { "elr_el2", CPEN_(4,C0,1), 0 },
3689 { "sp_el1", CPEN_(4,C1,0), 0 },
3690 { "spsr_irq", CPEN_(4,C3,0), 0 },
3691 { "spsr_abt", CPEN_(4,C3,1), 0 },
3692 { "spsr_und", CPEN_(4,C3,2), 0 },
3693 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3694 { "spsr_el3", CPEN_(6,C0,0), 0 },
3695 { "elr_el3", CPEN_(6,C0,1), 0 },
3696 { "sp_el2", CPEN_(6,C1,0), 0 },
3697 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3698 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3699 { "midr_el1", CPENC(3,0,C0,C0,0), F_REG_READ }, /* RO */
3700 { "ctr_el0", CPENC(3,3,C0,C0,1), F_REG_READ }, /* RO */
3701 { "mpidr_el1", CPENC(3,0,C0,C0,5), F_REG_READ }, /* RO */
3702 { "revidr_el1", CPENC(3,0,C0,C0,6), F_REG_READ }, /* RO */
3703 { "aidr_el1", CPENC(3,1,C0,C0,7), F_REG_READ }, /* RO */
3704 { "dczid_el0", CPENC(3,3,C0,C0,7), F_REG_READ }, /* RO */
3705 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), F_REG_READ }, /* RO */
3706 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), F_REG_READ }, /* RO */
3707 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), F_REG_READ }, /* RO */
3708 { "id_afr0_el1", CPENC(3,0,C0,C1,3), F_REG_READ }, /* RO */
3709 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), F_REG_READ }, /* RO */
3710 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), F_REG_READ }, /* RO */
3711 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), F_REG_READ }, /* RO */
3712 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), F_REG_READ }, /* RO */
3713 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), F_REG_READ }, /* RO */
3714 { "id_isar0_el1", CPENC(3,0,C0,C2,0), F_REG_READ }, /* RO */
3715 { "id_isar1_el1", CPENC(3,0,C0,C2,1), F_REG_READ }, /* RO */
3716 { "id_isar2_el1", CPENC(3,0,C0,C2,2), F_REG_READ }, /* RO */
3717 { "id_isar3_el1", CPENC(3,0,C0,C2,3), F_REG_READ }, /* RO */
3718 { "id_isar4_el1", CPENC(3,0,C0,C2,4), F_REG_READ }, /* RO */
3719 { "id_isar5_el1", CPENC(3,0,C0,C2,5), F_REG_READ }, /* RO */
3720 { "mvfr0_el1", CPENC(3,0,C0,C3,0), F_REG_READ }, /* RO */
3721 { "mvfr1_el1", CPENC(3,0,C0,C3,1), F_REG_READ }, /* RO */
3722 { "mvfr2_el1", CPENC(3,0,C0,C3,2), F_REG_READ }, /* RO */
3723 { "ccsidr_el1", CPENC(3,1,C0,C0,0), F_REG_READ }, /* RO */
3724 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), F_REG_READ }, /* RO */
3725 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), F_REG_READ }, /* RO */
3726 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), F_REG_READ }, /* RO */
3727 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), F_REG_READ }, /* RO */
3728 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), F_REG_READ }, /* RO */
3729 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), F_REG_READ }, /* RO */
3730 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), F_REG_READ }, /* RO */
3731 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), F_REG_READ }, /* RO */
3732 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT | F_REG_READ }, /* RO */
3733 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), F_REG_READ }, /* RO */
3734 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), F_REG_READ }, /* RO */
3735 { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT | F_REG_READ }, /* RO */
3736 { "clidr_el1", CPENC(3,1,C0,C0,1), F_REG_READ }, /* RO */
3737 { "csselr_el1", CPENC(3,2,C0,C0,0), F_REG_READ }, /* RO */
3738 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3739 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3740 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3741 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3742 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3743 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3744 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3745 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3746 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3747 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3748 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3749 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3750 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3751 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3752 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3753 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3754 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3755 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3756 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3757 { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3758 { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3759 { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3760 { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3761 { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3762 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3763 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3764 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3765 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3766 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3767 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3768 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3769 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3770 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3771 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3772 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3773 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3774 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3775 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3776 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3777 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3778 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3779 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3780 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3781 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3782 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3783 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3784 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3785 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3786 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3787 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3788 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3789 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3790 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3791 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3792 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3793 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3794 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3795 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3796 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3797 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT | F_REG_READ }, /* RO */
3798 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3799 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3800 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3801 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3802 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3803 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3804 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3805 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3806 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3807 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3808 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3809 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3810 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3811 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3812 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3813 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3814 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3815 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3816 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3817 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3818 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3819 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3820 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3821 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3822 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3823 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3824 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3825 { "rvbar_el1", CPENC(3,0,C12,C0,1), F_REG_READ }, /* RO */
3826 { "rvbar_el2", CPENC(3,4,C12,C0,1), F_REG_READ }, /* RO */
3827 { "rvbar_el3", CPENC(3,6,C12,C0,1), F_REG_READ }, /* RO */
3828 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3829 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3830 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3831 { "isr_el1", CPENC(3,0,C12,C1,0), F_REG_READ }, /* RO */
3832 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3833 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3834 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3835 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3836 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3837 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3838 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RW */
3839 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3840 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3841 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3842 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3843 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RW */
3844 { "cntpct_el0", CPENC(3,3,C14,C0,1), F_REG_READ }, /* RO */
3845 { "cntvct_el0", CPENC(3,3,C14,C0,2), F_REG_READ }, /* RO */
3846 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3847 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3848 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3849 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3850 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3851 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3852 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3853 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3854 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3855 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3856 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3857 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3858 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3859 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3860 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3861 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3862 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3863 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3864 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3865 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3866 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3867 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3868 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3869 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3870 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3871 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3872 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3873 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3874 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3875 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3876 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), F_REG_READ }, /* r */
3877 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3878 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3879 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), F_REG_READ }, /* r */
3880 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), F_REG_WRITE }, /* w */
3881 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), F_REG_READ }, /* r */
3882 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), F_REG_WRITE }, /* w */
3883 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3884 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3885 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3886 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3887 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3888 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3889 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3890 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3891 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3892 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3893 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3894 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3895 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3896 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3897 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3898 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3899 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3900 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3901 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3902 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3903 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3904 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3905 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3906 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3907 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3908 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3909 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3910 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3911 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3912 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3913 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3914 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3915 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3916 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3917 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3918 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3919 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3920 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3921 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3922 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3923 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3924 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3925 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3926 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3927 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3928 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3929 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3930 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3931 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3932 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3933 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3934 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3935 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3936 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3937 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3938 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3939 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3940 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3941 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3942 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3943 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3944 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3945 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3946 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3947 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3948 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3949 { "mdrar_el1", CPENC(2,0,C1, C0, 0), F_REG_READ }, /* r */
3950 { "oslar_el1", CPENC(2,0,C1, C0, 4), F_REG_WRITE }, /* w */
3951 { "oslsr_el1", CPENC(2,0,C1, C1, 4), F_REG_READ }, /* r */
3952 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3953 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3954 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3955 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3956 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), F_REG_READ }, /* r */
3957 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3958 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3959 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3960 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT | F_REG_READ }, /* ro */
3961 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3962 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3963 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3964 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3965 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3966 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3967 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT | F_REG_READ }, /* ro */
3968 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3969 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3970 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3971 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3972 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3973 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3974 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), F_REG_WRITE }, /* w */
3975 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3976 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), F_REG_READ }, /* r */
3977 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), F_REG_READ }, /* r */
3978 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3979 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3980 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3981 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3982 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3983 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3984 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3985 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3986 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3987 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3988 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3989 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3990 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3991 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3992 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3993 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3994 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3995 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3996 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3997 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3998 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3999 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
4000 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
4001 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
4002 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
4003 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
4004 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
4005 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
4006 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
4007 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
4008 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
4009 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
4010 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
4011 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
4012 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
4013 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
4014 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
4015 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
4016 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
4017 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
4018 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
4019 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
4020 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
4021 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
4022 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
4023 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
4024 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
4025 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
4026 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
4027 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
4028 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
4029 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
4030 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
4031 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
4032 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
4033 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
4034 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
4035 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
4036 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
4037 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
4038 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
4039 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
4040 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
4041 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
4042 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
4043 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
4044 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
4045 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
4046 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
4047 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
4048
4049 { "dit", CPEN_ (3, C2, 5), F_ARCHEXT },
4050 { "vstcr_el2", CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
4051 { "vsttbr_el2", CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
4052 { "cnthvs_tval_el2", CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
4053 { "cnthvs_cval_el2", CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
4054 { "cnthvs_ctl_el2", CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
4055 { "cnthps_tval_el2", CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
4056 { "cnthps_cval_el2", CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
4057 { "cnthps_ctl_el2", CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
4058 { "sder32_el2", CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
4059 { "vncr_el2", CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
4060 { 0, CPENC(0,0,0,0,0), 0 },
4061 };
4062
4063 bfd_boolean
4064 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4065 {
4066 return (reg->flags & F_DEPRECATED) != 0;
4067 }
4068
4069 bfd_boolean
4070 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
4071 const aarch64_sys_reg *reg)
4072 {
4073 if (!(reg->flags & F_ARCHEXT))
4074 return TRUE;
4075
4076 /* PAN. Values are from aarch64_sys_regs. */
4077 if (reg->value == CPEN_(0,C2,3)
4078 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4079 return FALSE;
4080
4081 /* Virtualization host extensions: system registers. */
4082 if ((reg->value == CPENC (3, 4, C2, C0, 1)
4083 || reg->value == CPENC (3, 4, C13, C0, 1)
4084 || reg->value == CPENC (3, 4, C14, C3, 0)
4085 || reg->value == CPENC (3, 4, C14, C3, 1)
4086 || reg->value == CPENC (3, 4, C14, C3, 2))
4087 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4088 return FALSE;
4089
4090 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
4091 if ((reg->value == CPEN_ (5, C0, 0)
4092 || reg->value == CPEN_ (5, C0, 1)
4093 || reg->value == CPENC (3, 5, C1, C0, 0)
4094 || reg->value == CPENC (3, 5, C1, C0, 2)
4095 || reg->value == CPENC (3, 5, C2, C0, 0)
4096 || reg->value == CPENC (3, 5, C2, C0, 1)
4097 || reg->value == CPENC (3, 5, C2, C0, 2)
4098 || reg->value == CPENC (3, 5, C5, C1, 0)
4099 || reg->value == CPENC (3, 5, C5, C1, 1)
4100 || reg->value == CPENC (3, 5, C5, C2, 0)
4101 || reg->value == CPENC (3, 5, C6, C0, 0)
4102 || reg->value == CPENC (3, 5, C10, C2, 0)
4103 || reg->value == CPENC (3, 5, C10, C3, 0)
4104 || reg->value == CPENC (3, 5, C12, C0, 0)
4105 || reg->value == CPENC (3, 5, C13, C0, 1)
4106 || reg->value == CPENC (3, 5, C14, C1, 0))
4107 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4108 return FALSE;
4109
4110 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
4111 if ((reg->value == CPENC (3, 5, C14, C2, 0)
4112 || reg->value == CPENC (3, 5, C14, C2, 1)
4113 || reg->value == CPENC (3, 5, C14, C2, 2)
4114 || reg->value == CPENC (3, 5, C14, C3, 0)
4115 || reg->value == CPENC (3, 5, C14, C3, 1)
4116 || reg->value == CPENC (3, 5, C14, C3, 2))
4117 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4118 return FALSE;
4119
4120 /* ARMv8.2 features. */
4121
4122 /* ID_AA64MMFR2_EL1. */
4123 if (reg->value == CPENC (3, 0, C0, C7, 2)
4124 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4125 return FALSE;
4126
4127 /* PSTATE.UAO. */
4128 if (reg->value == CPEN_ (0, C2, 4)
4129 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4130 return FALSE;
4131
4132 /* RAS extension. */
4133
4134 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
4135 ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1. */
4136 if ((reg->value == CPENC (3, 0, C5, C3, 0)
4137 || reg->value == CPENC (3, 0, C5, C3, 1)
4138 || reg->value == CPENC (3, 0, C5, C3, 2)
4139 || reg->value == CPENC (3, 0, C5, C3, 3)
4140 || reg->value == CPENC (3, 0, C5, C4, 0)
4141 || reg->value == CPENC (3, 0, C5, C4, 1)
4142 || reg->value == CPENC (3, 0, C5, C4, 2)
4143 || reg->value == CPENC (3, 0, C5, C4, 3)
4144 || reg->value == CPENC (3, 0, C5, C5, 0)
4145 || reg->value == CPENC (3, 0, C5, C5, 1))
4146 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4147 return FALSE;
4148
4149 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
4150 if ((reg->value == CPENC (3, 4, C5, C2, 3)
4151 || reg->value == CPENC (3, 0, C12, C1, 1)
4152 || reg->value == CPENC (3, 4, C12, C1, 1))
4153 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4154 return FALSE;
4155
4156 /* Statistical Profiling extension. */
4157 if ((reg->value == CPENC (3, 0, C9, C10, 0)
4158 || reg->value == CPENC (3, 0, C9, C10, 1)
4159 || reg->value == CPENC (3, 0, C9, C10, 3)
4160 || reg->value == CPENC (3, 0, C9, C10, 7)
4161 || reg->value == CPENC (3, 0, C9, C9, 0)
4162 || reg->value == CPENC (3, 0, C9, C9, 2)
4163 || reg->value == CPENC (3, 0, C9, C9, 3)
4164 || reg->value == CPENC (3, 0, C9, C9, 4)
4165 || reg->value == CPENC (3, 0, C9, C9, 5)
4166 || reg->value == CPENC (3, 0, C9, C9, 6)
4167 || reg->value == CPENC (3, 0, C9, C9, 7)
4168 || reg->value == CPENC (3, 4, C9, C9, 0)
4169 || reg->value == CPENC (3, 5, C9, C9, 0))
4170 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
4171 return FALSE;
4172
4173 /* ARMv8.3 Pointer authentication keys. */
4174 if ((reg->value == CPENC (3, 0, C2, C1, 0)
4175 || reg->value == CPENC (3, 0, C2, C1, 1)
4176 || reg->value == CPENC (3, 0, C2, C1, 2)
4177 || reg->value == CPENC (3, 0, C2, C1, 3)
4178 || reg->value == CPENC (3, 0, C2, C2, 0)
4179 || reg->value == CPENC (3, 0, C2, C2, 1)
4180 || reg->value == CPENC (3, 0, C2, C2, 2)
4181 || reg->value == CPENC (3, 0, C2, C2, 3)
4182 || reg->value == CPENC (3, 0, C2, C3, 0)
4183 || reg->value == CPENC (3, 0, C2, C3, 1))
4184 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
4185 return FALSE;
4186
4187 /* SVE. */
4188 if ((reg->value == CPENC (3, 0, C0, C4, 4)
4189 || reg->value == CPENC (3, 0, C1, C2, 0)
4190 || reg->value == CPENC (3, 4, C1, C2, 0)
4191 || reg->value == CPENC (3, 6, C1, C2, 0)
4192 || reg->value == CPENC (3, 5, C1, C2, 0)
4193 || reg->value == CPENC (3, 0, C0, C0, 7))
4194 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
4195 return FALSE;
4196
4197 /* ARMv8.4 features. */
4198
4199 /* PSTATE.DIT. */
4200 if (reg->value == CPEN_ (3, C2, 5)
4201 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4202 return FALSE;
4203
4204 /* Virtualization extensions. */
4205 if ((reg->value == CPENC(3, 4, C2, C6, 2)
4206 || reg->value == CPENC(3, 4, C2, C6, 0)
4207 || reg->value == CPENC(3, 4, C14, C4, 0)
4208 || reg->value == CPENC(3, 4, C14, C4, 2)
4209 || reg->value == CPENC(3, 4, C14, C4, 1)
4210 || reg->value == CPENC(3, 4, C14, C5, 0)
4211 || reg->value == CPENC(3, 4, C14, C5, 2)
4212 || reg->value == CPENC(3, 4, C14, C5, 1)
4213 || reg->value == CPENC(3, 4, C1, C3, 1)
4214 || reg->value == CPENC(3, 4, C2, C2, 0))
4215 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4216 return FALSE;
4217
4218 /* ARMv8.4 TLB instructions. */
4219 if ((reg->value == CPENS (0, C8, C1, 0)
4220 || reg->value == CPENS (0, C8, C1, 1)
4221 || reg->value == CPENS (0, C8, C1, 2)
4222 || reg->value == CPENS (0, C8, C1, 3)
4223 || reg->value == CPENS (0, C8, C1, 5)
4224 || reg->value == CPENS (0, C8, C1, 7)
4225 || reg->value == CPENS (4, C8, C4, 0)
4226 || reg->value == CPENS (4, C8, C4, 4)
4227 || reg->value == CPENS (4, C8, C1, 1)
4228 || reg->value == CPENS (4, C8, C1, 5)
4229 || reg->value == CPENS (4, C8, C1, 6)
4230 || reg->value == CPENS (6, C8, C1, 1)
4231 || reg->value == CPENS (6, C8, C1, 5)
4232 || reg->value == CPENS (4, C8, C1, 0)
4233 || reg->value == CPENS (4, C8, C1, 4)
4234 || reg->value == CPENS (6, C8, C1, 0)
4235 || reg->value == CPENS (0, C8, C6, 1)
4236 || reg->value == CPENS (0, C8, C6, 3)
4237 || reg->value == CPENS (0, C8, C6, 5)
4238 || reg->value == CPENS (0, C8, C6, 7)
4239 || reg->value == CPENS (0, C8, C2, 1)
4240 || reg->value == CPENS (0, C8, C2, 3)
4241 || reg->value == CPENS (0, C8, C2, 5)
4242 || reg->value == CPENS (0, C8, C2, 7)
4243 || reg->value == CPENS (0, C8, C5, 1)
4244 || reg->value == CPENS (0, C8, C5, 3)
4245 || reg->value == CPENS (0, C8, C5, 5)
4246 || reg->value == CPENS (0, C8, C5, 7)
4247 || reg->value == CPENS (4, C8, C0, 2)
4248 || reg->value == CPENS (4, C8, C0, 6)
4249 || reg->value == CPENS (4, C8, C4, 2)
4250 || reg->value == CPENS (4, C8, C4, 6)
4251 || reg->value == CPENS (4, C8, C4, 3)
4252 || reg->value == CPENS (4, C8, C4, 7)
4253 || reg->value == CPENS (4, C8, C6, 1)
4254 || reg->value == CPENS (4, C8, C6, 5)
4255 || reg->value == CPENS (4, C8, C2, 1)
4256 || reg->value == CPENS (4, C8, C2, 5)
4257 || reg->value == CPENS (4, C8, C5, 1)
4258 || reg->value == CPENS (4, C8, C5, 5)
4259 || reg->value == CPENS (6, C8, C6, 1)
4260 || reg->value == CPENS (6, C8, C6, 5)
4261 || reg->value == CPENS (6, C8, C2, 1)
4262 || reg->value == CPENS (6, C8, C2, 5)
4263 || reg->value == CPENS (6, C8, C5, 1)
4264 || reg->value == CPENS (6, C8, C5, 5))
4265 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4266 return FALSE;
4267
4268 return TRUE;
4269 }
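/* A hedged sketch of how a front end might combine the table and the
   predicates above when validating an MRS (read) or MSR (write) operand.
   The helper is hypothetical: it is not part of this library, its name is
   made up, and it assumes strcmp is available via sysdep.h.  It relies
   only on the name/value/flags fields and on aarch64_sys_reg_supported_p
   as defined in this file: a write-only register (F_REG_WRITE) is
   rejected for reads and a read-only register (F_REG_READ) for writes.  */
#if 0
static const aarch64_sys_reg *
lookup_sysreg_for_access (const char *name, bfd_boolean is_read,
			  const aarch64_feature_set features)
{
  int i;

  for (i = 0; aarch64_sys_regs[i].name; ++i)
    {
      const aarch64_sys_reg *reg = &aarch64_sys_regs[i];

      if (strcmp (reg->name, name) != 0)
	continue;
      if (!aarch64_sys_reg_supported_p (features, reg))
	return NULL;	/* Requires an architecture extension we lack.  */
      if (is_read && (reg->flags & F_REG_WRITE))
	return NULL;	/* MRS on a write-only register.  */
      if (!is_read && (reg->flags & F_REG_READ))
	return NULL;	/* MSR on a read-only register.  */
      return reg;
    }
  return NULL;		/* Unknown register name.  */
}
#endif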
4270
4271 /* The CPENC macro is misleading here: the values below are not in CPENC
4272 form.  They are the op1:op2 pair of the MSR (immediate) encoding, i.e.
4273 (op1 << 3) | op2.  The fields are encoded by ins_pstatefield, which just
4274 shifts the value by the width of each field in a loop, so a CPENC-style
4275 value would leave only the first field set and mask the rest out to 0.
4276 For example, op1 = 3 and op2 = 2 (the DIT field) give 0b011010 (0x1a), not
4277 the 0b110000000001000000 (0x30040) that CPENC would build before shifting. */
4278 const aarch64_sys_reg aarch64_pstatefields [] =
4279 {
4280 { "spsel", 0x05, 0 },
4281 { "daifset", 0x1e, 0 },
4282 { "daifclr", 0x1f, 0 },
4283 { "pan", 0x04, F_ARCHEXT },
4284 { "uao", 0x03, F_ARCHEXT },
4285 { "dit", 0x1a, F_ARCHEXT },
4286 { 0, CPENC(0,0,0,0,0), 0 },
4287 };
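/* Illustrative expectations for the op1:op2 packing described above
   (never compiled in; the function name is made up for this sketch).
   Each table value is two 3-bit fields packed into the low six bits.  */
#if 0
static void
pstatefield_example (void)
{
  assert (((0 << 3) | 5) == 0x05);	/* "spsel"  */
  assert (((3 << 3) | 6) == 0x1e);	/* "daifset"  */
  assert (((3 << 3) | 2) == 0x1a);	/* "dit"  */
}
#endif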
4288
4289 bfd_boolean
4290 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4291 const aarch64_sys_reg *reg)
4292 {
4293 if (!(reg->flags & F_ARCHEXT))
4294 return TRUE;
4295
4296 /* PAN. Values are from aarch64_pstatefields. */
4297 if (reg->value == 0x04
4298 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4299 return FALSE;
4300
4301 /* UAO. Values are from aarch64_pstatefields. */
4302 if (reg->value == 0x03
4303 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4304 return FALSE;
4305
4306 /* DIT. Values are from aarch64_pstatefields. */
4307 if (reg->value == 0x1a
4308 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4309 return FALSE;
4310
4311 return TRUE;
4312 }
4313
4314 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
4315 {
4316 { "ialluis", CPENS(0,C7,C1,0), 0 },
4317 { "iallu", CPENS(0,C7,C5,0), 0 },
4318 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
4319 { 0, CPENS(0,0,0,0), 0 }
4320 };
4321
4322 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
4323 {
4324 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
4325 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
4326 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
4327 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
4328 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
4329 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
4330 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
4331 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
4332 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
4333 { 0, CPENS(0,0,0,0), 0 }
4334 };
4335
4336 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
4337 {
4338 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
4339 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
4340 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
4341 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
4342 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
4343 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
4344 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
4345 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
4346 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
4347 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
4348 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
4349 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
4350 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
4351 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
4352 { 0, CPENS(0,0,0,0), 0 }
4353 };
4354
4355 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
4356 {
4357 { "vmalle1", CPENS(0,C8,C7,0), 0 },
4358 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
4359 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
4360 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
4361 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
4362 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
4363 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
4364 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
4365 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
4366 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
4367 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
4368 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
4369 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
4370 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
4371 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
4372 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
4373 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
4374 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
4375 { "alle2", CPENS(4,C8,C7,0), 0 },
4376 { "alle2is", CPENS(4,C8,C3,0), 0 },
4377 { "alle1", CPENS(4,C8,C7,4), 0 },
4378 { "alle1is", CPENS(4,C8,C3,4), 0 },
4379 { "alle3", CPENS(6,C8,C7,0), 0 },
4380 { "alle3is", CPENS(6,C8,C3,0), 0 },
4381 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
4382 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
4383 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
4384 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
4385 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
4386 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
4387 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
4388 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
4389
4390 { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
4391 { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
4392 { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
4393 { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
4394 { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
4395 { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
4396 { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
4397 { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
4398 { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
4399 { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
4400 { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
4401 { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
4402 { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
4403 { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
4404 { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
4405 { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },
4406
4407 { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
4408 { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
4409 { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
4410 { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
4411 { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
4412 { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
4413 { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
4414 { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
4415 { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
4416 { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
4417 { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
4418 { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
4419 { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
4420 { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
4421 { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
4422 { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
4423 { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
4424 { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
4425 { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
4426 { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
4427 { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
4428 { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
4429 { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
4430 { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
4431 { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
4432 { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
4433 { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
4434 { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
4435 { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
4436 { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },
4437
4438 { 0, CPENS(0,0,0,0), 0 }
4439 };
4440
4441 bfd_boolean
4442 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4443 {
4444 return (sys_ins_reg->flags & F_HASXT) != 0;
4445 }
4446
4447 extern bfd_boolean
4448 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4449 const aarch64_sys_ins_reg *reg)
4450 {
4451 if (!(reg->flags & F_ARCHEXT))
4452 return TRUE;
4453
4454 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4455 if (reg->value == CPENS (3, C7, C12, 1)
4456 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4457 return FALSE;
4458
4459 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4460 if ((reg->value == CPENS (0, C7, C9, 0)
4461 || reg->value == CPENS (0, C7, C9, 1))
4462 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4463 return FALSE;
4464
4465 return TRUE;
4466 }
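/* Illustrative sketch (not part of the library; the function name is made
   up and strcmp is assumed to be available via sysdep.h): "dc cvap"
   carries F_ARCHEXT, so a front end would only accept it when
   aarch64_sys_ins_reg_supported_p says the ARMv8.2 check above passes.  */
#if 0
static bfd_boolean
dc_cvap_supported_example (const aarch64_feature_set features)
{
  const aarch64_sys_ins_reg *op;

  for (op = aarch64_sys_regs_dc; op->name; ++op)
    if (strcmp (op->name, "cvap") == 0)
      return aarch64_sys_ins_reg_supported_p (features, op);
  return FALSE;
}
#endif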
4467
4468 #undef C0
4469 #undef C1
4470 #undef C2
4471 #undef C3
4472 #undef C4
4473 #undef C5
4474 #undef C6
4475 #undef C7
4476 #undef C8
4477 #undef C9
4478 #undef C10
4479 #undef C11
4480 #undef C12
4481 #undef C13
4482 #undef C14
4483 #undef C15
4484
4485 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
4486 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4487
4488 static bfd_boolean
4489 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4490 const aarch64_insn insn)
4491 {
4492 int t = BITS (insn, 4, 0);	/* Rt */
4493 int n = BITS (insn, 9, 5);	/* Rn */
4494 int t2 = BITS (insn, 14, 10);	/* Rt2 */
4495
4496 if (BIT (insn, 23))
4497 {
4498 /* Write back enabled. */
4499 if ((t == n || t2 == n) && n != 31)
4500 return FALSE;
4501 }
4502
4503 if (BIT (insn, 22))
4504 {
4505 /* Load */
4506 if (t == t2)
4507 return FALSE;
4508 }
4509
4510 return TRUE;
4511 }
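/* A minimal sketch of the checks above (illustrative only; the function
   name is made up).  The words constructed below only populate the fields
   verify_ldpsw inspects (Rt, Rn, Rt2 and bits 22/23); they are not
   complete LDPSW encodings.  */
#if 0
static void
verify_ldpsw_example (void)
{
  /* Load bit (22) set with Rt == Rt2: rejected.  */
  aarch64_insn same_dest = (1u << 22) | (1u << 10) | (2u << 5) | 1u;
  /* Writeback bit (23) set with Rn == Rt and Rn != 31: rejected.  */
  aarch64_insn wb_overlap = (1u << 23) | (3u << 10) | (2u << 5) | 2u;

  assert (!verify_ldpsw (NULL, same_dest));
  assert (!verify_ldpsw (NULL, wb_overlap));
}
#endif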
4512
4513 /* Return true if VALUE cannot be moved into an SVE register using DUP
4514 (with any element size, not just ESIZE) and if using DUPM would
4515 therefore be OK. ESIZE is the number of bytes in the immediate. */
4516
4517 bfd_boolean
4518 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
4519 {
4520 int64_t svalue = uvalue;
4521 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4); /* Two shifts so ESIZE == 8 does not shift by 64 (undefined).  */
4522
4523 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
4524 return FALSE;
4525 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
4526 {
4527 svalue = (int32_t) uvalue;
4528 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
4529 {
4530 svalue = (int16_t) uvalue;
4531 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
4532 return FALSE;
4533 }
4534 }
4535 if ((svalue & 0xff) == 0)
4536 svalue /= 256;
4537 return svalue < -128 || svalue >= 128;
4538 }
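/* Illustrative expectations for the helper above (never compiled in; the
   function name is made up).  The values were traced by hand through the
   logic above: any single byte can be produced by DUP, 0xff00 is -1
   shifted left by 8 so DUP can still produce it, while 0x3ffc cannot be
   produced by DUP and so preferring the DUPM alias is appropriate.  */
#if 0
static void
sve_dupm_example (void)
{
  assert (!aarch64_sve_dupm_mov_immediate_p (0xab, 1));
  assert (!aarch64_sve_dupm_mov_immediate_p (0xff00, 2));
  assert (aarch64_sve_dupm_mov_immediate_p (0x3ffc, 2));
}
#endif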
4539
4540 /* Include the opcode description table as well as the operand description
4541 table. */
4542 #define VERIFIER(x) verify_##x
4543 #include "aarch64-tbl.h"