[AArch64] Use "must" rather than "should" in error messages
opcodes/aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
34 #ifdef DEBUG_AARCH64
35 int debug_dump = FALSE;
36 #endif /* DEBUG_AARCH64 */
37
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array[32] = {
41 /* 0-7. */
42 "pow2",
43 "vl1",
44 "vl2",
45 "vl3",
46 "vl4",
47 "vl5",
48 "vl6",
49 "vl7",
50 /* 8-15. */
51 "vl8",
52 "vl16",
53 "vl32",
54 "vl64",
55 "vl128",
56 "vl256",
57 0,
58 0,
59 /* 16-23. */
60 0,
61 0,
62 0,
63 0,
64 0,
65 0,
66 0,
67 0,
68 /* 24-31. */
69 0,
70 0,
71 0,
72 0,
73 0,
74 "mul4",
75 "mul3",
76 "all"
77 };
78
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array[16] = {
82 /* 0-7. */
83 "pldl1keep",
84 "pldl1strm",
85 "pldl2keep",
86 "pldl2strm",
87 "pldl3keep",
88 "pldl3strm",
89 0,
90 0,
91 /* 8-15. */
92 "pstl1keep",
93 "pstl1strm",
94 "pstl2keep",
95 "pstl2strm",
96 "pstl3keep",
97 "pstl3strm",
98 0,
99 0
100 };
101
102 /* Helper functions to determine which operand is to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
121 enum data_pattern
122 {
123 DP_UNKNOWN,
124 DP_VECTOR_3SAME,
125 DP_VECTOR_LONG,
126 DP_VECTOR_WIDE,
127 DP_VECTOR_ACROSS_LANES,
128 };
129
130 static const char significant_operand_index [] =
131 {
132 0, /* DP_UNKNOWN, by default using operand 0. */
133 0, /* DP_VECTOR_3SAME */
134 1, /* DP_VECTOR_LONG */
135 2, /* DP_VECTOR_WIDE */
136 1, /* DP_VECTOR_ACROSS_LANES */
137 };
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers, each of which
142 corresponds to one operand in a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time an operand needs to be selected. We could
191 either cache the calculated result or generate the data statically;
192 however, it is not obvious that such an optimization would bring a
193 significant benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
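/* For example, for a lengthening operation such as SADDL <Vd>.8H, <Vn>.8B,
   <Vm>.8B, the qualifier sequence (8H, 8B, 8B) matches DP_VECTOR_LONG, so
   operand 1 is the one used to encode/decode the size:Q fields. */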
201 \f
202 const aarch64_field fields[] =
203 {
204 { 0, 0 }, /* NIL. */
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
244 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
245 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
246 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
247 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
248 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
249 { 5, 14 }, /* imm14: in test bit and branch instructions. */
250 { 5, 16 }, /* imm16: in exception instructions. */
251 { 0, 26 }, /* imm26: in unconditional branch instructions. */
252 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
253 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
254 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
255 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
256 { 22, 1 }, /* N: in logical (immediate) instructions. */
257 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
258 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
259 { 31, 1 }, /* sf: in integer data processing instructions. */
260 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
261 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
262 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
263 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
264 { 31, 1 }, /* b5: in the test bit and branch instructions. */
265 { 19, 5 }, /* b40: in the test bit and branch instructions. */
266 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
267 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
268 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
269 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
270 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
271 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
272 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
273 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
274 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
275 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
276 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
277 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
278 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
279 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
280 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
281 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
282 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
283 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
284 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
285 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
286 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
287 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
288 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
289 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
290 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
291 { 5, 1 }, /* SVE_i1: single-bit immediate. */
292 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
293 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
294 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
295 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
296 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
297 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
298 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
299 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
300 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
301 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
302 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
303 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
304 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
305 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
306 { 16, 4 }, /* SVE_tsz: triangular size select. */
307 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
308 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
309 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
310 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
311 { 22, 1 } /* SVE_xs_22: UXTW/SXTW select (bit 22). */
312 };
313
314 enum aarch64_operand_class
315 aarch64_get_operand_class (enum aarch64_opnd type)
316 {
317 return aarch64_operands[type].op_class;
318 }
319
320 const char *
321 aarch64_get_operand_name (enum aarch64_opnd type)
322 {
323 return aarch64_operands[type].name;
324 }
325
326 /* Get the operand description string.
327 This is usually used for diagnostic purposes. */
328 const char *
329 aarch64_get_operand_desc (enum aarch64_opnd type)
330 {
331 return aarch64_operands[type].desc;
332 }
333
334 /* Table of all conditional affixes. */
335 const aarch64_cond aarch64_conds[16] =
336 {
337 {{"eq", "none"}, 0x0},
338 {{"ne", "any"}, 0x1},
339 {{"cs", "hs", "nlast"}, 0x2},
340 {{"cc", "lo", "ul", "last"}, 0x3},
341 {{"mi", "first"}, 0x4},
342 {{"pl", "nfrst"}, 0x5},
343 {{"vs"}, 0x6},
344 {{"vc"}, 0x7},
345 {{"hi", "pmore"}, 0x8},
346 {{"ls", "plast"}, 0x9},
347 {{"ge", "tcont"}, 0xa},
348 {{"lt", "tstop"}, 0xb},
349 {{"gt"}, 0xc},
350 {{"le"}, 0xd},
351 {{"al"}, 0xe},
352 {{"nv"}, 0xf},
353 };
354
355 const aarch64_cond *
356 get_cond_from_value (aarch64_insn value)
357 {
358 assert (value < 16);
359 return &aarch64_conds[(unsigned int) value];
360 }
361
362 const aarch64_cond *
363 get_inverted_cond (const aarch64_cond *cond)
364 {
365 return &aarch64_conds[cond->value ^ 0x1];
366 }
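/* Conditions are inverted by flipping the lowest bit of the encoding,
   e.g. EQ (0x0) <-> NE (0x1), GE (0xa) <-> LT (0xb). */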
367
368 /* Table describing the operand extension/shifting operators; indexed by
369 enum aarch64_modifier_kind.
370
371 The value column provides the most common values for encoding modifiers,
372 which enables table-driven encoding/decoding for the modifiers. */
373 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
374 {
375 {"none", 0x0},
376 {"msl", 0x0},
377 {"ror", 0x3},
378 {"asr", 0x2},
379 {"lsr", 0x1},
380 {"lsl", 0x0},
381 {"uxtb", 0x0},
382 {"uxth", 0x1},
383 {"uxtw", 0x2},
384 {"uxtx", 0x3},
385 {"sxtb", 0x4},
386 {"sxth", 0x5},
387 {"sxtw", 0x6},
388 {"sxtx", 0x7},
389 {"mul", 0x0},
390 {"mul vl", 0x0},
391 {NULL, 0},
392 };
393
394 enum aarch64_modifier_kind
395 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
396 {
397 return desc - aarch64_operand_modifiers;
398 }
399
400 aarch64_insn
401 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
402 {
403 return aarch64_operand_modifiers[kind].value;
404 }
405
406 enum aarch64_modifier_kind
407 aarch64_get_operand_modifier_from_value (aarch64_insn value,
408 bfd_boolean extend_p)
409 {
410 if (extend_p == TRUE)
411 return AARCH64_MOD_UXTB + value;
412 else
413 return AARCH64_MOD_LSL - value;
414 }
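/* E.g. with EXTEND_P set, value 6 maps to AARCH64_MOD_SXTW; with EXTEND_P
   clear, value 0 maps to AARCH64_MOD_LSL (see aarch64_operand_modifiers
   below, which is indexed by enum aarch64_modifier_kind). */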
415
416 bfd_boolean
417 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
418 {
419 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
420 ? TRUE : FALSE;
421 }
422
423 static inline bfd_boolean
424 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
425 {
426 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
427 ? TRUE : FALSE;
428 }
429
430 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
431 {
432 { "#0x00", 0x0 },
433 { "oshld", 0x1 },
434 { "oshst", 0x2 },
435 { "osh", 0x3 },
436 { "#0x04", 0x4 },
437 { "nshld", 0x5 },
438 { "nshst", 0x6 },
439 { "nsh", 0x7 },
440 { "#0x08", 0x8 },
441 { "ishld", 0x9 },
442 { "ishst", 0xa },
443 { "ish", 0xb },
444 { "#0x0c", 0xc },
445 { "ld", 0xd },
446 { "st", 0xe },
447 { "sy", 0xf },
448 };
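/* These are the 4-bit values encoded in the CRm field of the barrier
   instructions; e.g. "dmb ish" uses the value 0xb. */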
449
450 /* Table describing the operands supported by the aliases of the HINT
451 instruction.
452
453 The name column is the operand that is accepted for the alias. The value
454 column is the hint number of the alias. The list of operands is terminated
455 by NULL in the name column. */
456
457 const struct aarch64_name_value_pair aarch64_hint_options[] =
458 {
459 { "csync", 0x11 }, /* PSB CSYNC. */
460 { NULL, 0x0 },
461 };
462
463 /* op -> op: load = 0 instruction = 1 store = 2
464 l -> level: 1-3
465 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
466 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
467 const struct aarch64_name_value_pair aarch64_prfops[32] =
468 {
469 { "pldl1keep", B(0, 1, 0) },
470 { "pldl1strm", B(0, 1, 1) },
471 { "pldl2keep", B(0, 2, 0) },
472 { "pldl2strm", B(0, 2, 1) },
473 { "pldl3keep", B(0, 3, 0) },
474 { "pldl3strm", B(0, 3, 1) },
475 { NULL, 0x06 },
476 { NULL, 0x07 },
477 { "plil1keep", B(1, 1, 0) },
478 { "plil1strm", B(1, 1, 1) },
479 { "plil2keep", B(1, 2, 0) },
480 { "plil2strm", B(1, 2, 1) },
481 { "plil3keep", B(1, 3, 0) },
482 { "plil3strm", B(1, 3, 1) },
483 { NULL, 0x0e },
484 { NULL, 0x0f },
485 { "pstl1keep", B(2, 1, 0) },
486 { "pstl1strm", B(2, 1, 1) },
487 { "pstl2keep", B(2, 2, 0) },
488 { "pstl2strm", B(2, 2, 1) },
489 { "pstl3keep", B(2, 3, 0) },
490 { "pstl3strm", B(2, 3, 1) },
491 { NULL, 0x16 },
492 { NULL, 0x17 },
493 { NULL, 0x18 },
494 { NULL, 0x19 },
495 { NULL, 0x1a },
496 { NULL, 0x1b },
497 { NULL, 0x1c },
498 { NULL, 0x1d },
499 { NULL, 0x1e },
500 { NULL, 0x1f },
501 };
502 #undef B
503 \f
504 /* Utilities for value constraints. */
505
506 static inline int
507 value_in_range_p (int64_t value, int low, int high)
508 {
509 return (value >= low && value <= high) ? 1 : 0;
510 }
511
512 /* Return true if VALUE is a multiple of ALIGN. */
513 static inline int
514 value_aligned_p (int64_t value, int align)
515 {
516 return (value % align) == 0;
517 }
518
519 /* A signed value fits in a field. */
520 static inline int
521 value_fit_signed_field_p (int64_t value, unsigned width)
522 {
523 assert (width < 32);
524 if (width < sizeof (value) * 8)
525 {
526 int64_t lim = (int64_t)1 << (width - 1);
527 if (value >= -lim && value < lim)
528 return 1;
529 }
530 return 0;
531 }
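/* For instance, a 19-bit signed field (such as imm19) holds values in the
   range [-262144, 262143]. */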
532
533 /* An unsigned value fits in a field. */
534 static inline int
535 value_fit_unsigned_field_p (int64_t value, unsigned width)
536 {
537 assert (width < 32);
538 if (width < sizeof (value) * 8)
539 {
540 int64_t lim = (int64_t)1 << width;
541 if (value >= 0 && value < lim)
542 return 1;
543 }
544 return 0;
545 }
546
547 /* Return 1 if OPERAND is SP or WSP. */
548 int
549 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
550 {
551 return ((aarch64_get_operand_class (operand->type)
552 == AARCH64_OPND_CLASS_INT_REG)
553 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
554 && operand->reg.regno == 31);
555 }
556
557 /* Return 1 if OPERAND is XZR or WZR. */
558 int
559 aarch64_zero_register_p (const aarch64_opnd_info *operand)
560 {
561 return ((aarch64_get_operand_class (operand->type)
562 == AARCH64_OPND_CLASS_INT_REG)
563 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
564 && operand->reg.regno == 31);
565 }
566
567 /* Return true if the operand *OPERAND, which has the operand code
568 OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
569 qualified by the qualifier TARGET. */
570
571 static inline int
572 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
573 aarch64_opnd_qualifier_t target)
574 {
575 switch (operand->qualifier)
576 {
577 case AARCH64_OPND_QLF_W:
578 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
579 return 1;
580 break;
581 case AARCH64_OPND_QLF_X:
582 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
583 return 1;
584 break;
585 case AARCH64_OPND_QLF_WSP:
586 if (target == AARCH64_OPND_QLF_W
587 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
588 return 1;
589 break;
590 case AARCH64_OPND_QLF_SP:
591 if (target == AARCH64_OPND_QLF_X
592 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
593 return 1;
594 break;
595 default:
596 break;
597 }
598
599 return 0;
600 }
601
602 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
603 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
604
605 Return NIL if more than one expected qualifier is found. */
606
607 aarch64_opnd_qualifier_t
608 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
609 int idx,
610 const aarch64_opnd_qualifier_t known_qlf,
611 int known_idx)
612 {
613 int i, saved_i;
614
615 /* Special case.
616
617 When the known qualifier is NIL, we have to assume that there is only
618 one qualifier sequence in the *QSEQ_LIST and return the corresponding
619 qualifier directly. One scenario is that for instruction
620 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
621 which has only one possible valid qualifier sequence
622 NIL, S_D
623 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
624 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
625
626 Because the qualifier NIL has dual roles in the qualifier sequence:
627 it can mean either that the operand has no qualifier or that the qualifier
628 sequence is not in use (when all qualifiers in the sequence are NILs),
629 we have to handle this special case here. */
630 if (known_qlf == AARCH64_OPND_NIL)
631 {
632 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
633 return qseq_list[0][idx];
634 }
635
636 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
637 {
638 if (qseq_list[i][known_idx] == known_qlf)
639 {
640 if (saved_i != -1)
641 /* More than one sequence has been found to have KNOWN_QLF at
642 KNOWN_IDX. */
643 return AARCH64_OPND_NIL;
644 saved_i = i;
645 }
646 }
647
648 return qseq_list[saved_i][idx];
649 }
650
651 enum operand_qualifier_kind
652 {
653 OQK_NIL,
654 OQK_OPD_VARIANT,
655 OQK_VALUE_IN_RANGE,
656 OQK_MISC,
657 };
658
659 /* Operand qualifier description. */
660 struct operand_qualifier_data
661 {
662 /* The usage of the three data fields depends on the qualifier kind. */
663 int data0;
664 int data1;
665 int data2;
666 /* Description. */
667 const char *desc;
668 /* Kind. */
669 enum operand_qualifier_kind kind;
670 };
671
672 /* Indexed by the operand qualifier enumerators. */
673 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
674 {
675 {0, 0, 0, "NIL", OQK_NIL},
676
677 /* Operand variant qualifiers.
678 First 3 fields:
679 element size, number of elements and common value for encoding. */
680
681 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
682 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
683 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
684 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
685
686 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
687 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
688 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
689 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
690 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
691
692 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
693 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
694 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
695 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
696 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
697 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
698 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
699 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
700 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
701 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
702
703 {0, 0, 0, "z", OQK_OPD_VARIANT},
704 {0, 0, 0, "m", OQK_OPD_VARIANT},
705
706 /* Qualifiers constraining the value range.
707 First 3 fields:
708 Lower bound, higher bound, unused. */
709
710 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
711 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
712 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
713 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
714 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
715 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
716
717 /* Qualifiers for miscellaneous purpose.
718 First 3 fields:
719 unused, unused and unused. */
720
721 {0, 0, 0, "lsl", 0},
722 {0, 0, 0, "msl", 0},
723
724 {0, 0, 0, "retrieving", 0},
725 };
726
727 static inline bfd_boolean
728 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
729 {
730 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
731 ? TRUE : FALSE;
732 }
733
734 static inline bfd_boolean
735 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
736 {
737 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
738 ? TRUE : FALSE;
739 }
740
741 const char*
742 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
743 {
744 return aarch64_opnd_qualifiers[qualifier].desc;
745 }
746
747 /* Given an operand qualifier, return the expected data element size
748 of a qualified operand. */
749 unsigned char
750 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
751 {
752 assert (operand_variant_qualifier_p (qualifier) == TRUE);
753 return aarch64_opnd_qualifiers[qualifier].data0;
754 }
755
756 unsigned char
757 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
758 {
759 assert (operand_variant_qualifier_p (qualifier) == TRUE);
760 return aarch64_opnd_qualifiers[qualifier].data1;
761 }
762
763 aarch64_insn
764 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
765 {
766 assert (operand_variant_qualifier_p (qualifier) == TRUE);
767 return aarch64_opnd_qualifiers[qualifier].data2;
768 }
769
770 static int
771 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
772 {
773 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
774 return aarch64_opnd_qualifiers[qualifier].data0;
775 }
776
777 static int
778 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
779 {
780 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
781 return aarch64_opnd_qualifiers[qualifier].data1;
782 }
783
784 #ifdef DEBUG_AARCH64
785 void
786 aarch64_verbose (const char *str, ...)
787 {
788 va_list ap;
789 va_start (ap, str);
790 printf ("#### ");
791 vprintf (str, ap);
792 printf ("\n");
793 va_end (ap);
794 }
795
796 static inline void
797 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
798 {
799 int i;
800 printf ("#### \t");
801 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
802 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
803 printf ("\n");
804 }
805
806 static void
807 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
808 const aarch64_opnd_qualifier_t *qualifier)
809 {
810 int i;
811 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
812
813 aarch64_verbose ("dump_match_qualifiers:");
814 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
815 curr[i] = opnd[i].qualifier;
816 dump_qualifier_sequence (curr);
817 aarch64_verbose ("against");
818 dump_qualifier_sequence (qualifier);
819 }
820 #endif /* DEBUG_AARCH64 */
821
822 /* TODO: improve this; we could have an extra field at run time to
823 store the number of operands rather than calculating it every time. */
824
825 int
826 aarch64_num_of_operands (const aarch64_opcode *opcode)
827 {
828 int i = 0;
829 const enum aarch64_opnd *opnds = opcode->operands;
830 while (opnds[i++] != AARCH64_OPND_NIL)
831 ;
832 --i;
833 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
834 return i;
835 }
836
837 /* Find the best-matching qualifier sequence in *QUALIFIERS_LIST for INST.
838 If one is found, fill the sequence in *RET and return 1; otherwise return 0.
839
840 N.B. on entry, it is very likely that only some operands in *INST
841 have had their qualifiers established.
842
843 If STOP_AT is not -1, the function will only try to match
844 the qualifier sequence for operands before and including the operand
845 of index STOP_AT; and on success *RET will only be filled with the first
846 (STOP_AT+1) qualifiers.
847
848 A couple of examples of the matching algorithm:
849
850 X,W,NIL should match
851 X,W,NIL
852
853 NIL,NIL should match
854 X ,NIL
855
856 Apart from serving the main encoding routine, this can also be called
857 during or after the operand decoding. */
858
859 int
860 aarch64_find_best_match (const aarch64_inst *inst,
861 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
862 int stop_at, aarch64_opnd_qualifier_t *ret)
863 {
864 int found = 0;
865 int i, num_opnds;
866 const aarch64_opnd_qualifier_t *qualifiers;
867
868 num_opnds = aarch64_num_of_operands (inst->opcode);
869 if (num_opnds == 0)
870 {
871 DEBUG_TRACE ("SUCCEED: no operand");
872 return 1;
873 }
874
875 if (stop_at < 0 || stop_at >= num_opnds)
876 stop_at = num_opnds - 1;
877
878 /* For each pattern. */
879 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
880 {
881 int j;
882 qualifiers = *qualifiers_list;
883
884 /* Start as positive. */
885 found = 1;
886
887 DEBUG_TRACE ("%d", i);
888 #ifdef DEBUG_AARCH64
889 if (debug_dump)
890 dump_match_qualifiers (inst->operands, qualifiers);
891 #endif
892
893 /* Most opcodes have far fewer patterns in the list.
894 The first NIL qualifier indicates the end of the list. */
895 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
896 {
897 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
898 if (i)
899 found = 0;
900 break;
901 }
902
903 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
904 {
905 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
906 {
907 /* Either the operand does not have a qualifier, or the qualifier
908 for the operand needs to be deduced from the qualifier
909 sequence.
910 In the latter case, any constraint checking related to
911 the obtained qualifier should be done later in
912 operand_general_constraint_met_p. */
913 continue;
914 }
915 else if (*qualifiers != inst->operands[j].qualifier)
916 {
917 /* Unless the target qualifier can also qualify the operand
918 (which already has a non-nil qualifier), non-equal
919 qualifiers generally do not match. */
920 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
921 continue;
922 else
923 {
924 found = 0;
925 break;
926 }
927 }
928 else
929 continue; /* Equal qualifiers are certainly matched. */
930 }
931
932 /* Qualifiers established. */
933 if (found == 1)
934 break;
935 }
936
937 if (found == 1)
938 {
939 /* Fill the result in *RET. */
940 int j;
941 qualifiers = *qualifiers_list;
942
943 DEBUG_TRACE ("complete qualifiers using list %d", i);
944 #ifdef DEBUG_AARCH64
945 if (debug_dump)
946 dump_qualifier_sequence (qualifiers);
947 #endif
948
949 for (j = 0; j <= stop_at; ++j, ++qualifiers)
950 ret[j] = *qualifiers;
951 for (; j < AARCH64_MAX_OPND_NUM; ++j)
952 ret[j] = AARCH64_OPND_QLF_NIL;
953
954 DEBUG_TRACE ("SUCCESS");
955 return 1;
956 }
957
958 DEBUG_TRACE ("FAIL");
959 return 0;
960 }
961
962 /* Operand qualifier matching and resolving.
963
964 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
965 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
966
967 If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
968 succeeds. */
969
970 static int
971 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
972 {
973 int i, nops;
974 aarch64_opnd_qualifier_seq_t qualifiers;
975
976 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
977 qualifiers))
978 {
979 DEBUG_TRACE ("matching FAIL");
980 return 0;
981 }
982
983 if (inst->opcode->flags & F_STRICT)
984 {
985 /* Require an exact qualifier match, even for NIL qualifiers. */
986 nops = aarch64_num_of_operands (inst->opcode);
987 for (i = 0; i < nops; ++i)
988 if (inst->operands[i].qualifier != qualifiers[i])
989 return FALSE;
990 }
991
992 /* Update the qualifiers. */
993 if (update_p == TRUE)
994 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
995 {
996 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
997 break;
998 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
999 "update %s with %s for operand %d",
1000 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1001 aarch64_get_qualifier_name (qualifiers[i]), i);
1002 inst->operands[i].qualifier = qualifiers[i];
1003 }
1004
1005 DEBUG_TRACE ("matching SUCCESS");
1006 return 1;
1007 }
1008
1009 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1010 register by MOVZ.
1011
1012 IS32 indicates whether value is a 32-bit immediate or not.
1013 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1014 amount will be returned in *SHIFT_AMOUNT. */
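/* For example, 0x12340000 is such a constant (MOVZ with a left shift of 16),
   whereas 0x12345678 is not. */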
1015
1016 bfd_boolean
1017 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1018 {
1019 int amount;
1020
1021 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1022
1023 if (is32)
1024 {
1025 /* Allow all zeros or all ones in top 32-bits, so that
1026 32-bit constant expressions like ~0x80000000 are
1027 permitted. */
1028 uint64_t ext = value;
1029 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1030 /* Immediate out of range. */
1031 return FALSE;
1032 value &= (int64_t) 0xffffffff;
1033 }
1034
1035 /* First, try MOVZ, then MOVN. */
1036 amount = -1;
1037 if ((value & ((int64_t) 0xffff << 0)) == value)
1038 amount = 0;
1039 else if ((value & ((int64_t) 0xffff << 16)) == value)
1040 amount = 16;
1041 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1042 amount = 32;
1043 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1044 amount = 48;
1045
1046 if (amount == -1)
1047 {
1048 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1049 return FALSE;
1050 }
1051
1052 if (shift_amount != NULL)
1053 *shift_amount = amount;
1054
1055 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1056
1057 return TRUE;
1058 }
1059
1060 /* Build the accepted values for immediate logical SIMD instructions.
1061
1062 The standard encodings of the immediate value are:
1063 N imms immr SIMD size R S
1064 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1065 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1066 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1067 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1068 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1069 0 11110s 00000r 2 UInt(r) UInt(s)
1070 where all-ones value of S is reserved.
1071
1072 Let's call E the SIMD size.
1073
1074 The immediate value is: S+1 bits '1' rotated to the right by R.
1075
1076 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1077 (remember S != E - 1). */
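/* For example, with E = 8, S = 1 and R = 1, the element is two set bits
   rotated right by one, i.e. 0x81, which replicates to the bitmask
   immediate 0x8181818181818181. */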
1078
1079 #define TOTAL_IMM_NB 5334
1080
1081 typedef struct
1082 {
1083 uint64_t imm;
1084 aarch64_insn encoding;
1085 } simd_imm_encoding;
1086
1087 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1088
1089 static int
1090 simd_imm_encoding_cmp(const void *i1, const void *i2)
1091 {
1092 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1093 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1094
1095 if (imm1->imm < imm2->imm)
1096 return -1;
1097 if (imm1->imm > imm2->imm)
1098 return +1;
1099 return 0;
1100 }
1101
1102 /* immediate bitfield standard encoding
1103 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1104 1 ssssss rrrrrr 64 rrrrrr ssssss
1105 0 0sssss 0rrrrr 32 rrrrr sssss
1106 0 10ssss 00rrrr 16 rrrr ssss
1107 0 110sss 000rrr 8 rrr sss
1108 0 1110ss 0000rr 4 rr ss
1109 0 11110s 00000r 2 r s */
1110 static inline int
1111 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1112 {
1113 return (is64 << 12) | (r << 6) | s;
1114 }
1115
1116 static void
1117 build_immediate_table (void)
1118 {
1119 uint32_t log_e, e, s, r, s_mask;
1120 uint64_t mask, imm;
1121 int nb_imms;
1122 int is64;
1123
1124 nb_imms = 0;
1125 for (log_e = 1; log_e <= 6; log_e++)
1126 {
1127 /* Get element size. */
1128 e = 1u << log_e;
1129 if (log_e == 6)
1130 {
1131 is64 = 1;
1132 mask = 0xffffffffffffffffull;
1133 s_mask = 0;
1134 }
1135 else
1136 {
1137 is64 = 0;
1138 mask = (1ull << e) - 1;
1139 /* log_e s_mask
1140 1 ((1 << 4) - 1) << 2 = 111100
1141 2 ((1 << 3) - 1) << 3 = 111000
1142 3 ((1 << 2) - 1) << 4 = 110000
1143 4 ((1 << 1) - 1) << 5 = 100000
1144 5 ((1 << 0) - 1) << 6 = 000000 */
1145 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1146 }
1147 for (s = 0; s < e - 1; s++)
1148 for (r = 0; r < e; r++)
1149 {
1150 /* s+1 consecutive bits to 1 (s < 63) */
1151 imm = (1ull << (s + 1)) - 1;
1152 /* rotate right by r */
1153 if (r != 0)
1154 imm = (imm >> r) | ((imm << (e - r)) & mask);
1155 /* replicate the constant depending on SIMD size */
1156 switch (log_e)
1157 {
1158 case 1: imm = (imm << 2) | imm;
1159 case 2: imm = (imm << 4) | imm;
1160 case 3: imm = (imm << 8) | imm;
1161 case 4: imm = (imm << 16) | imm;
1162 case 5: imm = (imm << 32) | imm;
1163 case 6: break;
1164 default: abort ();
1165 }
1166 simd_immediates[nb_imms].imm = imm;
1167 simd_immediates[nb_imms].encoding =
1168 encode_immediate_bitfield(is64, s | s_mask, r);
1169 nb_imms++;
1170 }
1171 }
1172 assert (nb_imms == TOTAL_IMM_NB);
1173 qsort(simd_immediates, nb_imms,
1174 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1175 }
1176
1177 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1178 be accepted by logical (immediate) instructions
1179 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1180
1181 ESIZE is the number of bytes in the decoded immediate value.
1182 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1183 VALUE will be returned in *ENCODING. */
1184
1185 bfd_boolean
1186 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1187 {
1188 simd_imm_encoding imm_enc;
1189 const simd_imm_encoding *imm_encoding;
1190 static bfd_boolean initialized = FALSE;
1191 uint64_t upper;
1192 int i;
1193
1194 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1195 value, esize);
1196
1197 if (initialized == FALSE)
1198 {
1199 build_immediate_table ();
1200 initialized = TRUE;
1201 }
1202
1203 /* Allow all zeros or all ones in top bits, so that
1204 constant expressions like ~1 are permitted. */
1205 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1206 if ((value & ~upper) != value && (value | upper) != value)
1207 return FALSE;
1208
1209 /* Replicate to a full 64-bit value. */
1210 value &= ~upper;
1211 for (i = esize * 8; i < 64; i *= 2)
1212 value |= (value << i);
1213
1214 imm_enc.imm = value;
1215 imm_encoding = (const simd_imm_encoding *)
1216 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1217 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1218 if (imm_encoding == NULL)
1219 {
1220 DEBUG_TRACE ("exit with FALSE");
1221 return FALSE;
1222 }
1223 if (encoding != NULL)
1224 *encoding = imm_encoding->encoding;
1225 DEBUG_TRACE ("exit with TRUE");
1226 return TRUE;
1227 }
1228
1229 /* If 64-bit immediate IMM is in the format of
1230 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1231 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1232 of value "abcdefgh". Otherwise return -1. */
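/* E.g. 0xff00ff0000ff00ff yields 0xa5 (0b10100101), while
   0x1200ff0000ff00ff yields -1 because 0x12 is neither 0x00 nor 0xff. */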
1233 int
1234 aarch64_shrink_expanded_imm8 (uint64_t imm)
1235 {
1236 int i, ret;
1237 uint32_t byte;
1238
1239 ret = 0;
1240 for (i = 0; i < 8; i++)
1241 {
1242 byte = (imm >> (8 * i)) & 0xff;
1243 if (byte == 0xff)
1244 ret |= 1 << i;
1245 else if (byte != 0x00)
1246 return -1;
1247 }
1248 return ret;
1249 }
1250
1251 /* Utility inline functions for operand_general_constraint_met_p. */
1252
1253 static inline void
1254 set_error (aarch64_operand_error *mismatch_detail,
1255 enum aarch64_operand_error_kind kind, int idx,
1256 const char* error)
1257 {
1258 if (mismatch_detail == NULL)
1259 return;
1260 mismatch_detail->kind = kind;
1261 mismatch_detail->index = idx;
1262 mismatch_detail->error = error;
1263 }
1264
1265 static inline void
1266 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1267 const char* error)
1268 {
1269 if (mismatch_detail == NULL)
1270 return;
1271 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1272 }
1273
1274 static inline void
1275 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1276 int idx, int lower_bound, int upper_bound,
1277 const char* error)
1278 {
1279 if (mismatch_detail == NULL)
1280 return;
1281 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1282 mismatch_detail->data[0] = lower_bound;
1283 mismatch_detail->data[1] = upper_bound;
1284 }
1285
1286 static inline void
1287 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1288 int idx, int lower_bound, int upper_bound)
1289 {
1290 if (mismatch_detail == NULL)
1291 return;
1292 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1293 _("immediate value"));
1294 }
1295
1296 static inline void
1297 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1298 int idx, int lower_bound, int upper_bound)
1299 {
1300 if (mismatch_detail == NULL)
1301 return;
1302 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1303 _("immediate offset"));
1304 }
1305
1306 static inline void
1307 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1308 int idx, int lower_bound, int upper_bound)
1309 {
1310 if (mismatch_detail == NULL)
1311 return;
1312 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1313 _("register number"));
1314 }
1315
1316 static inline void
1317 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1318 int idx, int lower_bound, int upper_bound)
1319 {
1320 if (mismatch_detail == NULL)
1321 return;
1322 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1323 _("register element index"));
1324 }
1325
1326 static inline void
1327 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1328 int idx, int lower_bound, int upper_bound)
1329 {
1330 if (mismatch_detail == NULL)
1331 return;
1332 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1333 _("shift amount"));
1334 }
1335
1336 /* Report that the MUL modifier in operand IDX should be in the range
1337 [LOWER_BOUND, UPPER_BOUND]. */
1338 static inline void
1339 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1340 int idx, int lower_bound, int upper_bound)
1341 {
1342 if (mismatch_detail == NULL)
1343 return;
1344 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1345 _("multiplier"));
1346 }
1347
1348 static inline void
1349 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1350 int alignment)
1351 {
1352 if (mismatch_detail == NULL)
1353 return;
1354 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1355 mismatch_detail->data[0] = alignment;
1356 }
1357
1358 static inline void
1359 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1360 int expected_num)
1361 {
1362 if (mismatch_detail == NULL)
1363 return;
1364 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1365 mismatch_detail->data[0] = expected_num;
1366 }
1367
1368 static inline void
1369 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1370 const char* error)
1371 {
1372 if (mismatch_detail == NULL)
1373 return;
1374 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1375 }
1376
1377 /* General constraint checking based on operand code.
1378
1379 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1380 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1381
1382 This function has to be called after the qualifiers for all operands
1383 have been resolved.
1384
1385 A mismatch error message is returned in *MISMATCH_DETAIL upon request,
1386 i.e. when MISMATCH_DETAIL is non-NULL. This avoids generating error
1387 messages during disassembly, where they are not wanted. We avoid
1388 dynamically constructing error-message strings here (i.e. in libopcodes),
1389 as it is costly and complicated; instead, we use a combination of an
1390 error code, a static string and some integer data to represent an error. */
1392
1393 static int
1394 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1395 enum aarch64_opnd type,
1396 const aarch64_opcode *opcode,
1397 aarch64_operand_error *mismatch_detail)
1398 {
1399 unsigned num, modifiers, shift;
1400 unsigned char size;
1401 int64_t imm, min_value, max_value;
1402 uint64_t uvalue, mask;
1403 const aarch64_opnd_info *opnd = opnds + idx;
1404 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1405
1406 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1407
1408 switch (aarch64_operands[type].op_class)
1409 {
1410 case AARCH64_OPND_CLASS_INT_REG:
1411 /* Check pair reg constraints for cas* instructions. */
1412 if (type == AARCH64_OPND_PAIRREG)
1413 {
1414 assert (idx == 1 || idx == 3);
1415 if (opnds[idx - 1].reg.regno % 2 != 0)
1416 {
1417 set_syntax_error (mismatch_detail, idx - 1,
1418 _("reg pair must start from even reg"));
1419 return 0;
1420 }
1421 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1422 {
1423 set_syntax_error (mismatch_detail, idx,
1424 _("reg pair must be contiguous"));
1425 return 0;
1426 }
1427 break;
1428 }
1429
1430 /* <Xt> may be optional in some IC and TLBI instructions. */
1431 if (type == AARCH64_OPND_Rt_SYS)
1432 {
1433 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1434 == AARCH64_OPND_CLASS_SYSTEM));
1435 if (opnds[1].present
1436 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1437 {
1438 set_other_error (mismatch_detail, idx, _("extraneous register"));
1439 return 0;
1440 }
1441 if (!opnds[1].present
1442 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1443 {
1444 set_other_error (mismatch_detail, idx, _("missing register"));
1445 return 0;
1446 }
1447 }
1448 switch (qualifier)
1449 {
1450 case AARCH64_OPND_QLF_WSP:
1451 case AARCH64_OPND_QLF_SP:
1452 if (!aarch64_stack_pointer_p (opnd))
1453 {
1454 set_other_error (mismatch_detail, idx,
1455 _("stack pointer register expected"));
1456 return 0;
1457 }
1458 break;
1459 default:
1460 break;
1461 }
1462 break;
1463
1464 case AARCH64_OPND_CLASS_SVE_REG:
1465 switch (type)
1466 {
1467 case AARCH64_OPND_SVE_Zn_INDEX:
1468 size = aarch64_get_qualifier_esize (opnd->qualifier);
1469 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1470 {
1471 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1472 0, 64 / size - 1);
1473 return 0;
1474 }
1475 break;
1476
1477 case AARCH64_OPND_SVE_ZnxN:
1478 case AARCH64_OPND_SVE_ZtxN:
1479 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1480 {
1481 set_other_error (mismatch_detail, idx,
1482 _("invalid register list"));
1483 return 0;
1484 }
1485 break;
1486
1487 default:
1488 break;
1489 }
1490 break;
1491
1492 case AARCH64_OPND_CLASS_PRED_REG:
1493 if (opnd->reg.regno >= 8
1494 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1495 {
1496 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1497 return 0;
1498 }
1499 break;
1500
1501 case AARCH64_OPND_CLASS_COND:
1502 if (type == AARCH64_OPND_COND1
1503 && (opnds[idx].cond->value & 0xe) == 0xe)
1504 {
1505 /* Do not allow AL or NV. */
1506 set_syntax_error (mismatch_detail, idx, NULL);
1507 }
1508 break;
1509
1510 case AARCH64_OPND_CLASS_ADDRESS:
1511 /* Check writeback. */
1512 switch (opcode->iclass)
1513 {
1514 case ldst_pos:
1515 case ldst_unscaled:
1516 case ldstnapair_offs:
1517 case ldstpair_off:
1518 case ldst_unpriv:
1519 if (opnd->addr.writeback == 1)
1520 {
1521 set_syntax_error (mismatch_detail, idx,
1522 _("unexpected address writeback"));
1523 return 0;
1524 }
1525 break;
1526 case ldst_imm9:
1527 case ldstpair_indexed:
1528 case asisdlsep:
1529 case asisdlsop:
1530 if (opnd->addr.writeback == 0)
1531 {
1532 set_syntax_error (mismatch_detail, idx,
1533 _("address writeback expected"));
1534 return 0;
1535 }
1536 break;
1537 default:
1538 assert (opnd->addr.writeback == 0);
1539 break;
1540 }
1541 switch (type)
1542 {
1543 case AARCH64_OPND_ADDR_SIMM7:
1544 /* Scaled signed 7-bit immediate offset. */
1545 /* Get the size of the data element that is accessed, which may be
1546 different from that of the source register size,
1547 e.g. in strb/ldrb. */
1548 size = aarch64_get_qualifier_esize (opnd->qualifier);
1549 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1550 {
1551 set_offset_out_of_range_error (mismatch_detail, idx,
1552 -64 * size, 63 * size);
1553 return 0;
1554 }
1555 if (!value_aligned_p (opnd->addr.offset.imm, size))
1556 {
1557 set_unaligned_error (mismatch_detail, idx, size);
1558 return 0;
1559 }
1560 break;
1561 case AARCH64_OPND_ADDR_SIMM9:
1562 /* Unscaled signed 9-bit immediate offset. */
1563 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1564 {
1565 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1566 return 0;
1567 }
1568 break;
1569
1570 case AARCH64_OPND_ADDR_SIMM9_2:
1571 /* Unscaled signed 9-bit immediate offset, which has to be negative
1572 or unaligned. */
1573 size = aarch64_get_qualifier_esize (qualifier);
1574 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1575 && !value_aligned_p (opnd->addr.offset.imm, size))
1576 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1577 return 1;
1578 set_other_error (mismatch_detail, idx,
1579 _("negative or unaligned offset expected"));
1580 return 0;
1581
1582 case AARCH64_OPND_SIMD_ADDR_POST:
1583 /* AdvSIMD load/store multiple structures, post-index. */
1584 assert (idx == 1);
1585 if (opnd->addr.offset.is_reg)
1586 {
1587 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1588 return 1;
1589 else
1590 {
1591 set_other_error (mismatch_detail, idx,
1592 _("invalid register offset"));
1593 return 0;
1594 }
1595 }
1596 else
1597 {
1598 const aarch64_opnd_info *prev = &opnds[idx-1];
1599 unsigned num_bytes; /* total number of bytes transferred. */
1600 /* The opcode dependent area stores the number of elements in
1601 each structure to be loaded/stored. */
1602 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1603 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1604 /* Special handling of loading a single structure to all lanes. */
1605 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1606 * aarch64_get_qualifier_esize (prev->qualifier);
1607 else
1608 num_bytes = prev->reglist.num_regs
1609 * aarch64_get_qualifier_esize (prev->qualifier)
1610 * aarch64_get_qualifier_nelem (prev->qualifier);
1611 if ((int) num_bytes != opnd->addr.offset.imm)
1612 {
1613 set_other_error (mismatch_detail, idx,
1614 _("invalid post-increment amount"));
1615 return 0;
1616 }
1617 }
1618 break;
1619
1620 case AARCH64_OPND_ADDR_REGOFF:
1621 /* Get the size of the data element that is accessed, which may be
1622 different from that of the source register size,
1623 e.g. in strb/ldrb. */
1624 size = aarch64_get_qualifier_esize (opnd->qualifier);
1625 /* It is either no shift or shift by the binary logarithm of SIZE. */
1626 if (opnd->shifter.amount != 0
1627 && opnd->shifter.amount != (int)get_logsz (size))
1628 {
1629 set_other_error (mismatch_detail, idx,
1630 _("invalid shift amount"));
1631 return 0;
1632 }
1633 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1634 operators. */
1635 switch (opnd->shifter.kind)
1636 {
1637 case AARCH64_MOD_UXTW:
1638 case AARCH64_MOD_LSL:
1639 case AARCH64_MOD_SXTW:
1640 case AARCH64_MOD_SXTX: break;
1641 default:
1642 set_other_error (mismatch_detail, idx,
1643 _("invalid extend/shift operator"));
1644 return 0;
1645 }
1646 break;
1647
1648 case AARCH64_OPND_ADDR_UIMM12:
1649 imm = opnd->addr.offset.imm;
1650 /* Get the size of the data element that is accessed, which may be
1651 different from that of the source register size,
1652 e.g. in strb/ldrb. */
1653 size = aarch64_get_qualifier_esize (qualifier);
1654 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1655 {
1656 set_offset_out_of_range_error (mismatch_detail, idx,
1657 0, 4095 * size);
1658 return 0;
1659 }
1660 if (!value_aligned_p (opnd->addr.offset.imm, size))
1661 {
1662 set_unaligned_error (mismatch_detail, idx, size);
1663 return 0;
1664 }
1665 break;
1666
1667 case AARCH64_OPND_ADDR_PCREL14:
1668 case AARCH64_OPND_ADDR_PCREL19:
1669 case AARCH64_OPND_ADDR_PCREL21:
1670 case AARCH64_OPND_ADDR_PCREL26:
1671 imm = opnd->imm.value;
1672 if (operand_need_shift_by_two (get_operand_from_code (type)))
1673 {
1674 /* The offset value in a PC-relative branch instruction is always
1675 4-byte aligned and is encoded without the lowest 2 bits. */
1676 if (!value_aligned_p (imm, 4))
1677 {
1678 set_unaligned_error (mismatch_detail, idx, 4);
1679 return 0;
1680 }
1681 /* Right shift by 2 so that we can carry out the following check
1682 canonically. */
1683 imm >>= 2;
1684 }
1685 size = get_operand_fields_width (get_operand_from_code (type));
1686 if (!value_fit_signed_field_p (imm, size))
1687 {
1688 set_other_error (mismatch_detail, idx,
1689 _("immediate out of range"));
1690 return 0;
1691 }
1692 break;
1693
1694 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1695 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1696 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1697 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1698 min_value = -8;
1699 max_value = 7;
1700 sve_imm_offset_vl:
1701 assert (!opnd->addr.offset.is_reg);
1702 assert (opnd->addr.preind);
1703 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1704 min_value *= num;
1705 max_value *= num;
1706 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1707 || (opnd->shifter.operator_present
1708 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1709 {
1710 set_other_error (mismatch_detail, idx,
1711 _("invalid addressing mode"));
1712 return 0;
1713 }
1714 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1715 {
1716 set_offset_out_of_range_error (mismatch_detail, idx,
1717 min_value, max_value);
1718 return 0;
1719 }
1720 if (!value_aligned_p (opnd->addr.offset.imm, num))
1721 {
1722 set_unaligned_error (mismatch_detail, idx, num);
1723 return 0;
1724 }
1725 break;
1726
1727 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1728 min_value = -32;
1729 max_value = 31;
1730 goto sve_imm_offset_vl;
1731
1732 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1733 min_value = -256;
1734 max_value = 255;
1735 goto sve_imm_offset_vl;
1736
1737 case AARCH64_OPND_SVE_ADDR_RI_U6:
1738 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1739 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1740 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1741 min_value = 0;
1742 max_value = 63;
1743 sve_imm_offset:
1744 assert (!opnd->addr.offset.is_reg);
1745 assert (opnd->addr.preind);
1746 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1747 min_value *= num;
1748 max_value *= num;
1749 if (opnd->shifter.operator_present
1750 || opnd->shifter.amount_present)
1751 {
1752 set_other_error (mismatch_detail, idx,
1753 _("invalid addressing mode"));
1754 return 0;
1755 }
1756 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1757 {
1758 set_offset_out_of_range_error (mismatch_detail, idx,
1759 min_value, max_value);
1760 return 0;
1761 }
1762 if (!value_aligned_p (opnd->addr.offset.imm, num))
1763 {
1764 set_unaligned_error (mismatch_detail, idx, num);
1765 return 0;
1766 }
1767 break;
1768
1769 case AARCH64_OPND_SVE_ADDR_RR:
1770 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1771 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1772 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1773 case AARCH64_OPND_SVE_ADDR_RX:
1774 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1775 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1776 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1777 case AARCH64_OPND_SVE_ADDR_RZ:
1778 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1779 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1780 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1781 modifiers = 1 << AARCH64_MOD_LSL;
1782 sve_rr_operand:
1783 assert (opnd->addr.offset.is_reg);
1784 assert (opnd->addr.preind);
1785 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1786 && opnd->addr.offset.regno == 31)
1787 {
1788 set_other_error (mismatch_detail, idx,
1789 _("index register xzr is not allowed"));
1790 return 0;
1791 }
1792 if (((1 << opnd->shifter.kind) & modifiers) == 0
1793 || (opnd->shifter.amount
1794 != get_operand_specific_data (&aarch64_operands[type])))
1795 {
1796 set_other_error (mismatch_detail, idx,
1797 _("invalid addressing mode"));
1798 return 0;
1799 }
1800 break;
1801
1802 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1803 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1804 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1805 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1806 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1807 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1808 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1809 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1810 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1811 goto sve_rr_operand;
1812
1813 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1814 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1815 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1816 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1817 min_value = 0;
1818 max_value = 31;
1819 goto sve_imm_offset;
1820
1821 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1822 modifiers = 1 << AARCH64_MOD_LSL;
1823 sve_zz_operand:
1824 assert (opnd->addr.offset.is_reg);
1825 assert (opnd->addr.preind);
1826 if (((1 << opnd->shifter.kind) & modifiers) == 0
1827 || opnd->shifter.amount < 0
1828 || opnd->shifter.amount > 3)
1829 {
1830 set_other_error (mismatch_detail, idx,
1831 _("invalid addressing mode"));
1832 return 0;
1833 }
1834 break;
1835
1836 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1837 modifiers = (1 << AARCH64_MOD_SXTW);
1838 goto sve_zz_operand;
1839
1840 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1841 modifiers = 1 << AARCH64_MOD_UXTW;
1842 goto sve_zz_operand;
1843
1844 default:
1845 break;
1846 }
1847 break;
1848
1849 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1850 if (type == AARCH64_OPND_LEt)
1851 {
1852 /* Get the upper bound for the element index. */
1853 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1854 if (!value_in_range_p (opnd->reglist.index, 0, num))
1855 {
1856 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1857 return 0;
1858 }
1859 }
1860 /* The opcode dependent area stores the number of elements in
1861 each structure to be loaded/stored. */
1862 num = get_opcode_dependent_value (opcode);
1863 switch (type)
1864 {
1865 case AARCH64_OPND_LVt:
1866 assert (num >= 1 && num <= 4);
1867 /* Unless LD1/ST1, the number of registers should be equal to that
1868 of the structure elements. */
1869 if (num != 1 && opnd->reglist.num_regs != num)
1870 {
1871 set_reg_list_error (mismatch_detail, idx, num);
1872 return 0;
1873 }
1874 break;
1875 case AARCH64_OPND_LVt_AL:
1876 case AARCH64_OPND_LEt:
1877 assert (num >= 1 && num <= 4);
1878 /* The number of registers should be equal to that of the structure
1879 elements. */
1880 if (opnd->reglist.num_regs != num)
1881 {
1882 set_reg_list_error (mismatch_detail, idx, num);
1883 return 0;
1884 }
1885 break;
1886 default:
1887 break;
1888 }
1889 break;
1890
1891 case AARCH64_OPND_CLASS_IMMEDIATE:
1892 /* Constraint check on immediate operand. */
1893 imm = opnd->imm.value;
1894 /* E.g. imm_0_31 constrains value to be 0..31. */
1895 if (qualifier_value_in_range_constraint_p (qualifier)
1896 && !value_in_range_p (imm, get_lower_bound (qualifier),
1897 get_upper_bound (qualifier)))
1898 {
1899 set_imm_out_of_range_error (mismatch_detail, idx,
1900 get_lower_bound (qualifier),
1901 get_upper_bound (qualifier));
1902 return 0;
1903 }
1904
1905 switch (type)
1906 {
1907 case AARCH64_OPND_AIMM:
1908 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1909 {
1910 set_other_error (mismatch_detail, idx,
1911 _("invalid shift operator"));
1912 return 0;
1913 }
1914 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1915 {
1916 set_other_error (mismatch_detail, idx,
1917 _("shift amount must be 0 or 12"));
1918 return 0;
1919 }
1920 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1921 {
1922 set_other_error (mismatch_detail, idx,
1923 _("immediate out of range"));
1924 return 0;
1925 }
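/* Taken together, the checks above accept e.g. "add x0, x1, #4095" and
   "add x0, x1, #4095, lsl #12", and reject any other shift amount or any
   value that does not fit in 12 bits (illustrative examples only). */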
1926 break;
1927
1928 case AARCH64_OPND_HALF:
1929 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1930 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1931 {
1932 set_other_error (mismatch_detail, idx,
1933 _("invalid shift operator"));
1934 return 0;
1935 }
1936 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1937 if (!value_aligned_p (opnd->shifter.amount, 16))
1938 {
1939 set_other_error (mismatch_detail, idx,
1940 _("shift amount must be a multiple of 16"));
1941 return 0;
1942 }
1943 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1944 {
1945 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1946 0, size * 8 - 16);
1947 return 0;
1948 }
1949 if (opnd->imm.value < 0)
1950 {
1951 set_other_error (mismatch_detail, idx,
1952 _("negative immediate value not allowed"));
1953 return 0;
1954 }
1955 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1956 {
1957 set_other_error (mismatch_detail, idx,
1958 _("immediate out of range"));
1959 return 0;
1960 }
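/* For example, "movz x0, #0xbeef, lsl #48" satisfies all of the checks
   above, whereas a shift of 32 or 48 with a W destination is rejected
   because the amount may not exceed size * 8 - 16 (illustrative only). */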
1961 break;
1962
1963 case AARCH64_OPND_IMM_MOV:
1964 {
1965 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1966 imm = opnd->imm.value;
1967 assert (idx == 1);
1968 switch (opcode->op)
1969 {
1970 case OP_MOV_IMM_WIDEN:
1971 imm = ~imm;
1972 /* Fall through... */
1973 case OP_MOV_IMM_WIDE:
1974 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
1975 {
1976 set_other_error (mismatch_detail, idx,
1977 _("immediate out of range"));
1978 return 0;
1979 }
1980 break;
1981 case OP_MOV_IMM_LOG:
1982 if (!aarch64_logical_immediate_p (imm, esize, NULL))
1983 {
1984 set_other_error (mismatch_detail, idx,
1985 _("immediate out of range"));
1986 return 0;
1987 }
1988 break;
1989 default:
1990 assert (0);
1991 return 0;
1992 }
1993 }
1994 break;
1995
1996 case AARCH64_OPND_NZCV:
1997 case AARCH64_OPND_CCMP_IMM:
1998 case AARCH64_OPND_EXCEPTION:
1999 case AARCH64_OPND_UIMM4:
2000 case AARCH64_OPND_UIMM7:
2001 case AARCH64_OPND_UIMM3_OP1:
2002 case AARCH64_OPND_UIMM3_OP2:
2003 case AARCH64_OPND_SVE_UIMM3:
2004 case AARCH64_OPND_SVE_UIMM7:
2005 case AARCH64_OPND_SVE_UIMM8:
2006 case AARCH64_OPND_SVE_UIMM8_53:
2007 size = get_operand_fields_width (get_operand_from_code (type));
2008 assert (size < 32);
2009 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2010 {
2011 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2012 (1 << size) - 1);
2013 return 0;
2014 }
2015 break;
2016
2017 case AARCH64_OPND_SIMM5:
2018 case AARCH64_OPND_SVE_SIMM5:
2019 case AARCH64_OPND_SVE_SIMM5B:
2020 case AARCH64_OPND_SVE_SIMM6:
2021 case AARCH64_OPND_SVE_SIMM8:
2022 size = get_operand_fields_width (get_operand_from_code (type));
2023 assert (size < 32);
2024 if (!value_fit_signed_field_p (opnd->imm.value, size))
2025 {
2026 set_imm_out_of_range_error (mismatch_detail, idx,
2027 -(1 << (size - 1)),
2028 (1 << (size - 1)) - 1);
2029 return 0;
2030 }
2031 break;
2032
2033 case AARCH64_OPND_WIDTH:
2034 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2035 && opnds[0].type == AARCH64_OPND_Rd);
2036 size = get_upper_bound (qualifier);
2037 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2038 /* lsb+width <= reg.size */
2039 {
2040 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2041 size - opnds[idx-1].imm.value);
2042 return 0;
2043 }
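/* For instance, with a 32-bit destination and <lsb> of 24, the largest
   accepted <width> is 8, keeping lsb + width within the register size. */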
2044 break;
2045
2046 case AARCH64_OPND_LIMM:
2047 case AARCH64_OPND_SVE_LIMM:
2048 {
2049 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2050 uint64_t uimm = opnd->imm.value;
2051 if (opcode->op == OP_BIC)
2052 uimm = ~uimm;
2053 if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
2054 {
2055 set_other_error (mismatch_detail, idx,
2056 _("immediate out of range"));
2057 return 0;
2058 }
2059 }
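/* As an illustration, 0x00ff00ff00ff00ff is a valid bitmask immediate for
   a 64-bit operation, whereas 0 and all-ones are not; for BIC the value
   is inverted before the test, as above. */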
2060 break;
2061
2062 case AARCH64_OPND_IMM0:
2063 case AARCH64_OPND_FPIMM0:
2064 if (opnd->imm.value != 0)
2065 {
2066 set_other_error (mismatch_detail, idx,
2067 _("immediate zero expected"));
2068 return 0;
2069 }
2070 break;
2071
2072 case AARCH64_OPND_SHLL_IMM:
2073 assert (idx == 2);
2074 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2075 if (opnd->imm.value != size)
2076 {
2077 set_other_error (mismatch_detail, idx,
2078 _("invalid shift amount"));
2079 return 0;
2080 }
2081 break;
2082
2083 case AARCH64_OPND_IMM_VLSL:
2084 size = aarch64_get_qualifier_esize (qualifier);
2085 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2086 {
2087 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2088 size * 8 - 1);
2089 return 0;
2090 }
2091 break;
2092
2093 case AARCH64_OPND_IMM_VLSR:
2094 size = aarch64_get_qualifier_esize (qualifier);
2095 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2096 {
2097 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2098 return 0;
2099 }
2100 break;
2101
2102 case AARCH64_OPND_SIMD_IMM:
2103 case AARCH64_OPND_SIMD_IMM_SFT:
2104 /* Qualifier check. */
2105 switch (qualifier)
2106 {
2107 case AARCH64_OPND_QLF_LSL:
2108 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2109 {
2110 set_other_error (mismatch_detail, idx,
2111 _("invalid shift operator"));
2112 return 0;
2113 }
2114 break;
2115 case AARCH64_OPND_QLF_MSL:
2116 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2117 {
2118 set_other_error (mismatch_detail, idx,
2119 _("invalid shift operator"));
2120 return 0;
2121 }
2122 break;
2123 case AARCH64_OPND_QLF_NIL:
2124 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2125 {
2126 set_other_error (mismatch_detail, idx,
2127 _("shift is not permitted"));
2128 return 0;
2129 }
2130 break;
2131 default:
2132 assert (0);
2133 return 0;
2134 }
2135 /* Is the immediate valid? */
2136 assert (idx == 1);
2137 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2138 {
2139 /* uimm8 or simm8 */
2140 if (!value_in_range_p (opnd->imm.value, -128, 255))
2141 {
2142 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2143 return 0;
2144 }
2145 }
2146 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2147 {
2148 /* uimm64 is not
2149 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2150 ffffffffgggggggghhhhhhhh'. */
2151 set_other_error (mismatch_detail, idx,
2152 _("invalid value for immediate"));
2153 return 0;
2154 }
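/* In other words, every byte of the expanded 64-bit immediate must be
   either 0x00 or 0xff; e.g. 0xff00ffff0000ff00 is accepted while
   0x0102030405060708 is rejected. */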
2155 /* Is the shift amount valid? */
2156 switch (opnd->shifter.kind)
2157 {
2158 case AARCH64_MOD_LSL:
2159 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2160 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2161 {
2162 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2163 (size - 1) * 8);
2164 return 0;
2165 }
2166 if (!value_aligned_p (opnd->shifter.amount, 8))
2167 {
2168 set_unaligned_error (mismatch_detail, idx, 8);
2169 return 0;
2170 }
2171 break;
2172 case AARCH64_MOD_MSL:
2173 /* Only 8 and 16 are valid shift amounts. */
2174 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2175 {
2176 set_other_error (mismatch_detail, idx,
2177 _("shift amount must be 0 or 16"));
2178 return 0;
2179 }
2180 break;
2181 default:
2182 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2183 {
2184 set_other_error (mismatch_detail, idx,
2185 _("invalid shift operator"));
2186 return 0;
2187 }
2188 break;
2189 }
2190 break;
2191
2192 case AARCH64_OPND_FPIMM:
2193 case AARCH64_OPND_SIMD_FPIMM:
2194 case AARCH64_OPND_SVE_FPIMM8:
2195 if (opnd->imm.is_fp == 0)
2196 {
2197 set_other_error (mismatch_detail, idx,
2198 _("floating-point immediate expected"));
2199 return 0;
2200 }
2201 /* The value is expected to be an 8-bit floating-point constant with
2202 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2203 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2204 instruction). */
2205 if (!value_in_range_p (opnd->imm.value, 0, 255))
2206 {
2207 set_other_error (mismatch_detail, idx,
2208 _("immediate out of range"));
2209 return 0;
2210 }
2211 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2212 {
2213 set_other_error (mismatch_detail, idx,
2214 _("invalid shift operator"));
2215 return 0;
2216 }
2217 break;
2218
2219 case AARCH64_OPND_SVE_AIMM:
2220 min_value = 0;
2221 sve_aimm:
2222 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2223 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2224 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2225 uvalue = opnd->imm.value;
2226 shift = opnd->shifter.amount;
2227 if (size == 1)
2228 {
2229 if (shift != 0)
2230 {
2231 set_other_error (mismatch_detail, idx,
2232 _("no shift amount allowed for"
2233 " 8-bit constants"));
2234 return 0;
2235 }
2236 }
2237 else
2238 {
2239 if (shift != 0 && shift != 8)
2240 {
2241 set_other_error (mismatch_detail, idx,
2242 _("shift amount must be 0 or 8"));
2243 return 0;
2244 }
2245 if (shift == 0 && (uvalue & 0xff) == 0)
2246 {
2247 shift = 8;
2248 uvalue = (int64_t) uvalue / 256;
2249 }
2250 }
2251 mask >>= shift;
2252 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2253 {
2254 set_other_error (mismatch_detail, idx,
2255 _("immediate too big for element size"));
2256 return 0;
2257 }
2258 uvalue = (uvalue - min_value) & mask;
2259 if (uvalue > 0xff)
2260 {
2261 set_other_error (mismatch_detail, idx,
2262 _("invalid arithmetic immediate"));
2263 return 0;
2264 }
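/* For element sizes larger than a byte, a plain multiple of 256 such as
   #512 is normalized above to #2, LSL #8 before the final range check;
   anything that does not fit in a byte after shifting is rejected. */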
2265 break;
2266
2267 case AARCH64_OPND_SVE_ASIMM:
2268 min_value = -128;
2269 goto sve_aimm;
2270
2271 case AARCH64_OPND_SVE_I1_HALF_ONE:
2272 assert (opnd->imm.is_fp);
2273 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2274 {
2275 set_other_error (mismatch_detail, idx,
2276 _("floating-point value must be 0.5 or 1.0"));
2277 return 0;
2278 }
2279 break;
2280
2281 case AARCH64_OPND_SVE_I1_HALF_TWO:
2282 assert (opnd->imm.is_fp);
2283 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2284 {
2285 set_other_error (mismatch_detail, idx,
2286 _("floating-point value must be 0.5 or 2.0"));
2287 return 0;
2288 }
2289 break;
2290
2291 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2292 assert (opnd->imm.is_fp);
2293 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2294 {
2295 set_other_error (mismatch_detail, idx,
2296 _("floating-point value must be 0.0 or 1.0"));
2297 return 0;
2298 }
2299 break;
2300
2301 case AARCH64_OPND_SVE_INV_LIMM:
2302 {
2303 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2304 uint64_t uimm = ~opnd->imm.value;
2305 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2306 {
2307 set_other_error (mismatch_detail, idx,
2308 _("immediate out of range"));
2309 return 0;
2310 }
2311 }
2312 break;
2313
2314 case AARCH64_OPND_SVE_LIMM_MOV:
2315 {
2316 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2317 uint64_t uimm = opnd->imm.value;
2318 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2319 {
2320 set_other_error (mismatch_detail, idx,
2321 _("immediate out of range"));
2322 return 0;
2323 }
2324 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2325 {
2326 set_other_error (mismatch_detail, idx,
2327 _("invalid replicated MOV immediate"));
2328 return 0;
2329 }
2330 }
2331 break;
2332
2333 case AARCH64_OPND_SVE_PATTERN_SCALED:
2334 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2335 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2336 {
2337 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2338 return 0;
2339 }
2340 break;
2341
2342 case AARCH64_OPND_SVE_SHLIMM_PRED:
2343 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2344 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2345 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2346 {
2347 set_imm_out_of_range_error (mismatch_detail, idx,
2348 0, 8 * size - 1);
2349 return 0;
2350 }
2351 break;
2352
2353 case AARCH64_OPND_SVE_SHRIMM_PRED:
2354 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2355 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2356 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2357 {
2358 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2359 return 0;
2360 }
2361 break;
2362
2363 default:
2364 break;
2365 }
2366 break;
2367
2368 case AARCH64_OPND_CLASS_CP_REG:
2369 /* Cn or Cm: 4-bit opcode field named for historical reasons.
2370 valid range: C0 - C15. */
2371 if (opnd->reg.regno > 15)
2372 {
2373 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2374 return 0;
2375 }
2376 break;
2377
2378 case AARCH64_OPND_CLASS_SYSTEM:
2379 switch (type)
2380 {
2381 case AARCH64_OPND_PSTATEFIELD:
2382 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2383 /* MSR UAO, #uimm4
2384 MSR PAN, #uimm4
2385 The immediate must be #0 or #1. */
2386 if ((opnd->pstatefield == 0x03 /* UAO. */
2387 || opnd->pstatefield == 0x04) /* PAN. */
2388 && opnds[1].imm.value > 1)
2389 {
2390 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2391 return 0;
2392 }
2393 /* MSR SPSel, #uimm4
2394 Uses uimm4 as a control value to select the stack pointer: if
2395 bit 0 is set it selects the current exception level's stack
2396 pointer, if bit 0 is clear it selects the shared EL0 stack pointer.
2397 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2398 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2399 {
2400 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2401 return 0;
2402 }
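/* So, for example, "msr pan, #1" and "msr spsel, #0" are accepted here,
   while "msr pan, #2" is reported as out of range (illustrative only). */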
2403 break;
2404 default:
2405 break;
2406 }
2407 break;
2408
2409 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2410 /* Get the upper bound for the element index. */
2411 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2412 /* Index out-of-range. */
2413 if (!value_in_range_p (opnd->reglane.index, 0, num))
2414 {
2415 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2416 return 0;
2417 }
2418 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2419 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2420 number is encoded in "size:M:Rm":
2421 size <Vm>
2422 00 RESERVED
2423 01 0:Rm
2424 10 M:Rm
2425 11 RESERVED */
2426 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2427 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2428 {
2429 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2430 return 0;
2431 }
2432 break;
2433
2434 case AARCH64_OPND_CLASS_MODIFIED_REG:
2435 assert (idx == 1 || idx == 2);
2436 switch (type)
2437 {
2438 case AARCH64_OPND_Rm_EXT:
2439 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
2440 && opnd->shifter.kind != AARCH64_MOD_LSL)
2441 {
2442 set_other_error (mismatch_detail, idx,
2443 _("extend operator expected"));
2444 return 0;
2445 }
2446 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2447 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2448 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2449 case. */
2450 if (!aarch64_stack_pointer_p (opnds + 0)
2451 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2452 {
2453 if (!opnd->shifter.operator_present)
2454 {
2455 set_other_error (mismatch_detail, idx,
2456 _("missing extend operator"));
2457 return 0;
2458 }
2459 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2460 {
2461 set_other_error (mismatch_detail, idx,
2462 _("'LSL' operator not allowed"));
2463 return 0;
2464 }
2465 }
2466 assert (opnd->shifter.operator_present /* Default to LSL. */
2467 || opnd->shifter.kind == AARCH64_MOD_LSL);
2468 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2469 {
2470 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2471 return 0;
2472 }
2473 /* In the 64-bit form, the final register operand is written as Wm
2474 for all but the (possibly omitted) UXTX/LSL and SXTX
2475 operators.
2476 N.B. GAS allows X register to be used with any operator as a
2477 programming convenience. */
2478 if (qualifier == AARCH64_OPND_QLF_X
2479 && opnd->shifter.kind != AARCH64_MOD_LSL
2480 && opnd->shifter.kind != AARCH64_MOD_UXTX
2481 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2482 {
2483 set_other_error (mismatch_detail, idx, _("W register expected"));
2484 return 0;
2485 }
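/* Putting the above together: "add x0, x1, w2, uxtw #2" passes these
   checks, while "add x0, x1, x2, lsl #2" is rejected here because the
   LSL alias is only accepted when Rd or Rn is SP (illustrative only). */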
2486 break;
2487
2488 case AARCH64_OPND_Rm_SFT:
2489 /* ROR is not available to the shifted register operand in
2490 arithmetic instructions. */
2491 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2492 {
2493 set_other_error (mismatch_detail, idx,
2494 _("shift operator expected"));
2495 return 0;
2496 }
2497 if (opnd->shifter.kind == AARCH64_MOD_ROR
2498 && opcode->iclass != log_shift)
2499 {
2500 set_other_error (mismatch_detail, idx,
2501 _("'ROR' operator not allowed"));
2502 return 0;
2503 }
2504 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2505 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2506 {
2507 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2508 return 0;
2509 }
2510 break;
2511
2512 default:
2513 break;
2514 }
2515 break;
2516
2517 default:
2518 break;
2519 }
2520
2521 return 1;
2522 }
2523
2524 /* Main entrypoint for the operand constraint checking.
2525
2526 Return 1 if operands of *INST meet the constraint applied by the operand
2527 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2528 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2529 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2530 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2531 error kind when it is notified that an instruction does not pass the check).
2532
2533 Un-determined operand qualifiers may get established during the process. */
2534
2535 int
2536 aarch64_match_operands_constraint (aarch64_inst *inst,
2537 aarch64_operand_error *mismatch_detail)
2538 {
2539 int i;
2540
2541 DEBUG_TRACE ("enter");
2542
2543 /* Check for cases where a source register needs to be the same as the
2544 destination register. Do this before matching qualifiers since if
2545 an instruction has both invalid tying and invalid qualifiers,
2546 the error about qualifiers would suggest several alternative
2547 instructions that also have invalid tying. */
2548 i = inst->opcode->tied_operand;
2549 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2550 {
2551 if (mismatch_detail)
2552 {
2553 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2554 mismatch_detail->index = i;
2555 mismatch_detail->error = NULL;
2556 }
2557 return 0;
2558 }
2559
2560 /* Match operands' qualifiers.
2561 *INST has already had qualifiers established for some, if not all, of
2562 its operands; we need to find out whether these established
2563 qualifiers match one of the qualifier sequences in
2564 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2565 the corresponding qualifier in that sequence.
2566 Only basic operand constraint checking is done here; the more thorough
2567 constraint checking is carried out by operand_general_constraint_met_p,
2568 which has to be called after this in order to get all of the operands'
2569 qualifiers established. */
2570 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2571 {
2572 DEBUG_TRACE ("FAIL on operand qualifier matching");
2573 if (mismatch_detail)
2574 {
2575 /* Return an error type to indicate that it is the qualifier
2576 matching that failed; we don't care about which operand as there
2577 is enough information in the opcode table to reproduce it. */
2578 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2579 mismatch_detail->index = -1;
2580 mismatch_detail->error = NULL;
2581 }
2582 return 0;
2583 }
2584
2585 /* Match operands' constraint. */
2586 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2587 {
2588 enum aarch64_opnd type = inst->opcode->operands[i];
2589 if (type == AARCH64_OPND_NIL)
2590 break;
2591 if (inst->operands[i].skip)
2592 {
2593 DEBUG_TRACE ("skip the incomplete operand %d", i);
2594 continue;
2595 }
2596 if (operand_general_constraint_met_p (inst->operands, i, type,
2597 inst->opcode, mismatch_detail) == 0)
2598 {
2599 DEBUG_TRACE ("FAIL on operand %d", i);
2600 return 0;
2601 }
2602 }
2603
2604 DEBUG_TRACE ("PASS");
2605
2606 return 1;
2607 }
2608
2609 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2610 Also updates the TYPE of each INST->OPERANDS with the corresponding
2611 value of OPCODE->OPERANDS.
2612
2613 Note that some operand qualifiers may need to be manually cleared by
2614 the caller before it calls aarch64_opcode_encode again; doing this
2615 helps the qualifier matching facilities work
2616 properly. */
2617
2618 const aarch64_opcode*
2619 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2620 {
2621 int i;
2622 const aarch64_opcode *old = inst->opcode;
2623
2624 inst->opcode = opcode;
2625
2626 /* Update the operand types. */
2627 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2628 {
2629 inst->operands[i].type = opcode->operands[i];
2630 if (opcode->operands[i] == AARCH64_OPND_NIL)
2631 break;
2632 }
2633
2634 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2635
2636 return old;
2637 }
2638
2639 int
2640 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2641 {
2642 int i;
2643 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2644 if (operands[i] == operand)
2645 return i;
2646 else if (operands[i] == AARCH64_OPND_NIL)
2647 break;
2648 return -1;
2649 }
2650 \f
2651 /* R0...R30, followed by FOR31. */
2652 #define BANK(R, FOR31) \
2653 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
2654 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2655 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2656 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
2657 /* [0][0] 32-bit integer regs with sp Wn
2658 [0][1] 64-bit integer regs with sp Xn sf=1
2659 [1][0] 32-bit integer regs with #0 Wn
2660 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2661 static const char *int_reg[2][2][32] = {
2662 #define R32(X) "w" #X
2663 #define R64(X) "x" #X
2664 { BANK (R32, "wsp"), BANK (R64, "sp") },
2665 { BANK (R32, "wzr"), BANK (R64, "xzr") }
2666 #undef R64
2667 #undef R32
2668 };
2669
2670 /* Names of the SVE vector registers, first with .S suffixes,
2671 then with .D suffixes. */
2672
2673 static const char *sve_reg[2][32] = {
2674 #define ZS(X) "z" #X ".s"
2675 #define ZD(X) "z" #X ".d"
2676 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2677 #undef ZD
2678 #undef ZS
2679 };
2680 #undef BANK
2681
2682 /* Return the integer register name.
2683 If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg. */
2684
2685 static inline const char *
2686 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2687 {
2688 const int has_zr = sp_reg_p ? 0 : 1;
2689 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2690 return int_reg[has_zr][is_64][regno];
2691 }
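/* For example, get_int_reg_name (31, AARCH64_OPND_QLF_X, 1) yields "sp",
   while get_int_reg_name (31, AARCH64_OPND_QLF_W, 0) yields "wzr". */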
2692
2693 /* Like get_int_reg_name, but IS_64 is always 1. */
2694
2695 static inline const char *
2696 get_64bit_int_reg_name (int regno, int sp_reg_p)
2697 {
2698 const int has_zr = sp_reg_p ? 0 : 1;
2699 return int_reg[has_zr][1][regno];
2700 }
2701
2702 /* Get the name of the integer offset register in OPND, using the shift type
2703 to decide whether it's a word or doubleword. */
2704
2705 static inline const char *
2706 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2707 {
2708 switch (opnd->shifter.kind)
2709 {
2710 case AARCH64_MOD_UXTW:
2711 case AARCH64_MOD_SXTW:
2712 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2713
2714 case AARCH64_MOD_LSL:
2715 case AARCH64_MOD_SXTX:
2716 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2717
2718 default:
2719 abort ();
2720 }
2721 }
2722
2723 /* Get the name of the SVE vector offset register in OPND, using the operand
2724 qualifier to decide whether the suffix should be .S or .D. */
2725
2726 static inline const char *
2727 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2728 {
2729 assert (qualifier == AARCH64_OPND_QLF_S_S
2730 || qualifier == AARCH64_OPND_QLF_S_D);
2731 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2732 }
2733
2734 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2735
2736 typedef union
2737 {
2738 uint64_t i;
2739 double d;
2740 } double_conv_t;
2741
2742 typedef union
2743 {
2744 uint32_t i;
2745 float f;
2746 } single_conv_t;
2747
2748 typedef union
2749 {
2750 uint32_t i;
2751 float f;
2752 } half_conv_t;
2753
2754 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2755 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2756 (depending on the type of the instruction). IMM8 will be expanded to a
2757 single-precision floating-point value (SIZE == 4) or a double-precision
2758 floating-point value (SIZE == 8). A half-precision floating-point value
2759 (SIZE == 2) is expanded to a single-precision floating-point value. The
2760 expanded value is returned. */
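/* For instance, expand_fp_imm (4, 0x70) returns 0x3f800000, the
   single-precision encoding of 1.0. */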
2761
2762 static uint64_t
2763 expand_fp_imm (int size, uint32_t imm8)
2764 {
2765 uint64_t imm;
2766 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2767
2768 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2769 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2770 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2771 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2772 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
2773 if (size == 8)
2774 {
2775 imm = (imm8_7 << (63-32)) /* imm8<7> */
2776 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
2777 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2778 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2779 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2780 imm <<= 32;
2781 }
2782 else if (size == 4 || size == 2)
2783 {
2784 imm = (imm8_7 << 31) /* imm8<7> */
2785 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2786 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2787 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2788 }
2789 else
2790 {
2791 /* An unsupported size. */
2792 assert (0);
2793 }
2794
2795 return imm;
2796 }
2797
2798 /* Produce the string representation of the register list operand *OPND
2799 in the buffer pointed to by BUF of size SIZE. PREFIX is the part of
2800 the register name that comes before the register number, such as "v". */
2801 static void
2802 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2803 const char *prefix)
2804 {
2805 const int num_regs = opnd->reglist.num_regs;
2806 const int first_reg = opnd->reglist.first_regno;
2807 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2808 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2809 char tb[8]; /* Temporary buffer. */
2810
2811 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2812 assert (num_regs >= 1 && num_regs <= 4);
2813
2814 /* Prepare the index if any. */
2815 if (opnd->reglist.has_index)
2816 snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
2817 else
2818 tb[0] = '\0';
2819
2820 /* The hyphenated form is preferred for disassembly if there are
2821 more than two registers in the list, and the register numbers
2822 are monotonically increasing in increments of one. */
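/* E.g. a list of three registers starting at v4 prints as
   "{v4.16b-v6.16b}", whereas a list starting at v31 wraps around and
   uses the comma-separated form instead. */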
2823 if (num_regs > 2 && last_reg > first_reg)
2824 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2825 prefix, last_reg, qlf_name, tb);
2826 else
2827 {
2828 const int reg0 = first_reg;
2829 const int reg1 = (first_reg + 1) & 0x1f;
2830 const int reg2 = (first_reg + 2) & 0x1f;
2831 const int reg3 = (first_reg + 3) & 0x1f;
2832
2833 switch (num_regs)
2834 {
2835 case 1:
2836 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2837 break;
2838 case 2:
2839 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2840 prefix, reg1, qlf_name, tb);
2841 break;
2842 case 3:
2843 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2844 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2845 prefix, reg2, qlf_name, tb);
2846 break;
2847 case 4:
2848 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2849 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2850 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2851 break;
2852 }
2853 }
2854 }
2855
2856 /* Print the register+immediate address in OPND to BUF, which has SIZE
2857 characters. BASE is the name of the base register. */
2858
2859 static void
2860 print_immediate_offset_address (char *buf, size_t size,
2861 const aarch64_opnd_info *opnd,
2862 const char *base)
2863 {
2864 if (opnd->addr.writeback)
2865 {
2866 if (opnd->addr.preind)
2867 snprintf (buf, size, "[%s,#%d]!", base, opnd->addr.offset.imm);
2868 else
2869 snprintf (buf, size, "[%s],#%d", base, opnd->addr.offset.imm);
2870 }
2871 else
2872 {
2873 if (opnd->shifter.operator_present)
2874 {
2875 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2876 snprintf (buf, size, "[%s,#%d,mul vl]",
2877 base, opnd->addr.offset.imm);
2878 }
2879 else if (opnd->addr.offset.imm)
2880 snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm);
2881 else
2882 snprintf (buf, size, "[%s]", base);
2883 }
2884 }
2885
2886 /* Produce the string representation of the register offset address operand
2887 *OPND in the buffer pointed to by BUF of size SIZE. BASE and OFFSET are
2888 the names of the base and offset registers. */
2889 static void
2890 print_register_offset_address (char *buf, size_t size,
2891 const aarch64_opnd_info *opnd,
2892 const char *base, const char *offset)
2893 {
2894 char tb[16]; /* Temporary buffer. */
2895 bfd_boolean print_extend_p = TRUE;
2896 bfd_boolean print_amount_p = TRUE;
2897 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2898
2899 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2900 || !opnd->shifter.amount_present))
2901 {
2902 /* Do not print the shift/extend amount when the amount is zero and
2903 it is not the special case of an 8-bit load/store instruction. */
2904 print_amount_p = FALSE;
2905 /* Likewise, no need to print the shift operator LSL in such a
2906 situation. */
2907 if (opnd->shifter.kind == AARCH64_MOD_LSL)
2908 print_extend_p = FALSE;
2909 }
2910
2911 /* Prepare for the extend/shift. */
2912 if (print_extend_p)
2913 {
2914 if (print_amount_p)
2915 snprintf (tb, sizeof (tb), ",%s #%" PRIi64, shift_name,
2916 opnd->shifter.amount);
2917 else
2918 snprintf (tb, sizeof (tb), ",%s", shift_name);
2919 }
2920 else
2921 tb[0] = '\0';
2922
2923 snprintf (buf, size, "[%s,%s%s]", base, offset, tb);
2924 }
2925
2926 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2927 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2928 PC, PCREL_P and ADDRESS are used to pass in and return information about
2929 the PC-relative address calculation, where the PC value is passed in
2930 PC. If the operand is PC-relative, *PCREL_P (if PCREL_P is non-NULL)
2931 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2932 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2933
2934 The function serves both the disassembler and the assembler diagnostics
2935 issuer, which is why it lives in this file. */
2936
2937 void
2938 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2939 const aarch64_opcode *opcode,
2940 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2941 bfd_vma *address)
2942 {
2943 unsigned int i, num_conds;
2944 const char *name = NULL;
2945 const aarch64_opnd_info *opnd = opnds + idx;
2946 enum aarch64_modifier_kind kind;
2947 uint64_t addr, enum_value;
2948
2949 buf[0] = '\0';
2950 if (pcrel_p)
2951 *pcrel_p = 0;
2952
2953 switch (opnd->type)
2954 {
2955 case AARCH64_OPND_Rd:
2956 case AARCH64_OPND_Rn:
2957 case AARCH64_OPND_Rm:
2958 case AARCH64_OPND_Rt:
2959 case AARCH64_OPND_Rt2:
2960 case AARCH64_OPND_Rs:
2961 case AARCH64_OPND_Ra:
2962 case AARCH64_OPND_Rt_SYS:
2963 case AARCH64_OPND_PAIRREG:
2964 case AARCH64_OPND_SVE_Rm:
2965 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2966 the <ic_op>, therefore we use opnd->present to override the
2967 generic optional-ness information. */
2968 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2969 break;
2970 /* Omit the operand, e.g. RET. */
2971 if (optional_operand_p (opcode, idx)
2972 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2973 break;
2974 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2975 || opnd->qualifier == AARCH64_OPND_QLF_X);
2976 snprintf (buf, size, "%s",
2977 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2978 break;
2979
2980 case AARCH64_OPND_Rd_SP:
2981 case AARCH64_OPND_Rn_SP:
2982 case AARCH64_OPND_SVE_Rn_SP:
2983 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2984 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2985 || opnd->qualifier == AARCH64_OPND_QLF_X
2986 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2987 snprintf (buf, size, "%s",
2988 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2989 break;
2990
2991 case AARCH64_OPND_Rm_EXT:
2992 kind = opnd->shifter.kind;
2993 assert (idx == 1 || idx == 2);
2994 if ((aarch64_stack_pointer_p (opnds)
2995 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2996 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2997 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2998 && kind == AARCH64_MOD_UXTW)
2999 || (opnd->qualifier == AARCH64_OPND_QLF_X
3000 && kind == AARCH64_MOD_UXTX)))
3001 {
3002 /* 'LSL' is the preferred form in this case. */
3003 kind = AARCH64_MOD_LSL;
3004 if (opnd->shifter.amount == 0)
3005 {
3006 /* Shifter omitted. */
3007 snprintf (buf, size, "%s",
3008 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3009 break;
3010 }
3011 }
3012 if (opnd->shifter.amount)
3013 snprintf (buf, size, "%s, %s #%" PRIi64,
3014 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3015 aarch64_operand_modifiers[kind].name,
3016 opnd->shifter.amount);
3017 else
3018 snprintf (buf, size, "%s, %s",
3019 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3020 aarch64_operand_modifiers[kind].name);
3021 break;
3022
3023 case AARCH64_OPND_Rm_SFT:
3024 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3025 || opnd->qualifier == AARCH64_OPND_QLF_X);
3026 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3027 snprintf (buf, size, "%s",
3028 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3029 else
3030 snprintf (buf, size, "%s, %s #%" PRIi64,
3031 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3032 aarch64_operand_modifiers[opnd->shifter.kind].name,
3033 opnd->shifter.amount);
3034 break;
3035
3036 case AARCH64_OPND_Fd:
3037 case AARCH64_OPND_Fn:
3038 case AARCH64_OPND_Fm:
3039 case AARCH64_OPND_Fa:
3040 case AARCH64_OPND_Ft:
3041 case AARCH64_OPND_Ft2:
3042 case AARCH64_OPND_Sd:
3043 case AARCH64_OPND_Sn:
3044 case AARCH64_OPND_Sm:
3045 case AARCH64_OPND_SVE_VZn:
3046 case AARCH64_OPND_SVE_Vd:
3047 case AARCH64_OPND_SVE_Vm:
3048 case AARCH64_OPND_SVE_Vn:
3049 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3050 opnd->reg.regno);
3051 break;
3052
3053 case AARCH64_OPND_Vd:
3054 case AARCH64_OPND_Vn:
3055 case AARCH64_OPND_Vm:
3056 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3057 aarch64_get_qualifier_name (opnd->qualifier));
3058 break;
3059
3060 case AARCH64_OPND_Ed:
3061 case AARCH64_OPND_En:
3062 case AARCH64_OPND_Em:
3063 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3064 aarch64_get_qualifier_name (opnd->qualifier),
3065 opnd->reglane.index);
3066 break;
3067
3068 case AARCH64_OPND_VdD1:
3069 case AARCH64_OPND_VnD1:
3070 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3071 break;
3072
3073 case AARCH64_OPND_LVn:
3074 case AARCH64_OPND_LVt:
3075 case AARCH64_OPND_LVt_AL:
3076 case AARCH64_OPND_LEt:
3077 print_register_list (buf, size, opnd, "v");
3078 break;
3079
3080 case AARCH64_OPND_SVE_Pd:
3081 case AARCH64_OPND_SVE_Pg3:
3082 case AARCH64_OPND_SVE_Pg4_5:
3083 case AARCH64_OPND_SVE_Pg4_10:
3084 case AARCH64_OPND_SVE_Pg4_16:
3085 case AARCH64_OPND_SVE_Pm:
3086 case AARCH64_OPND_SVE_Pn:
3087 case AARCH64_OPND_SVE_Pt:
3088 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3089 snprintf (buf, size, "p%d", opnd->reg.regno);
3090 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3091 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3092 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3093 aarch64_get_qualifier_name (opnd->qualifier));
3094 else
3095 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3096 aarch64_get_qualifier_name (opnd->qualifier));
3097 break;
3098
3099 case AARCH64_OPND_SVE_Za_5:
3100 case AARCH64_OPND_SVE_Za_16:
3101 case AARCH64_OPND_SVE_Zd:
3102 case AARCH64_OPND_SVE_Zm_5:
3103 case AARCH64_OPND_SVE_Zm_16:
3104 case AARCH64_OPND_SVE_Zn:
3105 case AARCH64_OPND_SVE_Zt:
3106 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3107 snprintf (buf, size, "z%d", opnd->reg.regno);
3108 else
3109 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3110 aarch64_get_qualifier_name (opnd->qualifier));
3111 break;
3112
3113 case AARCH64_OPND_SVE_ZnxN:
3114 case AARCH64_OPND_SVE_ZtxN:
3115 print_register_list (buf, size, opnd, "z");
3116 break;
3117
3118 case AARCH64_OPND_SVE_Zn_INDEX:
3119 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3120 aarch64_get_qualifier_name (opnd->qualifier),
3121 opnd->reglane.index);
3122 break;
3123
3124 case AARCH64_OPND_Cn:
3125 case AARCH64_OPND_Cm:
3126 snprintf (buf, size, "C%d", opnd->reg.regno);
3127 break;
3128
3129 case AARCH64_OPND_IDX:
3130 case AARCH64_OPND_IMM:
3131 case AARCH64_OPND_WIDTH:
3132 case AARCH64_OPND_UIMM3_OP1:
3133 case AARCH64_OPND_UIMM3_OP2:
3134 case AARCH64_OPND_BIT_NUM:
3135 case AARCH64_OPND_IMM_VLSL:
3136 case AARCH64_OPND_IMM_VLSR:
3137 case AARCH64_OPND_SHLL_IMM:
3138 case AARCH64_OPND_IMM0:
3139 case AARCH64_OPND_IMMR:
3140 case AARCH64_OPND_IMMS:
3141 case AARCH64_OPND_FBITS:
3142 case AARCH64_OPND_SIMM5:
3143 case AARCH64_OPND_SVE_SHLIMM_PRED:
3144 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3145 case AARCH64_OPND_SVE_SHRIMM_PRED:
3146 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3147 case AARCH64_OPND_SVE_SIMM5:
3148 case AARCH64_OPND_SVE_SIMM5B:
3149 case AARCH64_OPND_SVE_SIMM6:
3150 case AARCH64_OPND_SVE_SIMM8:
3151 case AARCH64_OPND_SVE_UIMM3:
3152 case AARCH64_OPND_SVE_UIMM7:
3153 case AARCH64_OPND_SVE_UIMM8:
3154 case AARCH64_OPND_SVE_UIMM8_53:
3155 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3156 break;
3157
3158 case AARCH64_OPND_SVE_I1_HALF_ONE:
3159 case AARCH64_OPND_SVE_I1_HALF_TWO:
3160 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3161 {
3162 single_conv_t c;
3163 c.i = opnd->imm.value;
3164 snprintf (buf, size, "#%.1f", c.f);
3165 break;
3166 }
3167
3168 case AARCH64_OPND_SVE_PATTERN:
3169 if (optional_operand_p (opcode, idx)
3170 && opnd->imm.value == get_optional_operand_default_value (opcode))
3171 break;
3172 enum_value = opnd->imm.value;
3173 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3174 if (aarch64_sve_pattern_array[enum_value])
3175 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3176 else
3177 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3178 break;
3179
3180 case AARCH64_OPND_SVE_PATTERN_SCALED:
3181 if (optional_operand_p (opcode, idx)
3182 && !opnd->shifter.operator_present
3183 && opnd->imm.value == get_optional_operand_default_value (opcode))
3184 break;
3185 enum_value = opnd->imm.value;
3186 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3187 if (aarch64_sve_pattern_array[opnd->imm.value])
3188 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3189 else
3190 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3191 if (opnd->shifter.operator_present)
3192 {
3193 size_t len = strlen (buf);
3194 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3195 aarch64_operand_modifiers[opnd->shifter.kind].name,
3196 opnd->shifter.amount);
3197 }
3198 break;
3199
3200 case AARCH64_OPND_SVE_PRFOP:
3201 enum_value = opnd->imm.value;
3202 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3203 if (aarch64_sve_prfop_array[enum_value])
3204 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3205 else
3206 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3207 break;
3208
3209 case AARCH64_OPND_IMM_MOV:
3210 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3211 {
3212 case 4: /* e.g. MOV Wd, #<imm32>. */
3213 {
3214 int imm32 = opnd->imm.value;
3215 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3216 }
3217 break;
3218 case 8: /* e.g. MOV Xd, #<imm64>. */
3219 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3220 opnd->imm.value, opnd->imm.value);
3221 break;
3222 default: assert (0);
3223 }
3224 break;
3225
3226 case AARCH64_OPND_FPIMM0:
3227 snprintf (buf, size, "#0.0");
3228 break;
3229
3230 case AARCH64_OPND_LIMM:
3231 case AARCH64_OPND_AIMM:
3232 case AARCH64_OPND_HALF:
3233 case AARCH64_OPND_SVE_INV_LIMM:
3234 case AARCH64_OPND_SVE_LIMM:
3235 case AARCH64_OPND_SVE_LIMM_MOV:
3236 if (opnd->shifter.amount)
3237 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3238 opnd->shifter.amount);
3239 else
3240 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3241 break;
3242
3243 case AARCH64_OPND_SIMD_IMM:
3244 case AARCH64_OPND_SIMD_IMM_SFT:
3245 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3246 || opnd->shifter.kind == AARCH64_MOD_NONE)
3247 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3248 else
3249 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3250 aarch64_operand_modifiers[opnd->shifter.kind].name,
3251 opnd->shifter.amount);
3252 break;
3253
3254 case AARCH64_OPND_SVE_AIMM:
3255 case AARCH64_OPND_SVE_ASIMM:
3256 if (opnd->shifter.amount)
3257 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3258 opnd->shifter.amount);
3259 else
3260 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3261 break;
3262
3263 case AARCH64_OPND_FPIMM:
3264 case AARCH64_OPND_SIMD_FPIMM:
3265 case AARCH64_OPND_SVE_FPIMM8:
3266 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3267 {
3268 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3269 {
3270 half_conv_t c;
3271 c.i = expand_fp_imm (2, opnd->imm.value);
3272 snprintf (buf, size, "#%.18e", c.f);
3273 }
3274 break;
3275 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3276 {
3277 single_conv_t c;
3278 c.i = expand_fp_imm (4, opnd->imm.value);
3279 snprintf (buf, size, "#%.18e", c.f);
3280 }
3281 break;
3282 case 8: /* e.g. FMOV <Dd>, #<imm>. */
3283 {
3284 double_conv_t c;
3285 c.i = expand_fp_imm (8, opnd->imm.value);
3286 snprintf (buf, size, "#%.18e", c.d);
3287 }
3288 break;
3289 default: assert (0);
3290 }
3291 break;
3292
3293 case AARCH64_OPND_CCMP_IMM:
3294 case AARCH64_OPND_NZCV:
3295 case AARCH64_OPND_EXCEPTION:
3296 case AARCH64_OPND_UIMM4:
3297 case AARCH64_OPND_UIMM7:
3298 if (optional_operand_p (opcode, idx) == TRUE
3299 && (opnd->imm.value ==
3300 (int64_t) get_optional_operand_default_value (opcode)))
3301 /* Omit the operand, e.g. DCPS1. */
3302 break;
3303 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3304 break;
3305
3306 case AARCH64_OPND_COND:
3307 case AARCH64_OPND_COND1:
3308 snprintf (buf, size, "%s", opnd->cond->names[0]);
3309 num_conds = ARRAY_SIZE (opnd->cond->names);
3310 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3311 {
3312 size_t len = strlen (buf);
3313 if (i == 1)
3314 snprintf (buf + len, size - len, " // %s = %s",
3315 opnd->cond->names[0], opnd->cond->names[i]);
3316 else
3317 snprintf (buf + len, size - len, ", %s",
3318 opnd->cond->names[i]);
3319 }
3320 break;
3321
3322 case AARCH64_OPND_ADDR_ADRP:
3323 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3324 + opnd->imm.value;
3325 if (pcrel_p)
3326 *pcrel_p = 1;
3327 if (address)
3328 *address = addr;
3329 /* This is not necessary during disassembling, as print_address_func
3330 in the disassemble_info will take care of the printing. But some
3331 other callers may still be interested in getting the string in *BUF,
3332 so here we do snprintf regardless. */
3333 snprintf (buf, size, "#0x%" PRIx64, addr);
3334 break;
3335
3336 case AARCH64_OPND_ADDR_PCREL14:
3337 case AARCH64_OPND_ADDR_PCREL19:
3338 case AARCH64_OPND_ADDR_PCREL21:
3339 case AARCH64_OPND_ADDR_PCREL26:
3340 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3341 if (pcrel_p)
3342 *pcrel_p = 1;
3343 if (address)
3344 *address = addr;
3345 /* This is not necessary during disassembling, as print_address_func
3346 in the disassemble_info will take care of the printing. But some
3347 other callers may still be interested in getting the string in *BUF,
3348 so here we do snprintf regardless. */
3349 snprintf (buf, size, "#0x%" PRIx64, addr);
3350 break;
3351
3352 case AARCH64_OPND_ADDR_SIMPLE:
3353 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3354 case AARCH64_OPND_SIMD_ADDR_POST:
3355 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3356 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3357 {
3358 if (opnd->addr.offset.is_reg)
3359 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3360 else
3361 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3362 }
3363 else
3364 snprintf (buf, size, "[%s]", name);
3365 break;
3366
3367 case AARCH64_OPND_ADDR_REGOFF:
3368 case AARCH64_OPND_SVE_ADDR_RR:
3369 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3370 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3371 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3372 case AARCH64_OPND_SVE_ADDR_RX:
3373 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3374 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3375 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3376 print_register_offset_address
3377 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3378 get_offset_int_reg_name (opnd));
3379 break;
3380
3381 case AARCH64_OPND_SVE_ADDR_RZ:
3382 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3383 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3384 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3385 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3386 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3387 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3388 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3389 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3390 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3391 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3392 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3393 print_register_offset_address
3394 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3395 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3396 break;
3397
3398 case AARCH64_OPND_ADDR_SIMM7:
3399 case AARCH64_OPND_ADDR_SIMM9:
3400 case AARCH64_OPND_ADDR_SIMM9_2:
3401 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3402 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3403 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3404 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3405 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3406 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3407 case AARCH64_OPND_SVE_ADDR_RI_U6:
3408 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3409 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3410 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3411 print_immediate_offset_address
3412 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3413 break;
3414
3415 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3416 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3417 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3418 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3419 print_immediate_offset_address
3420 (buf, size, opnd,
3421 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3422 break;
3423
3424 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3425 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3426 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3427 print_register_offset_address
3428 (buf, size, opnd,
3429 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3430 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3431 break;
3432
3433 case AARCH64_OPND_ADDR_UIMM12:
3434 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3435 if (opnd->addr.offset.imm)
3436 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
3437 else
3438 snprintf (buf, size, "[%s]", name);
3439 break;
3440
3441 case AARCH64_OPND_SYSREG:
3442 for (i = 0; aarch64_sys_regs[i].name; ++i)
3443 if (aarch64_sys_regs[i].value == opnd->sysreg
3444 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
3445 break;
3446 if (aarch64_sys_regs[i].name)
3447 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
3448 else
3449 {
3450 /* Implementation defined system register. */
3451 unsigned int value = opnd->sysreg;
3452 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3453 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3454 value & 0x7);
3455 }
3456 break;
3457
3458 case AARCH64_OPND_PSTATEFIELD:
3459 for (i = 0; aarch64_pstatefields[i].name; ++i)
3460 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3461 break;
3462 assert (aarch64_pstatefields[i].name);
3463 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3464 break;
3465
3466 case AARCH64_OPND_SYSREG_AT:
3467 case AARCH64_OPND_SYSREG_DC:
3468 case AARCH64_OPND_SYSREG_IC:
3469 case AARCH64_OPND_SYSREG_TLBI:
3470 snprintf (buf, size, "%s", opnd->sysins_op->name);
3471 break;
3472
3473 case AARCH64_OPND_BARRIER:
3474 snprintf (buf, size, "%s", opnd->barrier->name);
3475 break;
3476
3477 case AARCH64_OPND_BARRIER_ISB:
3478 /* Operand can be omitted, e.g. in ISB. */
3479 if (! optional_operand_p (opcode, idx)
3480 || (opnd->barrier->value
3481 != get_optional_operand_default_value (opcode)))
3482 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3483 break;
3484
3485 case AARCH64_OPND_PRFOP:
3486 if (opnd->prfop->name != NULL)
3487 snprintf (buf, size, "%s", opnd->prfop->name);
3488 else
3489 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3490 break;
3491
3492 case AARCH64_OPND_BARRIER_PSB:
3493 snprintf (buf, size, "%s", opnd->hint_option->name);
3494 break;
3495
3496 default:
3497 assert (0);
3498 }
3499 }
3500 \f
3501 #define CPENC(op0,op1,crn,crm,op2) \
3502 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3503 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
3504 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3505 /* for 3.9.10 System Instructions */
3506 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
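/* The packed value is thus (op0 << 14) | (op1 << 11) | (crn << 7)
   | (crm << 3) | op2, which is exactly what the "s%u_%u_c%u_c%u_%u"
   fallback in aarch64_print_operand unpacks for unnamed system registers. */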
3507
3508 #define C0 0
3509 #define C1 1
3510 #define C2 2
3511 #define C3 3
3512 #define C4 4
3513 #define C5 5
3514 #define C6 6
3515 #define C7 7
3516 #define C8 8
3517 #define C9 9
3518 #define C10 10
3519 #define C11 11
3520 #define C12 12
3521 #define C13 13
3522 #define C14 14
3523 #define C15 15
3524
3525 #ifdef F_DEPRECATED
3526 #undef F_DEPRECATED
3527 #endif
3528 #define F_DEPRECATED 0x1 /* Deprecated system register. */
3529
3530 #ifdef F_ARCHEXT
3531 #undef F_ARCHEXT
3532 #endif
3533 #define F_ARCHEXT 0x2 /* Architecture dependent system register. */
3534
3535 #ifdef F_HASXT
3536 #undef F_HASXT
3537 #endif
3538 #define F_HASXT 0x4 /* System instruction register <Xt>
3539 operand. */
3540
3541
3542 /* TODO: there are two more issues that need to be resolved:
3543 1. handle read-only and write-only system registers
3544 2. handle cpu-implementation-defined system registers. */
3545 const aarch64_sys_reg aarch64_sys_regs [] =
3546 {
3547 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3548 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3549 { "elr_el1", CPEN_(0,C0,1), 0 },
3550 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3551 { "sp_el0", CPEN_(0,C1,0), 0 },
3552 { "spsel", CPEN_(0,C2,0), 0 },
3553 { "daif", CPEN_(3,C2,1), 0 },
3554 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
3555 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3556 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3557 { "nzcv", CPEN_(3,C2,0), 0 },
3558 { "fpcr", CPEN_(3,C4,0), 0 },
3559 { "fpsr", CPEN_(3,C4,1), 0 },
3560 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3561 { "dlr_el0", CPEN_(3,C5,1), 0 },
3562 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3563 { "elr_el2", CPEN_(4,C0,1), 0 },
3564 { "sp_el1", CPEN_(4,C1,0), 0 },
3565 { "spsr_irq", CPEN_(4,C3,0), 0 },
3566 { "spsr_abt", CPEN_(4,C3,1), 0 },
3567 { "spsr_und", CPEN_(4,C3,2), 0 },
3568 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3569 { "spsr_el3", CPEN_(6,C0,0), 0 },
3570 { "elr_el3", CPEN_(6,C0,1), 0 },
3571 { "sp_el2", CPEN_(6,C1,0), 0 },
3572 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3573 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3574 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
3575 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
3576 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
3577 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
3578 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
3579 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
3580 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
3581 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
3582 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
3583 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
3584 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
3585 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
3586 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
3587 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
3588 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
3589 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
3590 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
3591 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
3592 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
3593 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
3594 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
3595 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
3596 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
3597 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
3598 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
3599 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
3600 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
3601 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
3602 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
3603 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
3604 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
3605 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
3606 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
3607 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
3608 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
3609 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
3610 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
3611 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
3612 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3613 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3614 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3615 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3616 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3617 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3618 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3619 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3620 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3621 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3622 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3623 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3624 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3625 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3626 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3627 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3628 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3629 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3630 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3631 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3632 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3633 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3634 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3635 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3636 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3637 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3638 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3639 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3640 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3641 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3642 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3643 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3644 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3645 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3646 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3647 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3648 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3649 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3650 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3651 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3652 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3653 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3654 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3655 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3656   { "vsesr_el2",        CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RW */
3657 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3658 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
3659 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3660 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
3661 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3662 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3663 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3664 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3665 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3666 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3667 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3668 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3669 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3670 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3671 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3672 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3673 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3674 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3675 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3676 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3677 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3678 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3679 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3680 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3681 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3682 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3683 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3684 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
3685 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
3686 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
3687 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3688 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3689 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3690 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
3691 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3692 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3693 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3694 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3695 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3696 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3697 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
3698 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3699 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3700 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3701 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3702 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
3703 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
3704 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
3705 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3706 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3707 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3708 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3709 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3710 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3711 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3712 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3713 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3714 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3715 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3716 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3717 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3718 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3719 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3720 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3721 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3722 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3723 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3724 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3725 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3726 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3727 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3728 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3729 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3730 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3731 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3732 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3733 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3734 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3735 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3736 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3737 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3738 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3739 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3740 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3741 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3742 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3743 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3744 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3745 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3746 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3747 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3748 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3749 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3750 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3751 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3752 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3753 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3754 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3755 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3756 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3757 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3758 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3759 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3760 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3761 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3762 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3763 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3764 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3765 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3766 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3767 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3768 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3769 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3770 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3771 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3772 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3773 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3774 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3775 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3776 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3777 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3778 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3779 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3780 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3781 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3782 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3783 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3784 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3785 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3786 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3787 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3788 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3789 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3790 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3791 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3792 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3793 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3794 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3795 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3796 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3797 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3798 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3799 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3800 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3801 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3802 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3803 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3804 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3805 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3806 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3807 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3808 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3809 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3810 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3811 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3812 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3813 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3814 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3815 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3816 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3817 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3818 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3819 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3820 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3821 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3822 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3823 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3824 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3825 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3826 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3827 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3828 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3829 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3830 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3831 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3832 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3833 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3834 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3835 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3836 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3837 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3838 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3839 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3840 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3841 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3842 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3843 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3844 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3845 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3846 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3847 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3848 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3849 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3850 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3851 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3852 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3853 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3854 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3855 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3856 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3857 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3858 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3859 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3860 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3861 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3862 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3863 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3864 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3865 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3866 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3867 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3868 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3869 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3870 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3871 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3872 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3873 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3874 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3875 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3876 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3877 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
3878 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
3879 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
3880 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
3881 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
3882 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
3883 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
3884 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
3885 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
3886 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
3887 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
3888 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
3889 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
3890 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3891 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3892 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3893 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3894 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3895 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3896 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3897 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3898 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3899 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3900 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3901 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3902 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3903 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3904 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3905 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3906 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
3907 { 0, CPENC(0,0,0,0,0), 0 },
3908 };
3909
3910 bfd_boolean
3911 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3912 {
3913 return (reg->flags & F_DEPRECATED) != 0;
3914 }
3915
3916 bfd_boolean
3917 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
3918 const aarch64_sys_reg *reg)
3919 {
3920 if (!(reg->flags & F_ARCHEXT))
3921 return TRUE;
3922
3923 /* PAN. Values are from aarch64_sys_regs. */
3924 if (reg->value == CPEN_(0,C2,3)
3925 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3926 return FALSE;
3927
3928 /* Virtualization host extensions: system registers. */
3929 if ((reg->value == CPENC (3, 4, C2, C0, 1)
3930 || reg->value == CPENC (3, 4, C13, C0, 1)
3931 || reg->value == CPENC (3, 4, C14, C3, 0)
3932 || reg->value == CPENC (3, 4, C14, C3, 1)
3933 || reg->value == CPENC (3, 4, C14, C3, 2))
3934 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3935 return FALSE;
3936
3937 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3938 if ((reg->value == CPEN_ (5, C0, 0)
3939 || reg->value == CPEN_ (5, C0, 1)
3940 || reg->value == CPENC (3, 5, C1, C0, 0)
3941 || reg->value == CPENC (3, 5, C1, C0, 2)
3942 || reg->value == CPENC (3, 5, C2, C0, 0)
3943 || reg->value == CPENC (3, 5, C2, C0, 1)
3944 || reg->value == CPENC (3, 5, C2, C0, 2)
3945 || reg->value == CPENC (3, 5, C5, C1, 0)
3946 || reg->value == CPENC (3, 5, C5, C1, 1)
3947 || reg->value == CPENC (3, 5, C5, C2, 0)
3948 || reg->value == CPENC (3, 5, C6, C0, 0)
3949 || reg->value == CPENC (3, 5, C10, C2, 0)
3950 || reg->value == CPENC (3, 5, C10, C3, 0)
3951 || reg->value == CPENC (3, 5, C12, C0, 0)
3952 || reg->value == CPENC (3, 5, C13, C0, 1)
3953 || reg->value == CPENC (3, 5, C14, C1, 0))
3954 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3955 return FALSE;
3956
3957 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3958 if ((reg->value == CPENC (3, 5, C14, C2, 0)
3959 || reg->value == CPENC (3, 5, C14, C2, 1)
3960 || reg->value == CPENC (3, 5, C14, C2, 2)
3961 || reg->value == CPENC (3, 5, C14, C3, 0)
3962 || reg->value == CPENC (3, 5, C14, C3, 1)
3963 || reg->value == CPENC (3, 5, C14, C3, 2))
3964 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3965 return FALSE;
3966
3967 /* ARMv8.2 features. */
3968
3969 /* ID_AA64MMFR2_EL1. */
3970 if (reg->value == CPENC (3, 0, C0, C7, 2)
3971 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3972 return FALSE;
3973
3974 /* PSTATE.UAO. */
3975 if (reg->value == CPEN_ (0, C2, 4)
3976 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3977 return FALSE;
3978
3979 /* RAS extension. */
3980
3981   /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
3982      ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
3983 if ((reg->value == CPENC (3, 0, C5, C3, 0)
3984 || reg->value == CPENC (3, 0, C5, C3, 1)
3985 || reg->value == CPENC (3, 0, C5, C3, 2)
3986 || reg->value == CPENC (3, 0, C5, C3, 3)
3987 || reg->value == CPENC (3, 0, C5, C4, 0)
3988 || reg->value == CPENC (3, 0, C5, C4, 1)
3989 || reg->value == CPENC (3, 0, C5, C4, 2)
3990 || reg->value == CPENC (3, 0, C5, C4, 3)
3991 || reg->value == CPENC (3, 0, C5, C5, 0)
3992 || reg->value == CPENC (3, 0, C5, C5, 1))
3993 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3994 return FALSE;
3995
3996 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3997 if ((reg->value == CPENC (3, 4, C5, C2, 3)
3998 || reg->value == CPENC (3, 0, C12, C1, 1)
3999 || reg->value == CPENC (3, 4, C12, C1, 1))
4000 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4001 return FALSE;
4002
4003 /* Statistical Profiling extension. */
4004 if ((reg->value == CPENC (3, 0, C9, C10, 0)
4005 || reg->value == CPENC (3, 0, C9, C10, 1)
4006 || reg->value == CPENC (3, 0, C9, C10, 3)
4007 || reg->value == CPENC (3, 0, C9, C10, 7)
4008 || reg->value == CPENC (3, 0, C9, C9, 0)
4009 || reg->value == CPENC (3, 0, C9, C9, 2)
4010 || reg->value == CPENC (3, 0, C9, C9, 3)
4011 || reg->value == CPENC (3, 0, C9, C9, 4)
4012 || reg->value == CPENC (3, 0, C9, C9, 5)
4013 || reg->value == CPENC (3, 0, C9, C9, 6)
4014 || reg->value == CPENC (3, 0, C9, C9, 7)
4015 || reg->value == CPENC (3, 4, C9, C9, 0)
4016 || reg->value == CPENC (3, 5, C9, C9, 0))
4017 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
4018 return FALSE;
4019
4020 return TRUE;
4021 }
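
/* Illustrative use of the two predicates above: an assembler front end
   would typically accept a system register name only when it is supported
   by the selected feature set, and warn when it is deprecated, e.g.

     if (!aarch64_sys_reg_supported_p (cpu_variant, reg))
       reject the register name;
     else if (aarch64_sys_reg_deprecated_p (reg))
       warn about the deprecated name;

   where cpu_variant stands for the caller's active aarch64_feature_set.  */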
4022
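/* PSTATE fields writable with MSR (immediate).  The value is the field's
   selector, i.e. (op1 << 3) | op2.  */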
4023 const aarch64_sys_reg aarch64_pstatefields [] =
4024 {
4025 { "spsel", 0x05, 0 },
4026 { "daifset", 0x1e, 0 },
4027 { "daifclr", 0x1f, 0 },
4028 { "pan", 0x04, F_ARCHEXT },
4029 { "uao", 0x03, F_ARCHEXT },
4030 { 0, CPENC(0,0,0,0,0), 0 },
4031 };
4032
4033 bfd_boolean
4034 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4035 const aarch64_sys_reg *reg)
4036 {
4037 if (!(reg->flags & F_ARCHEXT))
4038 return TRUE;
4039
4040 /* PAN. Values are from aarch64_pstatefields. */
4041 if (reg->value == 0x04
4042 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4043 return FALSE;
4044
4045 /* UAO. Values are from aarch64_pstatefields. */
4046 if (reg->value == 0x03
4047 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4048 return FALSE;
4049
4050 return TRUE;
4051 }
4052
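/* Operand tables for the IC, DC, AT and TLBI system instructions.  F_HASXT
   marks operations that take an Xt register operand (e.g. "ic ivau, x0");
   F_ARCHEXT marks operations added by an architecture extension.  */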
4053 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
4054 {
4055 { "ialluis", CPENS(0,C7,C1,0), 0 },
4056 { "iallu", CPENS(0,C7,C5,0), 0 },
4057 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
4058 { 0, CPENS(0,0,0,0), 0 }
4059 };
4060
4061 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
4062 {
4063 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
4064 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
4065 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
4066 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
4067 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
4068 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
4069 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
4070 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
4071 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
4072 { 0, CPENS(0,0,0,0), 0 }
4073 };
4074
4075 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
4076 {
4077 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
4078 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
4079 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
4080 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
4081 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
4082 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
4083 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
4084 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
4085 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
4086 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
4087 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
4088 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
4089 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
4090 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
4091 { 0, CPENS(0,0,0,0), 0 }
4092 };
4093
4094 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
4095 {
4096 { "vmalle1", CPENS(0,C8,C7,0), 0 },
4097 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
4098 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
4099 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
4100 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
4101 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
4102 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
4103 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
4104 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
4105 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
4106 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
4107 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
4108 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
4109 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
4110 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
4111 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
4112 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
4113 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
4114 { "alle2", CPENS(4,C8,C7,0), 0 },
4115 { "alle2is", CPENS(4,C8,C3,0), 0 },
4116 { "alle1", CPENS(4,C8,C7,4), 0 },
4117 { "alle1is", CPENS(4,C8,C3,4), 0 },
4118 { "alle3", CPENS(6,C8,C7,0), 0 },
4119 { "alle3is", CPENS(6,C8,C3,0), 0 },
4120 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
4121 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
4122 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
4123 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
4124 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
4125 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
4126 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
4127 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
4128 { 0, CPENS(0,0,0,0), 0 }
4129 };
4130
4131 bfd_boolean
4132 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4133 {
4134 return (sys_ins_reg->flags & F_HASXT) != 0;
4135 }
4136
4137 extern bfd_boolean
4138 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4139 const aarch64_sys_ins_reg *reg)
4140 {
4141 if (!(reg->flags & F_ARCHEXT))
4142 return TRUE;
4143
4144 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4145 if (reg->value == CPENS (3, C7, C12, 1)
4146 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4147 return FALSE;
4148
4149 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4150 if ((reg->value == CPENS (0, C7, C9, 0)
4151 || reg->value == CPENS (0, C7, C9, 1))
4152 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4153 return FALSE;
4154
4155 return TRUE;
4156 }
4157
4158 #undef C0
4159 #undef C1
4160 #undef C2
4161 #undef C3
4162 #undef C4
4163 #undef C5
4164 #undef C6
4165 #undef C7
4166 #undef C8
4167 #undef C9
4168 #undef C10
4169 #undef C11
4170 #undef C12
4171 #undef C13
4172 #undef C14
4173 #undef C15
4174
4175 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
4176 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4177
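/* Verifier for LDPSW: flag operand combinations that the architecture
   treats as unpredictable, i.e. writeback with the base register equal to
   either transfer register (unless the base is register 31, SP), or a
   load with both transfer registers the same.  */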
4178 static bfd_boolean
4179 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4180 const aarch64_insn insn)
4181 {
4182 int t = BITS (insn, 4, 0);
4183 int n = BITS (insn, 9, 5);
4184 int t2 = BITS (insn, 14, 10);
4185
4186 if (BIT (insn, 23))
4187 {
4188 /* Write back enabled. */
4189 if ((t == n || t2 == n) && n != 31)
4190 return FALSE;
4191 }
4192
4193 if (BIT (insn, 22))
4194 {
4195 /* Load */
4196 if (t == t2)
4197 return FALSE;
4198 }
4199
4200 return TRUE;
4201 }
4202
4203 /* Return true if VALUE cannot be moved into an SVE register using DUP
4204 (with any element size, not just ESIZE) and if using DUPM would
4205 therefore be OK. ESIZE is the number of bytes in the immediate. */
4206
4207 bfd_boolean
4208 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
4209 {
4210 int64_t svalue = uvalue;
4211 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
4212
4213 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
4214 return FALSE;
4215 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
4216 {
4217 svalue = (int32_t) uvalue;
4218 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
4219 {
4220 svalue = (int16_t) uvalue;
4221 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
4222 return FALSE;
4223 }
4224 }
4225 if ((svalue & 0xff) == 0)
4226 svalue /= 256;
4227 return svalue < -128 || svalue >= 128;
4228 }
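
/* For instance, with ESIZE == 4: the value 0x55 is within DUP's immediate
   range, so the function returns FALSE, whereas 0xffff is not encodable by
   DUP at any element size but is a valid DUPM bitmask, so it returns
   TRUE.  */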
4229
4230 /* Include the opcode description table as well as the operand description
4231 table. */
4232 #define VERIFIER(x) verify_##x
4233 #include "aarch64-tbl.h"