[AArch64][SVE 28/32] Add SVE FP immediate operands
[deliverable/binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* When non-zero, the DEBUG_AARCH64 helpers below dump qualifier-matching
   traces to stdout.  Only built in debug configurations.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  Indices not listed below are reserved encodings
   and are left as null pointers.  */
const char *const aarch64_sve_pattern_array[32] = {
  [0] = "pow2",
  [1] = "vl1",
  [2] = "vl2",
  [3] = "vl3",
  [4] = "vl4",
  [5] = "vl5",
  [6] = "vl6",
  [7] = "vl7",
  [8] = "vl8",
  [9] = "vl16",
  [10] = "vl32",
  [11] = "vl64",
  [12] = "vl128",
  [13] = "vl256",
  [29] = "mul4",
  [30] = "mul3",
  [31] = "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  Indices not listed below are reserved encodings
   and are left as null pointers.  */
const char *const aarch64_sve_prfop_array[16] = {
  [0] = "pldl1keep",
  [1] = "pldl1strm",
  [2] = "pldl2keep",
  [3] = "pldl2strm",
  [4] = "pldl3keep",
  [5] = "pldl3strm",
  [8] = "pstl1keep",
  [9] = "pstl1strm",
  [10] = "pstl2keep",
  [11] = "pstl2strm",
  [12] = "pstl3keep",
  [13] = "pstl3strm"
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Classification of the qualifier pattern of an AdvSIMD instruction's
   operand list; used to pick which operand carries the size:Q
   encoding.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};

/* Index of the operand that determines the size:Q encoding, indexed by
   enum data_pattern.  Keep in sync with the enum above.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
201 \f
/* Description of every instruction bit-field referenced by the encoder and
   decoder; each entry is { lsb, width }.  The table is indexed by enum
   aarch64_field_kind (aarch64-opc.h) -- the entry order here must match
   that enum exactly, so keep the two in sync when adding fields.  */
const aarch64_field fields[] =
{
    { 0, 0 },	/* NIL.  Placeholder; zero-width.  */
    { 0, 4 },	/* cond2: condition in truly conditional-executed inst.  */
    { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 5, 19 },	/* imm19: e.g. in CBZ.  */
    { 5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29, 2 },	/* immlo: e.g. in ADRP.  */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
    { 0, 5 },	/* Rt: in load/store instructions.  */
    { 0, 5 },	/* Rd: in many integer instructions.  */
    { 5, 5 },	/* Rn: in many integer instructions.  */
    { 10, 5 },	/* Rt2: in load/store pair instructions.  */
    { 10, 5 },	/* Ra: in fp instructions.  */
    { 5, 3 },	/* op2: in the system instructions.  */
    { 8, 4 },	/* CRm: in the system instructions.  */
    { 12, 4 },	/* CRn: in the system instructions.  */
    { 16, 3 },	/* op1: in the system instructions.  */
    { 19, 2 },	/* op0: in the system instructions.  */
    { 10, 3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12, 4 },	/* cond: condition flags as a source operand.  */
    { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12, 1 },	/* S: in load/store reg offset instructions.  */
    { 21, 2 },	/* hw: in move wide constant instructions.  */
    { 22, 2 },	/* opc: in load/store reg offset instructions.  */
    { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22, 2 },	/* type: floating point type field in fp data inst.  */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10, 6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    { 5, 14 },	/* imm14: in test bit and branch instructions.  */
    { 5, 16 },	/* imm16: in exception instructions.  */
    { 0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22, 1 },	/* N: in logical (immediate) instructions.  */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31, 1 },	/* sf: in integer data processing instructions.  */
    { 30, 1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31, 1 },	/* b5: in the test bit and branch instructions.  */
    { 19, 5 },	/* b40: in the test bit and branch instructions.  */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 17, 1 },	/* SVE_N: SVE equivalent of N.  */
    { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    { 5, 1 },	/* SVE_i1: single-bit immediate.  */
    { 16, 3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16, 4 },	/* SVE_imm4: 4-bit immediate field.  */
    { 5, 5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16, 5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16, 6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14, 7 },	/* SVE_imm7: 7-bit immediate field.  */
    { 5, 8 },	/* SVE_imm8: 8-bit immediate field.  */
    { 5, 9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11, 6 },	/* SVE_immr: SVE equivalent of immr.  */
    { 5, 6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    { 5, 5 },	/* SVE_pattern: vector pattern enumeration.  */
    { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22, 1 }	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
};
301
302 enum aarch64_operand_class
303 aarch64_get_operand_class (enum aarch64_opnd type)
304 {
305 return aarch64_operands[type].op_class;
306 }
307
308 const char *
309 aarch64_get_operand_name (enum aarch64_opnd type)
310 {
311 return aarch64_operands[type].name;
312 }
313
314 /* Get operand description string.
315 This is usually for the diagnosis purpose. */
316 const char *
317 aarch64_get_operand_desc (enum aarch64_opnd type)
318 {
319 return aarch64_operands[type].desc;
320 }
321
/* Table of all conditional affixes, indexed by the 4-bit condition code.
   Each entry lists the accepted spellings (the first is the one used when
   disassembling) together with the encoded value.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq"}, 0x0},
  {{"ne"}, 0x1},
  {{"cs", "hs"}, 0x2},
  {{"cc", "lo", "ul"}, 0x3},
  {{"mi"}, 0x4},
  {{"pl"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi"}, 0x8},
  {{"ls"}, 0x9},
  {{"ge"}, 0xa},
  {{"lt"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
342
343 const aarch64_cond *
344 get_cond_from_value (aarch64_insn value)
345 {
346 assert (value < 16);
347 return &aarch64_conds[(unsigned int) value];
348 }
349
350 const aarch64_cond *
351 get_inverted_cond (const aarch64_cond *cond)
352 {
353 return &aarch64_conds[cond->value ^ 0x1];
354 }
355
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind (keep the order in sync with that enum).

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
381
382 enum aarch64_modifier_kind
383 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
384 {
385 return desc - aarch64_operand_modifiers;
386 }
387
388 aarch64_insn
389 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
390 {
391 return aarch64_operand_modifiers[kind].value;
392 }
393
394 enum aarch64_modifier_kind
395 aarch64_get_operand_modifier_from_value (aarch64_insn value,
396 bfd_boolean extend_p)
397 {
398 if (extend_p == TRUE)
399 return AARCH64_MOD_UXTB + value;
400 else
401 return AARCH64_MOD_LSL - value;
402 }
403
404 bfd_boolean
405 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
406 {
407 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
408 ? TRUE : FALSE;
409 }
410
411 static inline bfd_boolean
412 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
413 {
414 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
415 ? TRUE : FALSE;
416 }
417
/* Table of DMB/DSB barrier option names, indexed by the 4-bit CRm
   encoding.  Reserved encodings are spelled as their raw immediate
   (e.g. "#0x00") so the disassembler can always print something.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
437
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is
   terminated by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },    /* PSB CSYNC.  */
  { NULL, 0x0 },
};
450
/* Table of PRFM prefetch operation names, indexed by the 5-bit prfop
   encoding.  Reserved encodings have a NULL name and are printed as their
   raw immediate by callers.  The encoding is built from:
   op -> op: load = 0 instruction = 1 store = 2
   l  -> level: 1-3
   t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
491 \f
/* Utilities on value constraint.  */

/* Return 1 iff VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low || value > high)
    return 0;
  return 1;
}
499
/* Return non-zero if VALUE is a multiple of ALIGN.  ALIGN must be
   non-zero.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return value % align == 0 ? 1 : 0;
}
506
/* Return 1 iff VALUE is representable as a two's-complement signed field
   of WIDTH bits, i.e. -(2^(WIDTH-1)) <= VALUE < 2^(WIDTH-1).  WIDTH must
   be below 32 (instruction fields never get that wide).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;

  {
    int64_t limit = (int64_t) 1 << (width - 1);
    return (-limit <= value && value < limit) ? 1 : 0;
  }
}
520
/* Return 1 iff VALUE is representable as an unsigned field of WIDTH bits,
   i.e. 0 <= VALUE < 2^WIDTH.  WIDTH must be below 32.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;

  {
    int64_t limit = (int64_t) 1 << width;
    return (0 <= value && value < limit) ? 1 : 0;
  }
}
534
535 /* Return 1 if OPERAND is SP or WSP. */
536 int
537 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
538 {
539 return ((aarch64_get_operand_class (operand->type)
540 == AARCH64_OPND_CLASS_INT_REG)
541 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
542 && operand->reg.regno == 31);
543 }
544
545 /* Return 1 if OPERAND is XZR or WZP. */
546 int
547 aarch64_zero_register_p (const aarch64_opnd_info *operand)
548 {
549 return ((aarch64_get_operand_class (operand->type)
550 == AARCH64_OPND_CLASS_INT_REG)
551 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
552 && operand->reg.regno == 31);
553 }
554
555 /* Return true if the operand *OPERAND that has the operand code
556 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
557 qualified by the qualifier TARGET. */
558
559 static inline int
560 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
561 aarch64_opnd_qualifier_t target)
562 {
563 switch (operand->qualifier)
564 {
565 case AARCH64_OPND_QLF_W:
566 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
567 return 1;
568 break;
569 case AARCH64_OPND_QLF_X:
570 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
571 return 1;
572 break;
573 case AARCH64_OPND_QLF_WSP:
574 if (target == AARCH64_OPND_QLF_W
575 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
576 return 1;
577 break;
578 case AARCH64_OPND_QLF_SP:
579 if (target == AARCH64_OPND_QLF_X
580 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
581 return 1;
582 break;
583 default:
584 break;
585 }
586
587 return 0;
588 }
589
590 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
591 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
592
593 Return NIL if more than one expected qualifiers are found. */
594
595 aarch64_opnd_qualifier_t
596 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
597 int idx,
598 const aarch64_opnd_qualifier_t known_qlf,
599 int known_idx)
600 {
601 int i, saved_i;
602
603 /* Special case.
604
605 When the known qualifier is NIL, we have to assume that there is only
606 one qualifier sequence in the *QSEQ_LIST and return the corresponding
607 qualifier directly. One scenario is that for instruction
608 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
609 which has only one possible valid qualifier sequence
610 NIL, S_D
611 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
612 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
613
614 Because the qualifier NIL has dual roles in the qualifier sequence:
615 it can mean no qualifier for the operand, or the qualifer sequence is
616 not in use (when all qualifiers in the sequence are NILs), we have to
617 handle this special case here. */
618 if (known_qlf == AARCH64_OPND_NIL)
619 {
620 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
621 return qseq_list[0][idx];
622 }
623
624 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
625 {
626 if (qseq_list[i][known_idx] == known_qlf)
627 {
628 if (saved_i != -1)
629 /* More than one sequences are found to have KNOWN_QLF at
630 KNOWN_IDX. */
631 return AARCH64_OPND_NIL;
632 saved_i = i;
633 }
634 }
635
636 return qseq_list[saved_i][idx];
637 }
638
/* The broad category a qualifier description belongs to; it controls how
   the data0/data1/data2 fields of struct operand_qualifier_data are
   interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,	/* data0/1/2 = element size, #elements, encoding.  */
  OQK_VALUE_IN_RANGE,	/* data0/1 = lower/upper bound; data2 unused.  */
  OQK_MISC,		/* data fields unused.  */
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind
     (see enum operand_qualifier_kind above).  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
659
/* Indexed by the operand qualifier enumerators (enum
   aarch64_opnd_qualifier); keep the order in sync with that enum.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  /* SVE predicate qualifiers; sizes are not meaningful here.  */
  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.
     NOTE(review): these entries use a literal 0 kind, which equals
     OQK_NIL rather than OQK_MISC -- confirm whether OQK_MISC was
     intended.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
714
715 static inline bfd_boolean
716 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
717 {
718 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
719 ? TRUE : FALSE;
720 }
721
722 static inline bfd_boolean
723 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
724 {
725 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
726 ? TRUE : FALSE;
727 }
728
729 const char*
730 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
731 {
732 return aarch64_opnd_qualifiers[qualifier].desc;
733 }
734
735 /* Given an operand qualifier, return the expected data element size
736 of a qualified operand. */
737 unsigned char
738 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
739 {
740 assert (operand_variant_qualifier_p (qualifier) == TRUE);
741 return aarch64_opnd_qualifiers[qualifier].data0;
742 }
743
744 unsigned char
745 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
746 {
747 assert (operand_variant_qualifier_p (qualifier) == TRUE);
748 return aarch64_opnd_qualifiers[qualifier].data1;
749 }
750
751 aarch64_insn
752 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
753 {
754 assert (operand_variant_qualifier_p (qualifier) == TRUE);
755 return aarch64_opnd_qualifiers[qualifier].data2;
756 }
757
758 static int
759 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
760 {
761 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
762 return aarch64_opnd_qualifiers[qualifier].data0;
763 }
764
765 static int
766 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
767 {
768 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
769 return aarch64_opnd_qualifiers[qualifier].data1;
770 }
771
#ifdef DEBUG_AARCH64
/* Print a "#### "-prefixed printf-style message followed by a newline.
   Debug builds only.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}

/* Dump the AARCH64_MAX_OPND_NUM qualifiers starting at QUALIFIER on a
   single "#### "-prefixed line.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}

/* Dump the qualifiers currently set on the operands OPND alongside the
   candidate sequence QUALIFIER they are being matched against.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
809
810 /* TODO improve this, we can have an extra field at the runtime to
811 store the number of operands rather than calculating it every time. */
812
813 int
814 aarch64_num_of_operands (const aarch64_opcode *opcode)
815 {
816 int i = 0;
817 const enum aarch64_opnd *opnds = opcode->operands;
818 while (opnds[i++] != AARCH64_OPND_NIL)
819 ;
820 --i;
821 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
822 return i;
823 }
824
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers been established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   A couple examples of the matching algorithm:

   X,W,NIL should match
   X,W,NIL

   NIL,NIL should match
   X  ,NIL

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.  */

int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      /* Nothing to match; trivially successful.  */
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  /* Normalise STOP_AT to a valid last-operand index.  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each candidate qualifier sequence (pattern).  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive; any mismatch below clears FOUND.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched qualifiers up to STOP_AT; pad the rest with
	 NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
949
/* Operand qualifier matching and resolving.

   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.

   If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
   succeeds.  */

static int
match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
{
  int i, nops;
  aarch64_opnd_qualifier_seq_t qualifiers;

  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
			       qualifiers))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  if (inst->opcode->flags & F_STRICT)
    {
      /* Require an exact qualifier match, even for NIL qualifiers.  */
      nops = aarch64_num_of_operands (inst->opcode);
      for (i = 0; i < nops; ++i)
	if (inst->operands[i].qualifier != qualifiers[i])
	  return FALSE;
    }

  /* Update the qualifiers.  */
  if (update_p == TRUE)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
996
/* Return TRUE if VALUE is a wide constant that can be moved into a general
   register by MOVZ.

   IS32 indicates whether value is a 32-bit immediate or not.
   If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
   amount will be returned in *SHIFT_AMOUNT.  */

bfd_boolean
aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
{
  int amount;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);

  if (is32)
    {
      /* Allow all zeros or all ones in top 32-bits, so that
	 32-bit constant expressions like ~0x80000000 are
	 permitted.  */
      uint64_t ext = value;
      if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
	/* Immediate out of range.  */
	return FALSE;
      value &= (int64_t) 0xffffffff;
    }

  /* Check whether VALUE is a single 16-bit chunk at one of the four
     halfword positions (two positions for 32-bit values), i.e. directly
     encodable by MOVZ with the corresponding shift.  */
  amount = -1;
  if ((value & ((int64_t) 0xffff << 0)) == value)
    amount = 0;
  else if ((value & ((int64_t) 0xffff << 16)) == value)
    amount = 16;
  else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
    amount = 32;
  else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
    amount = 48;

  if (amount == -1)
    {
      DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
      return FALSE;
    }

  if (shift_amount != NULL)
    *shift_amount = amount;

  DEBUG_TRACE ("exit TRUE with amount %d", amount);

  return TRUE;
}
1047
1048 /* Build the accepted values for immediate logical SIMD instructions.
1049
1050 The standard encodings of the immediate value are:
1051 N imms immr SIMD size R S
1052 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1053 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1054 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1055 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1056 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1057 0 11110s 00000r 2 UInt(r) UInt(s)
1058 where all-ones value of S is reserved.
1059
1060 Let's call E the SIMD size.
1061
1062 The immediate value is: S+1 bits '1' rotated to the right by R.
1063
1064 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1065 (remember S != E - 1). */
1066
/* Total number of valid bitmask-immediate encodings; see the derivation
   in the comment above (64*63 + 32*31 + ... + 2*1).  */
#define TOTAL_IMM_NB 5334

/* One table entry: the replicated 64-bit immediate value and its 13-bit
   standard encoding (see the bitfield layout comment below).  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Lookup table of all valid logical immediates, sorted by IMM so that
   aarch64_logical_immediate_p can bsearch it; populated lazily by
   build_immediate_table.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1076
1077 static int
1078 simd_imm_encoding_cmp(const void *i1, const void *i2)
1079 {
1080 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1081 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1082
1083 if (imm1->imm < imm2->imm)
1084 return -1;
1085 if (imm1->imm > imm2->imm)
1086 return +1;
1087 return 0;
1088 }
1089
1090 /* immediate bitfield standard encoding
1091 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1092 1 ssssss rrrrrr 64 rrrrrr ssssss
1093 0 0sssss 0rrrrr 32 rrrrr sssss
1094 0 10ssss 00rrrr 16 rrrr ssss
1095 0 110sss 000rrr 8 rrr sss
1096 0 1110ss 0000rr 4 rr ss
1097 0 11110s 00000r 2 r s */
/* Pack the standard imm13 encoding: N (IS64) in bit 12, immr (R) in
   bits 11:6, imms (S) in bits 5:0.  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;
  encoding |= r << 6;
  encoding |= is64 << 12;
  return encoding;
}
1103
/* Fill simd_immediates[] with every valid bitmask immediate paired with
   its standard encoding, then sort the table by immediate value so that
   aarch64_logical_immediate_p can bsearch it.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* Replicate the E-bit element across all 64 bits: every case
	       below deliberately falls through to the next doubling.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1164
1165 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1166 be accepted by logical (immediate) instructions
1167 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1168
1169 ESIZE is the number of bytes in the decoded immediate value.
1170 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1171 VALUE will be returned in *ENCODING. */
1172
1173 bfd_boolean
1174 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1175 {
1176 simd_imm_encoding imm_enc;
1177 const simd_imm_encoding *imm_encoding;
1178 static bfd_boolean initialized = FALSE;
1179 uint64_t upper;
1180 int i;
1181
1182 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1183 value, is32);
1184
1185 if (initialized == FALSE)
1186 {
1187 build_immediate_table ();
1188 initialized = TRUE;
1189 }
1190
1191 /* Allow all zeros or all ones in top bits, so that
1192 constant expressions like ~1 are permitted. */
1193 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1194 if ((value & ~upper) != value && (value | upper) != value)
1195 return FALSE;
1196
1197 /* Replicate to a full 64-bit value. */
1198 value &= ~upper;
1199 for (i = esize * 8; i < 64; i *= 2)
1200 value |= (value << i);
1201
1202 imm_enc.imm = value;
1203 imm_encoding = (const simd_imm_encoding *)
1204 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1205 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1206 if (imm_encoding == NULL)
1207 {
1208 DEBUG_TRACE ("exit with FALSE");
1209 return FALSE;
1210 }
1211 if (encoding != NULL)
1212 *encoding = imm_encoding->encoding;
1213 DEBUG_TRACE ("exit with TRUE");
1214 return TRUE;
1215 }
1216
1217 /* If 64-bit immediate IMM is in the format of
1218 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1219 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1220 of value "abcdefgh". Otherwise return -1. */
/* If 64-bit immediate IMM consists of eight bytes each of which is either
   0x00 or 0xff, collapse each byte to one bit and return the resulting
   8-bit value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int bit;

  for (bit = 0; bit < 8; bit++, imm >>= 8)
    {
      uint32_t lowest = imm & 0xff;
      if (lowest == 0xff)
	result |= 1 << bit;
      else if (lowest != 0x00)
	return -1;
    }
  return result;
}
1238
1239 /* Utility inline functions for operand_general_constraint_met_p. */
1240
1241 static inline void
1242 set_error (aarch64_operand_error *mismatch_detail,
1243 enum aarch64_operand_error_kind kind, int idx,
1244 const char* error)
1245 {
1246 if (mismatch_detail == NULL)
1247 return;
1248 mismatch_detail->kind = kind;
1249 mismatch_detail->index = idx;
1250 mismatch_detail->error = error;
1251 }
1252
1253 static inline void
1254 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1255 const char* error)
1256 {
1257 if (mismatch_detail == NULL)
1258 return;
1259 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1260 }
1261
1262 static inline void
1263 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1264 int idx, int lower_bound, int upper_bound,
1265 const char* error)
1266 {
1267 if (mismatch_detail == NULL)
1268 return;
1269 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1270 mismatch_detail->data[0] = lower_bound;
1271 mismatch_detail->data[1] = upper_bound;
1272 }
1273
1274 static inline void
1275 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1276 int idx, int lower_bound, int upper_bound)
1277 {
1278 if (mismatch_detail == NULL)
1279 return;
1280 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1281 _("immediate value"));
1282 }
1283
1284 static inline void
1285 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1286 int idx, int lower_bound, int upper_bound)
1287 {
1288 if (mismatch_detail == NULL)
1289 return;
1290 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1291 _("immediate offset"));
1292 }
1293
1294 static inline void
1295 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1296 int idx, int lower_bound, int upper_bound)
1297 {
1298 if (mismatch_detail == NULL)
1299 return;
1300 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1301 _("register number"));
1302 }
1303
1304 static inline void
1305 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1306 int idx, int lower_bound, int upper_bound)
1307 {
1308 if (mismatch_detail == NULL)
1309 return;
1310 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1311 _("register element index"));
1312 }
1313
1314 static inline void
1315 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1316 int idx, int lower_bound, int upper_bound)
1317 {
1318 if (mismatch_detail == NULL)
1319 return;
1320 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1321 _("shift amount"));
1322 }
1323
1324 /* Report that the MUL modifier in operand IDX should be in the range
1325 [LOWER_BOUND, UPPER_BOUND]. */
1326 static inline void
1327 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1328 int idx, int lower_bound, int upper_bound)
1329 {
1330 if (mismatch_detail == NULL)
1331 return;
1332 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1333 _("multiplier"));
1334 }
1335
1336 static inline void
1337 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1338 int alignment)
1339 {
1340 if (mismatch_detail == NULL)
1341 return;
1342 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1343 mismatch_detail->data[0] = alignment;
1344 }
1345
1346 static inline void
1347 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1348 int expected_num)
1349 {
1350 if (mismatch_detail == NULL)
1351 return;
1352 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1353 mismatch_detail->data[0] = expected_num;
1354 }
1355
1356 static inline void
1357 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1358 const char* error)
1359 {
1360 if (mismatch_detail == NULL)
1361 return;
1362 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1363 }
1364
1365 /* General constraint checking based on operand code.
1366
1367 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1368 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1369
1370 This function has to be called after the qualifiers for all operands
1371 have been resolved.
1372
1373 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1374 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1375 of error message during the disassembling where error message is not
1376 wanted. We avoid the dynamic construction of strings of error messages
1377 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1378 use a combination of error code, static string and some integer data to
1379 represent an error. */
1380
1381 static int
1382 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1383 enum aarch64_opnd type,
1384 const aarch64_opcode *opcode,
1385 aarch64_operand_error *mismatch_detail)
1386 {
1387 unsigned num, modifiers, shift;
1388 unsigned char size;
1389 int64_t imm, min_value, max_value;
1390 uint64_t uvalue, mask;
1391 const aarch64_opnd_info *opnd = opnds + idx;
1392 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1393
1394 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1395
1396 switch (aarch64_operands[type].op_class)
1397 {
1398 case AARCH64_OPND_CLASS_INT_REG:
1399 /* Check pair reg constraints for cas* instructions. */
1400 if (type == AARCH64_OPND_PAIRREG)
1401 {
1402 assert (idx == 1 || idx == 3);
1403 if (opnds[idx - 1].reg.regno % 2 != 0)
1404 {
1405 set_syntax_error (mismatch_detail, idx - 1,
1406 _("reg pair must start from even reg"));
1407 return 0;
1408 }
1409 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1410 {
1411 set_syntax_error (mismatch_detail, idx,
1412 _("reg pair must be contiguous"));
1413 return 0;
1414 }
1415 break;
1416 }
1417
1418 /* <Xt> may be optional in some IC and TLBI instructions. */
1419 if (type == AARCH64_OPND_Rt_SYS)
1420 {
1421 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1422 == AARCH64_OPND_CLASS_SYSTEM));
1423 if (opnds[1].present
1424 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1425 {
1426 set_other_error (mismatch_detail, idx, _("extraneous register"));
1427 return 0;
1428 }
1429 if (!opnds[1].present
1430 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1431 {
1432 set_other_error (mismatch_detail, idx, _("missing register"));
1433 return 0;
1434 }
1435 }
1436 switch (qualifier)
1437 {
1438 case AARCH64_OPND_QLF_WSP:
1439 case AARCH64_OPND_QLF_SP:
1440 if (!aarch64_stack_pointer_p (opnd))
1441 {
1442 set_other_error (mismatch_detail, idx,
1443 _("stack pointer register expected"));
1444 return 0;
1445 }
1446 break;
1447 default:
1448 break;
1449 }
1450 break;
1451
1452 case AARCH64_OPND_CLASS_SVE_REG:
1453 switch (type)
1454 {
1455 case AARCH64_OPND_SVE_Zn_INDEX:
1456 size = aarch64_get_qualifier_esize (opnd->qualifier);
1457 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1458 {
1459 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1460 0, 64 / size - 1);
1461 return 0;
1462 }
1463 break;
1464
1465 case AARCH64_OPND_SVE_ZnxN:
1466 case AARCH64_OPND_SVE_ZtxN:
1467 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1468 {
1469 set_other_error (mismatch_detail, idx,
1470 _("invalid register list"));
1471 return 0;
1472 }
1473 break;
1474
1475 default:
1476 break;
1477 }
1478 break;
1479
1480 case AARCH64_OPND_CLASS_PRED_REG:
1481 if (opnd->reg.regno >= 8
1482 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1483 {
1484 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1485 return 0;
1486 }
1487 break;
1488
1489 case AARCH64_OPND_CLASS_COND:
1490 if (type == AARCH64_OPND_COND1
1491 && (opnds[idx].cond->value & 0xe) == 0xe)
1492 {
1493 /* Not allow AL or NV. */
1494 set_syntax_error (mismatch_detail, idx, NULL);
1495 }
1496 break;
1497
1498 case AARCH64_OPND_CLASS_ADDRESS:
1499 /* Check writeback. */
1500 switch (opcode->iclass)
1501 {
1502 case ldst_pos:
1503 case ldst_unscaled:
1504 case ldstnapair_offs:
1505 case ldstpair_off:
1506 case ldst_unpriv:
1507 if (opnd->addr.writeback == 1)
1508 {
1509 set_syntax_error (mismatch_detail, idx,
1510 _("unexpected address writeback"));
1511 return 0;
1512 }
1513 break;
1514 case ldst_imm9:
1515 case ldstpair_indexed:
1516 case asisdlsep:
1517 case asisdlsop:
1518 if (opnd->addr.writeback == 0)
1519 {
1520 set_syntax_error (mismatch_detail, idx,
1521 _("address writeback expected"));
1522 return 0;
1523 }
1524 break;
1525 default:
1526 assert (opnd->addr.writeback == 0);
1527 break;
1528 }
1529 switch (type)
1530 {
1531 case AARCH64_OPND_ADDR_SIMM7:
1532 /* Scaled signed 7 bits immediate offset. */
1533 /* Get the size of the data element that is accessed, which may be
1534 different from that of the source register size,
1535 e.g. in strb/ldrb. */
1536 size = aarch64_get_qualifier_esize (opnd->qualifier);
1537 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1538 {
1539 set_offset_out_of_range_error (mismatch_detail, idx,
1540 -64 * size, 63 * size);
1541 return 0;
1542 }
1543 if (!value_aligned_p (opnd->addr.offset.imm, size))
1544 {
1545 set_unaligned_error (mismatch_detail, idx, size);
1546 return 0;
1547 }
1548 break;
1549 case AARCH64_OPND_ADDR_SIMM9:
1550 /* Unscaled signed 9 bits immediate offset. */
1551 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1552 {
1553 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1554 return 0;
1555 }
1556 break;
1557
1558 case AARCH64_OPND_ADDR_SIMM9_2:
1559 /* Unscaled signed 9 bits immediate offset, which has to be negative
1560 or unaligned. */
1561 size = aarch64_get_qualifier_esize (qualifier);
1562 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1563 && !value_aligned_p (opnd->addr.offset.imm, size))
1564 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1565 return 1;
1566 set_other_error (mismatch_detail, idx,
1567 _("negative or unaligned offset expected"));
1568 return 0;
1569
1570 case AARCH64_OPND_SIMD_ADDR_POST:
1571 /* AdvSIMD load/store multiple structures, post-index. */
1572 assert (idx == 1);
1573 if (opnd->addr.offset.is_reg)
1574 {
1575 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1576 return 1;
1577 else
1578 {
1579 set_other_error (mismatch_detail, idx,
1580 _("invalid register offset"));
1581 return 0;
1582 }
1583 }
1584 else
1585 {
1586 const aarch64_opnd_info *prev = &opnds[idx-1];
1587 unsigned num_bytes; /* total number of bytes transferred. */
1588 /* The opcode dependent area stores the number of elements in
1589 each structure to be loaded/stored. */
1590 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1591 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1592 /* Special handling of loading single structure to all lane. */
1593 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1594 * aarch64_get_qualifier_esize (prev->qualifier);
1595 else
1596 num_bytes = prev->reglist.num_regs
1597 * aarch64_get_qualifier_esize (prev->qualifier)
1598 * aarch64_get_qualifier_nelem (prev->qualifier);
1599 if ((int) num_bytes != opnd->addr.offset.imm)
1600 {
1601 set_other_error (mismatch_detail, idx,
1602 _("invalid post-increment amount"));
1603 return 0;
1604 }
1605 }
1606 break;
1607
1608 case AARCH64_OPND_ADDR_REGOFF:
1609 /* Get the size of the data element that is accessed, which may be
1610 different from that of the source register size,
1611 e.g. in strb/ldrb. */
1612 size = aarch64_get_qualifier_esize (opnd->qualifier);
1613 /* It is either no shift or shift by the binary logarithm of SIZE. */
1614 if (opnd->shifter.amount != 0
1615 && opnd->shifter.amount != (int)get_logsz (size))
1616 {
1617 set_other_error (mismatch_detail, idx,
1618 _("invalid shift amount"));
1619 return 0;
1620 }
1621 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1622 operators. */
1623 switch (opnd->shifter.kind)
1624 {
1625 case AARCH64_MOD_UXTW:
1626 case AARCH64_MOD_LSL:
1627 case AARCH64_MOD_SXTW:
1628 case AARCH64_MOD_SXTX: break;
1629 default:
1630 set_other_error (mismatch_detail, idx,
1631 _("invalid extend/shift operator"));
1632 return 0;
1633 }
1634 break;
1635
1636 case AARCH64_OPND_ADDR_UIMM12:
1637 imm = opnd->addr.offset.imm;
1638 /* Get the size of the data element that is accessed, which may be
1639 different from that of the source register size,
1640 e.g. in strb/ldrb. */
1641 size = aarch64_get_qualifier_esize (qualifier);
1642 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1643 {
1644 set_offset_out_of_range_error (mismatch_detail, idx,
1645 0, 4095 * size);
1646 return 0;
1647 }
1648 if (!value_aligned_p (opnd->addr.offset.imm, size))
1649 {
1650 set_unaligned_error (mismatch_detail, idx, size);
1651 return 0;
1652 }
1653 break;
1654
1655 case AARCH64_OPND_ADDR_PCREL14:
1656 case AARCH64_OPND_ADDR_PCREL19:
1657 case AARCH64_OPND_ADDR_PCREL21:
1658 case AARCH64_OPND_ADDR_PCREL26:
1659 imm = opnd->imm.value;
1660 if (operand_need_shift_by_two (get_operand_from_code (type)))
1661 {
1662 /* The offset value in a PC-relative branch instruction is alway
1663 4-byte aligned and is encoded without the lowest 2 bits. */
1664 if (!value_aligned_p (imm, 4))
1665 {
1666 set_unaligned_error (mismatch_detail, idx, 4);
1667 return 0;
1668 }
1669 /* Right shift by 2 so that we can carry out the following check
1670 canonically. */
1671 imm >>= 2;
1672 }
1673 size = get_operand_fields_width (get_operand_from_code (type));
1674 if (!value_fit_signed_field_p (imm, size))
1675 {
1676 set_other_error (mismatch_detail, idx,
1677 _("immediate out of range"));
1678 return 0;
1679 }
1680 break;
1681
1682 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1683 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1684 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1685 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1686 min_value = -8;
1687 max_value = 7;
1688 sve_imm_offset_vl:
1689 assert (!opnd->addr.offset.is_reg);
1690 assert (opnd->addr.preind);
1691 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1692 min_value *= num;
1693 max_value *= num;
1694 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1695 || (opnd->shifter.operator_present
1696 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1697 {
1698 set_other_error (mismatch_detail, idx,
1699 _("invalid addressing mode"));
1700 return 0;
1701 }
1702 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1703 {
1704 set_offset_out_of_range_error (mismatch_detail, idx,
1705 min_value, max_value);
1706 return 0;
1707 }
1708 if (!value_aligned_p (opnd->addr.offset.imm, num))
1709 {
1710 set_unaligned_error (mismatch_detail, idx, num);
1711 return 0;
1712 }
1713 break;
1714
1715 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1716 min_value = -32;
1717 max_value = 31;
1718 goto sve_imm_offset_vl;
1719
1720 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1721 min_value = -256;
1722 max_value = 255;
1723 goto sve_imm_offset_vl;
1724
1725 case AARCH64_OPND_SVE_ADDR_RI_U6:
1726 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1727 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1728 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1729 min_value = 0;
1730 max_value = 63;
1731 sve_imm_offset:
1732 assert (!opnd->addr.offset.is_reg);
1733 assert (opnd->addr.preind);
1734 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1735 min_value *= num;
1736 max_value *= num;
1737 if (opnd->shifter.operator_present
1738 || opnd->shifter.amount_present)
1739 {
1740 set_other_error (mismatch_detail, idx,
1741 _("invalid addressing mode"));
1742 return 0;
1743 }
1744 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1745 {
1746 set_offset_out_of_range_error (mismatch_detail, idx,
1747 min_value, max_value);
1748 return 0;
1749 }
1750 if (!value_aligned_p (opnd->addr.offset.imm, num))
1751 {
1752 set_unaligned_error (mismatch_detail, idx, num);
1753 return 0;
1754 }
1755 break;
1756
1757 case AARCH64_OPND_SVE_ADDR_RR:
1758 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1759 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1760 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1761 case AARCH64_OPND_SVE_ADDR_RX:
1762 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1763 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1764 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1765 case AARCH64_OPND_SVE_ADDR_RZ:
1766 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1767 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1768 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1769 modifiers = 1 << AARCH64_MOD_LSL;
1770 sve_rr_operand:
1771 assert (opnd->addr.offset.is_reg);
1772 assert (opnd->addr.preind);
1773 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1774 && opnd->addr.offset.regno == 31)
1775 {
1776 set_other_error (mismatch_detail, idx,
1777 _("index register xzr is not allowed"));
1778 return 0;
1779 }
1780 if (((1 << opnd->shifter.kind) & modifiers) == 0
1781 || (opnd->shifter.amount
1782 != get_operand_specific_data (&aarch64_operands[type])))
1783 {
1784 set_other_error (mismatch_detail, idx,
1785 _("invalid addressing mode"));
1786 return 0;
1787 }
1788 break;
1789
1790 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1791 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1792 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1793 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1794 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1795 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1796 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1797 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1798 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1799 goto sve_rr_operand;
1800
1801 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1802 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1803 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1804 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1805 min_value = 0;
1806 max_value = 31;
1807 goto sve_imm_offset;
1808
1809 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1810 modifiers = 1 << AARCH64_MOD_LSL;
1811 sve_zz_operand:
1812 assert (opnd->addr.offset.is_reg);
1813 assert (opnd->addr.preind);
1814 if (((1 << opnd->shifter.kind) & modifiers) == 0
1815 || opnd->shifter.amount < 0
1816 || opnd->shifter.amount > 3)
1817 {
1818 set_other_error (mismatch_detail, idx,
1819 _("invalid addressing mode"));
1820 return 0;
1821 }
1822 break;
1823
1824 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1825 modifiers = (1 << AARCH64_MOD_SXTW);
1826 goto sve_zz_operand;
1827
1828 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1829 modifiers = 1 << AARCH64_MOD_UXTW;
1830 goto sve_zz_operand;
1831
1832 default:
1833 break;
1834 }
1835 break;
1836
1837 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1838 if (type == AARCH64_OPND_LEt)
1839 {
1840 /* Get the upper bound for the element index. */
1841 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1842 if (!value_in_range_p (opnd->reglist.index, 0, num))
1843 {
1844 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1845 return 0;
1846 }
1847 }
1848 /* The opcode dependent area stores the number of elements in
1849 each structure to be loaded/stored. */
1850 num = get_opcode_dependent_value (opcode);
1851 switch (type)
1852 {
1853 case AARCH64_OPND_LVt:
1854 assert (num >= 1 && num <= 4);
1855 /* Unless LD1/ST1, the number of registers should be equal to that
1856 of the structure elements. */
1857 if (num != 1 && opnd->reglist.num_regs != num)
1858 {
1859 set_reg_list_error (mismatch_detail, idx, num);
1860 return 0;
1861 }
1862 break;
1863 case AARCH64_OPND_LVt_AL:
1864 case AARCH64_OPND_LEt:
1865 assert (num >= 1 && num <= 4);
1866 /* The number of registers should be equal to that of the structure
1867 elements. */
1868 if (opnd->reglist.num_regs != num)
1869 {
1870 set_reg_list_error (mismatch_detail, idx, num);
1871 return 0;
1872 }
1873 break;
1874 default:
1875 break;
1876 }
1877 break;
1878
1879 case AARCH64_OPND_CLASS_IMMEDIATE:
1880 /* Constraint check on immediate operand. */
1881 imm = opnd->imm.value;
1882 /* E.g. imm_0_31 constrains value to be 0..31. */
1883 if (qualifier_value_in_range_constraint_p (qualifier)
1884 && !value_in_range_p (imm, get_lower_bound (qualifier),
1885 get_upper_bound (qualifier)))
1886 {
1887 set_imm_out_of_range_error (mismatch_detail, idx,
1888 get_lower_bound (qualifier),
1889 get_upper_bound (qualifier));
1890 return 0;
1891 }
1892
1893 switch (type)
1894 {
1895 case AARCH64_OPND_AIMM:
1896 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1897 {
1898 set_other_error (mismatch_detail, idx,
1899 _("invalid shift operator"));
1900 return 0;
1901 }
1902 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1903 {
1904 set_other_error (mismatch_detail, idx,
1905 _("shift amount expected to be 0 or 12"));
1906 return 0;
1907 }
1908 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1909 {
1910 set_other_error (mismatch_detail, idx,
1911 _("immediate out of range"));
1912 return 0;
1913 }
1914 break;
1915
1916 case AARCH64_OPND_HALF:
1917 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1918 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1919 {
1920 set_other_error (mismatch_detail, idx,
1921 _("invalid shift operator"));
1922 return 0;
1923 }
1924 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1925 if (!value_aligned_p (opnd->shifter.amount, 16))
1926 {
1927 set_other_error (mismatch_detail, idx,
1928 _("shift amount should be a multiple of 16"));
1929 return 0;
1930 }
1931 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1932 {
1933 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1934 0, size * 8 - 16);
1935 return 0;
1936 }
1937 if (opnd->imm.value < 0)
1938 {
1939 set_other_error (mismatch_detail, idx,
1940 _("negative immediate value not allowed"));
1941 return 0;
1942 }
1943 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1944 {
1945 set_other_error (mismatch_detail, idx,
1946 _("immediate out of range"));
1947 return 0;
1948 }
1949 break;
1950
1951 case AARCH64_OPND_IMM_MOV:
1952 {
1953 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1954 imm = opnd->imm.value;
1955 assert (idx == 1);
1956 switch (opcode->op)
1957 {
1958 case OP_MOV_IMM_WIDEN:
1959 imm = ~imm;
1960 /* Fall through... */
1961 case OP_MOV_IMM_WIDE:
1962 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
1963 {
1964 set_other_error (mismatch_detail, idx,
1965 _("immediate out of range"));
1966 return 0;
1967 }
1968 break;
1969 case OP_MOV_IMM_LOG:
1970 if (!aarch64_logical_immediate_p (imm, esize, NULL))
1971 {
1972 set_other_error (mismatch_detail, idx,
1973 _("immediate out of range"));
1974 return 0;
1975 }
1976 break;
1977 default:
1978 assert (0);
1979 return 0;
1980 }
1981 }
1982 break;
1983
1984 case AARCH64_OPND_NZCV:
1985 case AARCH64_OPND_CCMP_IMM:
1986 case AARCH64_OPND_EXCEPTION:
1987 case AARCH64_OPND_UIMM4:
1988 case AARCH64_OPND_UIMM7:
1989 case AARCH64_OPND_UIMM3_OP1:
1990 case AARCH64_OPND_UIMM3_OP2:
1991 case AARCH64_OPND_SVE_UIMM3:
1992 case AARCH64_OPND_SVE_UIMM7:
1993 case AARCH64_OPND_SVE_UIMM8:
1994 case AARCH64_OPND_SVE_UIMM8_53:
1995 size = get_operand_fields_width (get_operand_from_code (type));
1996 assert (size < 32);
1997 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1998 {
1999 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2000 (1 << size) - 1);
2001 return 0;
2002 }
2003 break;
2004
2005 case AARCH64_OPND_SIMM5:
2006 case AARCH64_OPND_SVE_SIMM5:
2007 case AARCH64_OPND_SVE_SIMM5B:
2008 case AARCH64_OPND_SVE_SIMM6:
2009 case AARCH64_OPND_SVE_SIMM8:
2010 size = get_operand_fields_width (get_operand_from_code (type));
2011 assert (size < 32);
2012 if (!value_fit_signed_field_p (opnd->imm.value, size))
2013 {
2014 set_imm_out_of_range_error (mismatch_detail, idx,
2015 -(1 << (size - 1)),
2016 (1 << (size - 1)) - 1);
2017 return 0;
2018 }
2019 break;
2020
2021 case AARCH64_OPND_WIDTH:
2022 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2023 && opnds[0].type == AARCH64_OPND_Rd);
2024 size = get_upper_bound (qualifier);
2025 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2026 /* lsb+width <= reg.size */
2027 {
2028 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2029 size - opnds[idx-1].imm.value);
2030 return 0;
2031 }
2032 break;
2033
2034 case AARCH64_OPND_LIMM:
2035 case AARCH64_OPND_SVE_LIMM:
2036 {
2037 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2038 uint64_t uimm = opnd->imm.value;
2039 if (opcode->op == OP_BIC)
2040 uimm = ~uimm;
2041 if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
2042 {
2043 set_other_error (mismatch_detail, idx,
2044 _("immediate out of range"));
2045 return 0;
2046 }
2047 }
2048 break;
2049
2050 case AARCH64_OPND_IMM0:
2051 case AARCH64_OPND_FPIMM0:
2052 if (opnd->imm.value != 0)
2053 {
2054 set_other_error (mismatch_detail, idx,
2055 _("immediate zero expected"));
2056 return 0;
2057 }
2058 break;
2059
2060 case AARCH64_OPND_SHLL_IMM:
2061 assert (idx == 2);
2062 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2063 if (opnd->imm.value != size)
2064 {
2065 set_other_error (mismatch_detail, idx,
2066 _("invalid shift amount"));
2067 return 0;
2068 }
2069 break;
2070
2071 case AARCH64_OPND_IMM_VLSL:
2072 size = aarch64_get_qualifier_esize (qualifier);
2073 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2074 {
2075 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2076 size * 8 - 1);
2077 return 0;
2078 }
2079 break;
2080
2081 case AARCH64_OPND_IMM_VLSR:
2082 size = aarch64_get_qualifier_esize (qualifier);
2083 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2084 {
2085 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2086 return 0;
2087 }
2088 break;
2089
2090 case AARCH64_OPND_SIMD_IMM:
2091 case AARCH64_OPND_SIMD_IMM_SFT:
2092 /* Qualifier check. */
2093 switch (qualifier)
2094 {
2095 case AARCH64_OPND_QLF_LSL:
2096 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2097 {
2098 set_other_error (mismatch_detail, idx,
2099 _("invalid shift operator"));
2100 return 0;
2101 }
2102 break;
2103 case AARCH64_OPND_QLF_MSL:
2104 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2105 {
2106 set_other_error (mismatch_detail, idx,
2107 _("invalid shift operator"));
2108 return 0;
2109 }
2110 break;
2111 case AARCH64_OPND_QLF_NIL:
2112 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2113 {
2114 set_other_error (mismatch_detail, idx,
2115 _("shift is not permitted"));
2116 return 0;
2117 }
2118 break;
2119 default:
2120 assert (0);
2121 return 0;
2122 }
2123 /* Is the immediate valid? */
2124 assert (idx == 1);
2125 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2126 {
2127 /* uimm8 or simm8 */
2128 if (!value_in_range_p (opnd->imm.value, -128, 255))
2129 {
2130 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2131 return 0;
2132 }
2133 }
2134 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2135 {
2136 /* uimm64 is not
2137 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2138 ffffffffgggggggghhhhhhhh'. */
2139 set_other_error (mismatch_detail, idx,
2140 _("invalid value for immediate"));
2141 return 0;
2142 }
2143 /* Is the shift amount valid? */
2144 switch (opnd->shifter.kind)
2145 {
2146 case AARCH64_MOD_LSL:
2147 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2148 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2149 {
2150 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2151 (size - 1) * 8);
2152 return 0;
2153 }
2154 if (!value_aligned_p (opnd->shifter.amount, 8))
2155 {
2156 set_unaligned_error (mismatch_detail, idx, 8);
2157 return 0;
2158 }
2159 break;
2160 case AARCH64_MOD_MSL:
2161 /* Only 8 and 16 are valid shift amount. */
2162 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2163 {
2164 set_other_error (mismatch_detail, idx,
2165 _("shift amount expected to be 0 or 16"));
2166 return 0;
2167 }
2168 break;
2169 default:
2170 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2171 {
2172 set_other_error (mismatch_detail, idx,
2173 _("invalid shift operator"));
2174 return 0;
2175 }
2176 break;
2177 }
2178 break;
2179
2180 case AARCH64_OPND_FPIMM:
2181 case AARCH64_OPND_SIMD_FPIMM:
2182 case AARCH64_OPND_SVE_FPIMM8:
2183 if (opnd->imm.is_fp == 0)
2184 {
2185 set_other_error (mismatch_detail, idx,
2186 _("floating-point immediate expected"));
2187 return 0;
2188 }
2189 /* The value is expected to be an 8-bit floating-point constant with
2190 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2191 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2192 instruction). */
2193 if (!value_in_range_p (opnd->imm.value, 0, 255))
2194 {
2195 set_other_error (mismatch_detail, idx,
2196 _("immediate out of range"));
2197 return 0;
2198 }
2199 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2200 {
2201 set_other_error (mismatch_detail, idx,
2202 _("invalid shift operator"));
2203 return 0;
2204 }
2205 break;
2206
2207 case AARCH64_OPND_SVE_AIMM:
2208 min_value = 0;
2209 sve_aimm:
2210 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2211 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2212 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2213 uvalue = opnd->imm.value;
2214 shift = opnd->shifter.amount;
2215 if (size == 1)
2216 {
2217 if (shift != 0)
2218 {
2219 set_other_error (mismatch_detail, idx,
2220 _("no shift amount allowed for"
2221 " 8-bit constants"));
2222 return 0;
2223 }
2224 }
2225 else
2226 {
2227 if (shift != 0 && shift != 8)
2228 {
2229 set_other_error (mismatch_detail, idx,
2230 _("shift amount must be 0 or 8"));
2231 return 0;
2232 }
2233 if (shift == 0 && (uvalue & 0xff) == 0)
2234 {
2235 shift = 8;
2236 uvalue = (int64_t) uvalue / 256;
2237 }
2238 }
2239 mask >>= shift;
2240 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2241 {
2242 set_other_error (mismatch_detail, idx,
2243 _("immediate too big for element size"));
2244 return 0;
2245 }
2246 uvalue = (uvalue - min_value) & mask;
2247 if (uvalue > 0xff)
2248 {
2249 set_other_error (mismatch_detail, idx,
2250 _("invalid arithmetic immediate"));
2251 return 0;
2252 }
2253 break;
2254
2255 case AARCH64_OPND_SVE_ASIMM:
2256 min_value = -128;
2257 goto sve_aimm;
2258
2259 case AARCH64_OPND_SVE_I1_HALF_ONE:
2260 assert (opnd->imm.is_fp);
2261 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2262 {
2263 set_other_error (mismatch_detail, idx,
2264 _("floating-point value must be 0.5 or 1.0"));
2265 return 0;
2266 }
2267 break;
2268
2269 case AARCH64_OPND_SVE_I1_HALF_TWO:
2270 assert (opnd->imm.is_fp);
2271 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2272 {
2273 set_other_error (mismatch_detail, idx,
2274 _("floating-point value must be 0.5 or 2.0"));
2275 return 0;
2276 }
2277 break;
2278
2279 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2280 assert (opnd->imm.is_fp);
2281 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2282 {
2283 set_other_error (mismatch_detail, idx,
2284 _("floating-point value must be 0.0 or 1.0"));
2285 return 0;
2286 }
2287 break;
2288
2289 case AARCH64_OPND_SVE_INV_LIMM:
2290 {
2291 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2292 uint64_t uimm = ~opnd->imm.value;
2293 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2294 {
2295 set_other_error (mismatch_detail, idx,
2296 _("immediate out of range"));
2297 return 0;
2298 }
2299 }
2300 break;
2301
2302 case AARCH64_OPND_SVE_LIMM_MOV:
2303 {
2304 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2305 uint64_t uimm = opnd->imm.value;
2306 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2307 {
2308 set_other_error (mismatch_detail, idx,
2309 _("immediate out of range"));
2310 return 0;
2311 }
2312 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2313 {
2314 set_other_error (mismatch_detail, idx,
2315 _("invalid replicated MOV immediate"));
2316 return 0;
2317 }
2318 }
2319 break;
2320
2321 case AARCH64_OPND_SVE_PATTERN_SCALED:
2322 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2323 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2324 {
2325 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2326 return 0;
2327 }
2328 break;
2329
2330 case AARCH64_OPND_SVE_SHLIMM_PRED:
2331 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2332 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2333 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2334 {
2335 set_imm_out_of_range_error (mismatch_detail, idx,
2336 0, 8 * size - 1);
2337 return 0;
2338 }
2339 break;
2340
2341 case AARCH64_OPND_SVE_SHRIMM_PRED:
2342 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2343 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2344 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2345 {
2346 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2347 return 0;
2348 }
2349 break;
2350
2351 default:
2352 break;
2353 }
2354 break;
2355
2356 case AARCH64_OPND_CLASS_CP_REG:
2357 /* Cn or Cm: 4-bit opcode field named for historical reasons.
2358 valid range: C0 - C15. */
2359 if (opnd->reg.regno > 15)
2360 {
2361 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2362 return 0;
2363 }
2364 break;
2365
2366 case AARCH64_OPND_CLASS_SYSTEM:
2367 switch (type)
2368 {
2369 case AARCH64_OPND_PSTATEFIELD:
2370 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2371 /* MSR UAO, #uimm4
2372 MSR PAN, #uimm4
2373 The immediate must be #0 or #1. */
2374 if ((opnd->pstatefield == 0x03 /* UAO. */
2375 || opnd->pstatefield == 0x04) /* PAN. */
2376 && opnds[1].imm.value > 1)
2377 {
2378 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2379 return 0;
2380 }
2381 /* MSR SPSel, #uimm4
2382 Uses uimm4 as a control value to select the stack pointer: if
2383 bit 0 is set it selects the current exception level's stack
2384 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2385 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2386 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2387 {
2388 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2389 return 0;
2390 }
2391 break;
2392 default:
2393 break;
2394 }
2395 break;
2396
2397 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2398 /* Get the upper bound for the element index. */
2399 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2400 /* Index out-of-range. */
2401 if (!value_in_range_p (opnd->reglane.index, 0, num))
2402 {
2403 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2404 return 0;
2405 }
2406 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2407 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2408 number is encoded in "size:M:Rm":
2409 size <Vm>
2410 00 RESERVED
2411 01 0:Rm
2412 10 M:Rm
2413 11 RESERVED */
2414 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2415 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2416 {
2417 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2418 return 0;
2419 }
2420 break;
2421
2422 case AARCH64_OPND_CLASS_MODIFIED_REG:
2423 assert (idx == 1 || idx == 2);
2424 switch (type)
2425 {
2426 case AARCH64_OPND_Rm_EXT:
2427 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
2428 && opnd->shifter.kind != AARCH64_MOD_LSL)
2429 {
2430 set_other_error (mismatch_detail, idx,
2431 _("extend operator expected"));
2432 return 0;
2433 }
2434 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2435 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2436 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2437 case. */
2438 if (!aarch64_stack_pointer_p (opnds + 0)
2439 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2440 {
2441 if (!opnd->shifter.operator_present)
2442 {
2443 set_other_error (mismatch_detail, idx,
2444 _("missing extend operator"));
2445 return 0;
2446 }
2447 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2448 {
2449 set_other_error (mismatch_detail, idx,
2450 _("'LSL' operator not allowed"));
2451 return 0;
2452 }
2453 }
2454 assert (opnd->shifter.operator_present /* Default to LSL. */
2455 || opnd->shifter.kind == AARCH64_MOD_LSL);
2456 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2457 {
2458 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2459 return 0;
2460 }
2461 /* In the 64-bit form, the final register operand is written as Wm
2462 for all but the (possibly omitted) UXTX/LSL and SXTX
2463 operators.
2464 N.B. GAS allows X register to be used with any operator as a
2465 programming convenience. */
2466 if (qualifier == AARCH64_OPND_QLF_X
2467 && opnd->shifter.kind != AARCH64_MOD_LSL
2468 && opnd->shifter.kind != AARCH64_MOD_UXTX
2469 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2470 {
2471 set_other_error (mismatch_detail, idx, _("W register expected"));
2472 return 0;
2473 }
2474 break;
2475
2476 case AARCH64_OPND_Rm_SFT:
2477 /* ROR is not available to the shifted register operand in
2478 arithmetic instructions. */
2479 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2480 {
2481 set_other_error (mismatch_detail, idx,
2482 _("shift operator expected"));
2483 return 0;
2484 }
2485 if (opnd->shifter.kind == AARCH64_MOD_ROR
2486 && opcode->iclass != log_shift)
2487 {
2488 set_other_error (mismatch_detail, idx,
2489 _("'ROR' operator not allowed"));
2490 return 0;
2491 }
2492 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2493 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2494 {
2495 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2496 return 0;
2497 }
2498 break;
2499
2500 default:
2501 break;
2502 }
2503 break;
2504
2505 default:
2506 break;
2507 }
2508
2509 return 1;
2510 }
2511
2512 /* Main entrypoint for the operand constraint checking.
2513
2514 Return 1 if operands of *INST meet the constraint applied by the operand
2515 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2516 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2517 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2518 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2519 error kind when it is notified that an instruction does not pass the check).
2520
2521 Un-determined operand qualifiers may get established during the process. */
2522
2523 int
2524 aarch64_match_operands_constraint (aarch64_inst *inst,
2525 aarch64_operand_error *mismatch_detail)
2526 {
2527 int i;
2528
2529 DEBUG_TRACE ("enter");
2530
2531 /* Check for cases where a source register needs to be the same as the
2532 destination register. Do this before matching qualifiers since if
2533 an instruction has both invalid tying and invalid qualifiers,
2534 the error about qualifiers would suggest several alternative
2535 instructions that also have invalid tying. */
2536 i = inst->opcode->tied_operand;
2537 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2538 {
2539 if (mismatch_detail)
2540 {
2541 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2542 mismatch_detail->index = i;
2543 mismatch_detail->error = NULL;
2544 }
2545 return 0;
2546 }
2547
2548 /* Match operands' qualifier.
2549 *INST has already had qualifier establish for some, if not all, of
2550 its operands; we need to find out whether these established
2551 qualifiers match one of the qualifier sequence in
2552 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2553 with the corresponding qualifier in such a sequence.
2554 Only basic operand constraint checking is done here; the more thorough
2555 constraint checking will carried out by operand_general_constraint_met_p,
2556 which has be to called after this in order to get all of the operands'
2557 qualifiers established. */
2558 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2559 {
2560 DEBUG_TRACE ("FAIL on operand qualifier matching");
2561 if (mismatch_detail)
2562 {
2563 /* Return an error type to indicate that it is the qualifier
2564 matching failure; we don't care about which operand as there
2565 are enough information in the opcode table to reproduce it. */
2566 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2567 mismatch_detail->index = -1;
2568 mismatch_detail->error = NULL;
2569 }
2570 return 0;
2571 }
2572
2573 /* Match operands' constraint. */
2574 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2575 {
2576 enum aarch64_opnd type = inst->opcode->operands[i];
2577 if (type == AARCH64_OPND_NIL)
2578 break;
2579 if (inst->operands[i].skip)
2580 {
2581 DEBUG_TRACE ("skip the incomplete operand %d", i);
2582 continue;
2583 }
2584 if (operand_general_constraint_met_p (inst->operands, i, type,
2585 inst->opcode, mismatch_detail) == 0)
2586 {
2587 DEBUG_TRACE ("FAIL on operand %d", i);
2588 return 0;
2589 }
2590 }
2591
2592 DEBUG_TRACE ("PASS");
2593
2594 return 1;
2595 }
2596
2597 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2598 Also updates the TYPE of each INST->OPERANDS with the corresponding
2599 value of OPCODE->OPERANDS.
2600
2601 Note that some operand qualifiers may need to be manually cleared by
2602 the caller before it further calls the aarch64_opcode_encode; by
2603 doing this, it helps the qualifier matching facilities work
2604 properly. */
2605
2606 const aarch64_opcode*
2607 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2608 {
2609 int i;
2610 const aarch64_opcode *old = inst->opcode;
2611
2612 inst->opcode = opcode;
2613
2614 /* Update the operand types. */
2615 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2616 {
2617 inst->operands[i].type = opcode->operands[i];
2618 if (opcode->operands[i] == AARCH64_OPND_NIL)
2619 break;
2620 }
2621
2622 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2623
2624 return old;
2625 }
2626
2627 int
2628 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2629 {
2630 int i;
2631 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2632 if (operands[i] == operand)
2633 return i;
2634 else if (operands[i] == AARCH64_OPND_NIL)
2635 break;
2636 return -1;
2637 }
2638 \f
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* Integer register name table, indexed as
   [has_zr][is_64][regno]:
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  Indexed as [is_d][regno].  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2669
2670 /* Return the integer register name.
2671 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2672
2673 static inline const char *
2674 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2675 {
2676 const int has_zr = sp_reg_p ? 0 : 1;
2677 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2678 return int_reg[has_zr][is_64][regno];
2679 }
2680
2681 /* Like get_int_reg_name, but IS_64 is always 1. */
2682
2683 static inline const char *
2684 get_64bit_int_reg_name (int regno, int sp_reg_p)
2685 {
2686 const int has_zr = sp_reg_p ? 0 : 1;
2687 return int_reg[has_zr][1][regno];
2688 }
2689
2690 /* Get the name of the integer offset register in OPND, using the shift type
2691 to decide whether it's a word or doubleword. */
2692
2693 static inline const char *
2694 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2695 {
2696 switch (opnd->shifter.kind)
2697 {
2698 case AARCH64_MOD_UXTW:
2699 case AARCH64_MOD_SXTW:
2700 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2701
2702 case AARCH64_MOD_LSL:
2703 case AARCH64_MOD_SXTX:
2704 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2705
2706 default:
2707 abort ();
2708 }
2709 }
2710
2711 /* Get the name of the SVE vector offset register in OPND, using the operand
2712 qualifier to decide whether the suffix should be .S or .D. */
2713
2714 static inline const char *
2715 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2716 {
2717 assert (qualifier == AARCH64_OPND_QLF_S_S
2718 || qualifier == AARCH64_OPND_QLF_S_D);
2719 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2720 }
2721
/* Types for expanding an encoded 8-bit value to a floating-point value.
   The unions allow the bit pattern built by expand_fp_imm to be
   reinterpreted as a float/double for printing.  */

typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision before
   printing, hence the 32-bit payload here as well.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2741
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Initialized to 0 so that an unsupported SIZE does not return an
     indeterminate value when asserts are compiled out (NDEBUG).  */
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      /* Build the top 32 bits of the double, then shift into place;
	 the low 32 bits of an FP8-expanded double are always zero.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)	*/
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32))  /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7> */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>) */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size; IMM stays 0.  */
      assert (0);
    }

  return imm;
}
2785
2786 /* Produce the string representation of the register list operand *OPND
2787 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2788 the register name that comes before the register number, such as "v". */
2789 static void
2790 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2791 const char *prefix)
2792 {
2793 const int num_regs = opnd->reglist.num_regs;
2794 const int first_reg = opnd->reglist.first_regno;
2795 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2796 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2797 char tb[8]; /* Temporary buffer. */
2798
2799 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2800 assert (num_regs >= 1 && num_regs <= 4);
2801
2802 /* Prepare the index if any. */
2803 if (opnd->reglist.has_index)
2804 snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
2805 else
2806 tb[0] = '\0';
2807
2808 /* The hyphenated form is preferred for disassembly if there are
2809 more than two registers in the list, and the register numbers
2810 are monotonically increasing in increments of one. */
2811 if (num_regs > 2 && last_reg > first_reg)
2812 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2813 prefix, last_reg, qlf_name, tb);
2814 else
2815 {
2816 const int reg0 = first_reg;
2817 const int reg1 = (first_reg + 1) & 0x1f;
2818 const int reg2 = (first_reg + 2) & 0x1f;
2819 const int reg3 = (first_reg + 3) & 0x1f;
2820
2821 switch (num_regs)
2822 {
2823 case 1:
2824 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2825 break;
2826 case 2:
2827 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2828 prefix, reg1, qlf_name, tb);
2829 break;
2830 case 3:
2831 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2832 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2833 prefix, reg2, qlf_name, tb);
2834 break;
2835 case 4:
2836 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2837 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2838 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2839 break;
2840 }
2841 }
2842 }
2843
2844 /* Print the register+immediate address in OPND to BUF, which has SIZE
2845 characters. BASE is the name of the base register. */
2846
2847 static void
2848 print_immediate_offset_address (char *buf, size_t size,
2849 const aarch64_opnd_info *opnd,
2850 const char *base)
2851 {
2852 if (opnd->addr.writeback)
2853 {
2854 if (opnd->addr.preind)
2855 snprintf (buf, size, "[%s,#%d]!", base, opnd->addr.offset.imm);
2856 else
2857 snprintf (buf, size, "[%s],#%d", base, opnd->addr.offset.imm);
2858 }
2859 else
2860 {
2861 if (opnd->shifter.operator_present)
2862 {
2863 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2864 snprintf (buf, size, "[%s,#%d,mul vl]",
2865 base, opnd->addr.offset.imm);
2866 }
2867 else if (opnd->addr.offset.imm)
2868 snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm);
2869 else
2870 snprintf (buf, size, "[%s]", base);
2871 }
2872 }
2873
2874 /* Produce the string representation of the register offset address operand
2875 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2876 the names of the base and offset registers. */
2877 static void
2878 print_register_offset_address (char *buf, size_t size,
2879 const aarch64_opnd_info *opnd,
2880 const char *base, const char *offset)
2881 {
2882 char tb[16]; /* Temporary buffer. */
2883 bfd_boolean print_extend_p = TRUE;
2884 bfd_boolean print_amount_p = TRUE;
2885 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2886
2887 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2888 || !opnd->shifter.amount_present))
2889 {
2890 /* Not print the shift/extend amount when the amount is zero and
2891 when it is not the special case of 8-bit load/store instruction. */
2892 print_amount_p = FALSE;
2893 /* Likewise, no need to print the shift operator LSL in such a
2894 situation. */
2895 if (opnd->shifter.kind == AARCH64_MOD_LSL)
2896 print_extend_p = FALSE;
2897 }
2898
2899 /* Prepare for the extend/shift. */
2900 if (print_extend_p)
2901 {
2902 if (print_amount_p)
2903 snprintf (tb, sizeof (tb), ",%s #%" PRIi64, shift_name,
2904 opnd->shifter.amount);
2905 else
2906 snprintf (tb, sizeof (tb), ",%s", shift_name);
2907 }
2908 else
2909 tb[0] = '\0';
2910
2911 snprintf (buf, size, "[%s,%s%s]", base, offset, tb);
2912 }
2913
2914 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2915 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2916 PC, PCREL_P and ADDRESS are used to pass in and return information about
2917 the PC-relative address calculation, where the PC value is passed in
2918 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2919 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2920 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2921
2922 The function serves both the disassembler and the assembler diagnostics
2923 issuer, which is the reason why it lives in this file. */
2924
2925 void
2926 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2927 const aarch64_opcode *opcode,
2928 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2929 bfd_vma *address)
2930 {
2931 int i;
2932 const char *name = NULL;
2933 const aarch64_opnd_info *opnd = opnds + idx;
2934 enum aarch64_modifier_kind kind;
2935 uint64_t addr, enum_value;
2936
2937 buf[0] = '\0';
2938 if (pcrel_p)
2939 *pcrel_p = 0;
2940
2941 switch (opnd->type)
2942 {
2943 case AARCH64_OPND_Rd:
2944 case AARCH64_OPND_Rn:
2945 case AARCH64_OPND_Rm:
2946 case AARCH64_OPND_Rt:
2947 case AARCH64_OPND_Rt2:
2948 case AARCH64_OPND_Rs:
2949 case AARCH64_OPND_Ra:
2950 case AARCH64_OPND_Rt_SYS:
2951 case AARCH64_OPND_PAIRREG:
2952 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2953 the <ic_op>, therefore we we use opnd->present to override the
2954 generic optional-ness information. */
2955 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2956 break;
2957 /* Omit the operand, e.g. RET. */
2958 if (optional_operand_p (opcode, idx)
2959 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2960 break;
2961 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2962 || opnd->qualifier == AARCH64_OPND_QLF_X);
2963 snprintf (buf, size, "%s",
2964 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2965 break;
2966
2967 case AARCH64_OPND_Rd_SP:
2968 case AARCH64_OPND_Rn_SP:
2969 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2970 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2971 || opnd->qualifier == AARCH64_OPND_QLF_X
2972 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2973 snprintf (buf, size, "%s",
2974 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2975 break;
2976
2977 case AARCH64_OPND_Rm_EXT:
2978 kind = opnd->shifter.kind;
2979 assert (idx == 1 || idx == 2);
2980 if ((aarch64_stack_pointer_p (opnds)
2981 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2982 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2983 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2984 && kind == AARCH64_MOD_UXTW)
2985 || (opnd->qualifier == AARCH64_OPND_QLF_X
2986 && kind == AARCH64_MOD_UXTX)))
2987 {
2988 /* 'LSL' is the preferred form in this case. */
2989 kind = AARCH64_MOD_LSL;
2990 if (opnd->shifter.amount == 0)
2991 {
2992 /* Shifter omitted. */
2993 snprintf (buf, size, "%s",
2994 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2995 break;
2996 }
2997 }
2998 if (opnd->shifter.amount)
2999 snprintf (buf, size, "%s, %s #%" PRIi64,
3000 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3001 aarch64_operand_modifiers[kind].name,
3002 opnd->shifter.amount);
3003 else
3004 snprintf (buf, size, "%s, %s",
3005 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3006 aarch64_operand_modifiers[kind].name);
3007 break;
3008
3009 case AARCH64_OPND_Rm_SFT:
3010 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3011 || opnd->qualifier == AARCH64_OPND_QLF_X);
3012 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3013 snprintf (buf, size, "%s",
3014 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3015 else
3016 snprintf (buf, size, "%s, %s #%" PRIi64,
3017 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3018 aarch64_operand_modifiers[opnd->shifter.kind].name,
3019 opnd->shifter.amount);
3020 break;
3021
3022 case AARCH64_OPND_Fd:
3023 case AARCH64_OPND_Fn:
3024 case AARCH64_OPND_Fm:
3025 case AARCH64_OPND_Fa:
3026 case AARCH64_OPND_Ft:
3027 case AARCH64_OPND_Ft2:
3028 case AARCH64_OPND_Sd:
3029 case AARCH64_OPND_Sn:
3030 case AARCH64_OPND_Sm:
3031 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3032 opnd->reg.regno);
3033 break;
3034
3035 case AARCH64_OPND_Vd:
3036 case AARCH64_OPND_Vn:
3037 case AARCH64_OPND_Vm:
3038 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3039 aarch64_get_qualifier_name (opnd->qualifier));
3040 break;
3041
3042 case AARCH64_OPND_Ed:
3043 case AARCH64_OPND_En:
3044 case AARCH64_OPND_Em:
3045 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3046 aarch64_get_qualifier_name (opnd->qualifier),
3047 opnd->reglane.index);
3048 break;
3049
3050 case AARCH64_OPND_VdD1:
3051 case AARCH64_OPND_VnD1:
3052 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3053 break;
3054
3055 case AARCH64_OPND_LVn:
3056 case AARCH64_OPND_LVt:
3057 case AARCH64_OPND_LVt_AL:
3058 case AARCH64_OPND_LEt:
3059 print_register_list (buf, size, opnd, "v");
3060 break;
3061
3062 case AARCH64_OPND_SVE_Pd:
3063 case AARCH64_OPND_SVE_Pg3:
3064 case AARCH64_OPND_SVE_Pg4_5:
3065 case AARCH64_OPND_SVE_Pg4_10:
3066 case AARCH64_OPND_SVE_Pg4_16:
3067 case AARCH64_OPND_SVE_Pm:
3068 case AARCH64_OPND_SVE_Pn:
3069 case AARCH64_OPND_SVE_Pt:
3070 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3071 snprintf (buf, size, "p%d", opnd->reg.regno);
3072 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3073 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3074 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3075 aarch64_get_qualifier_name (opnd->qualifier));
3076 else
3077 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3078 aarch64_get_qualifier_name (opnd->qualifier));
3079 break;
3080
3081 case AARCH64_OPND_SVE_Za_5:
3082 case AARCH64_OPND_SVE_Za_16:
3083 case AARCH64_OPND_SVE_Zd:
3084 case AARCH64_OPND_SVE_Zm_5:
3085 case AARCH64_OPND_SVE_Zm_16:
3086 case AARCH64_OPND_SVE_Zn:
3087 case AARCH64_OPND_SVE_Zt:
3088 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3089 snprintf (buf, size, "z%d", opnd->reg.regno);
3090 else
3091 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3092 aarch64_get_qualifier_name (opnd->qualifier));
3093 break;
3094
3095 case AARCH64_OPND_SVE_ZnxN:
3096 case AARCH64_OPND_SVE_ZtxN:
3097 print_register_list (buf, size, opnd, "z");
3098 break;
3099
3100 case AARCH64_OPND_SVE_Zn_INDEX:
3101 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3102 aarch64_get_qualifier_name (opnd->qualifier),
3103 opnd->reglane.index);
3104 break;
3105
3106 case AARCH64_OPND_Cn:
3107 case AARCH64_OPND_Cm:
3108 snprintf (buf, size, "C%d", opnd->reg.regno);
3109 break;
3110
3111 case AARCH64_OPND_IDX:
3112 case AARCH64_OPND_IMM:
3113 case AARCH64_OPND_WIDTH:
3114 case AARCH64_OPND_UIMM3_OP1:
3115 case AARCH64_OPND_UIMM3_OP2:
3116 case AARCH64_OPND_BIT_NUM:
3117 case AARCH64_OPND_IMM_VLSL:
3118 case AARCH64_OPND_IMM_VLSR:
3119 case AARCH64_OPND_SHLL_IMM:
3120 case AARCH64_OPND_IMM0:
3121 case AARCH64_OPND_IMMR:
3122 case AARCH64_OPND_IMMS:
3123 case AARCH64_OPND_FBITS:
3124 case AARCH64_OPND_SIMM5:
3125 case AARCH64_OPND_SVE_SHLIMM_PRED:
3126 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3127 case AARCH64_OPND_SVE_SHRIMM_PRED:
3128 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3129 case AARCH64_OPND_SVE_SIMM5:
3130 case AARCH64_OPND_SVE_SIMM5B:
3131 case AARCH64_OPND_SVE_SIMM6:
3132 case AARCH64_OPND_SVE_SIMM8:
3133 case AARCH64_OPND_SVE_UIMM3:
3134 case AARCH64_OPND_SVE_UIMM7:
3135 case AARCH64_OPND_SVE_UIMM8:
3136 case AARCH64_OPND_SVE_UIMM8_53:
3137 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3138 break;
3139
3140 case AARCH64_OPND_SVE_I1_HALF_ONE:
3141 case AARCH64_OPND_SVE_I1_HALF_TWO:
3142 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3143 {
3144 single_conv_t c;
3145 c.i = opnd->imm.value;
3146 snprintf (buf, size, "#%.1f", c.f);
3147 break;
3148 }
3149
3150 case AARCH64_OPND_SVE_PATTERN:
3151 if (optional_operand_p (opcode, idx)
3152 && opnd->imm.value == get_optional_operand_default_value (opcode))
3153 break;
3154 enum_value = opnd->imm.value;
3155 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3156 if (aarch64_sve_pattern_array[enum_value])
3157 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3158 else
3159 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3160 break;
3161
3162 case AARCH64_OPND_SVE_PATTERN_SCALED:
3163 if (optional_operand_p (opcode, idx)
3164 && !opnd->shifter.operator_present
3165 && opnd->imm.value == get_optional_operand_default_value (opcode))
3166 break;
3167 enum_value = opnd->imm.value;
3168 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3169 if (aarch64_sve_pattern_array[opnd->imm.value])
3170 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3171 else
3172 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3173 if (opnd->shifter.operator_present)
3174 {
3175 size_t len = strlen (buf);
3176 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3177 aarch64_operand_modifiers[opnd->shifter.kind].name,
3178 opnd->shifter.amount);
3179 }
3180 break;
3181
3182 case AARCH64_OPND_SVE_PRFOP:
3183 enum_value = opnd->imm.value;
3184 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3185 if (aarch64_sve_prfop_array[enum_value])
3186 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3187 else
3188 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3189 break;
3190
3191 case AARCH64_OPND_IMM_MOV:
3192 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3193 {
3194 case 4: /* e.g. MOV Wd, #<imm32>. */
3195 {
3196 int imm32 = opnd->imm.value;
3197 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3198 }
3199 break;
3200 case 8: /* e.g. MOV Xd, #<imm64>. */
3201 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3202 opnd->imm.value, opnd->imm.value);
3203 break;
3204 default: assert (0);
3205 }
3206 break;
3207
3208 case AARCH64_OPND_FPIMM0:
3209 snprintf (buf, size, "#0.0");
3210 break;
3211
3212 case AARCH64_OPND_LIMM:
3213 case AARCH64_OPND_AIMM:
3214 case AARCH64_OPND_HALF:
3215 case AARCH64_OPND_SVE_INV_LIMM:
3216 case AARCH64_OPND_SVE_LIMM:
3217 case AARCH64_OPND_SVE_LIMM_MOV:
3218 if (opnd->shifter.amount)
3219 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3220 opnd->shifter.amount);
3221 else
3222 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3223 break;
3224
3225 case AARCH64_OPND_SIMD_IMM:
3226 case AARCH64_OPND_SIMD_IMM_SFT:
3227 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3228 || opnd->shifter.kind == AARCH64_MOD_NONE)
3229 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3230 else
3231 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3232 aarch64_operand_modifiers[opnd->shifter.kind].name,
3233 opnd->shifter.amount);
3234 break;
3235
3236 case AARCH64_OPND_SVE_AIMM:
3237 case AARCH64_OPND_SVE_ASIMM:
3238 if (opnd->shifter.amount)
3239 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3240 opnd->shifter.amount);
3241 else
3242 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3243 break;
3244
3245 case AARCH64_OPND_FPIMM:
3246 case AARCH64_OPND_SIMD_FPIMM:
3247 case AARCH64_OPND_SVE_FPIMM8:
3248 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3249 {
3250 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3251 {
3252 half_conv_t c;
3253 c.i = expand_fp_imm (2, opnd->imm.value);
3254 snprintf (buf, size, "#%.18e", c.f);
3255 }
3256 break;
3257 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3258 {
3259 single_conv_t c;
3260 c.i = expand_fp_imm (4, opnd->imm.value);
3261 snprintf (buf, size, "#%.18e", c.f);
3262 }
3263 break;
3264 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3265 {
3266 double_conv_t c;
3267 c.i = expand_fp_imm (8, opnd->imm.value);
3268 snprintf (buf, size, "#%.18e", c.d);
3269 }
3270 break;
3271 default: assert (0);
3272 }
3273 break;
3274
3275 case AARCH64_OPND_CCMP_IMM:
3276 case AARCH64_OPND_NZCV:
3277 case AARCH64_OPND_EXCEPTION:
3278 case AARCH64_OPND_UIMM4:
3279 case AARCH64_OPND_UIMM7:
3280 if (optional_operand_p (opcode, idx) == TRUE
3281 && (opnd->imm.value ==
3282 (int64_t) get_optional_operand_default_value (opcode)))
3283 /* Omit the operand, e.g. DCPS1. */
3284 break;
3285 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3286 break;
3287
3288 case AARCH64_OPND_COND:
3289 case AARCH64_OPND_COND1:
3290 snprintf (buf, size, "%s", opnd->cond->names[0]);
3291 break;
3292
3293 case AARCH64_OPND_ADDR_ADRP:
3294 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3295 + opnd->imm.value;
3296 if (pcrel_p)
3297 *pcrel_p = 1;
3298 if (address)
3299 *address = addr;
3300 /* This is not necessary during the disassembling, as print_address_func
3301 in the disassemble_info will take care of the printing. But some
3302 other callers may be still interested in getting the string in *STR,
3303 so here we do snprintf regardless. */
3304 snprintf (buf, size, "#0x%" PRIx64, addr);
3305 break;
3306
3307 case AARCH64_OPND_ADDR_PCREL14:
3308 case AARCH64_OPND_ADDR_PCREL19:
3309 case AARCH64_OPND_ADDR_PCREL21:
3310 case AARCH64_OPND_ADDR_PCREL26:
3311 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3312 if (pcrel_p)
3313 *pcrel_p = 1;
3314 if (address)
3315 *address = addr;
3316 /* This is not necessary during the disassembling, as print_address_func
3317 in the disassemble_info will take care of the printing. But some
3318 other callers may be still interested in getting the string in *STR,
3319 so here we do snprintf regardless. */
3320 snprintf (buf, size, "#0x%" PRIx64, addr);
3321 break;
3322
3323 case AARCH64_OPND_ADDR_SIMPLE:
3324 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3325 case AARCH64_OPND_SIMD_ADDR_POST:
3326 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3327 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3328 {
3329 if (opnd->addr.offset.is_reg)
3330 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3331 else
3332 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3333 }
3334 else
3335 snprintf (buf, size, "[%s]", name);
3336 break;
3337
3338 case AARCH64_OPND_ADDR_REGOFF:
3339 case AARCH64_OPND_SVE_ADDR_RR:
3340 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3341 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3342 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3343 case AARCH64_OPND_SVE_ADDR_RX:
3344 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3345 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3346 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3347 print_register_offset_address
3348 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3349 get_offset_int_reg_name (opnd));
3350 break;
3351
3352 case AARCH64_OPND_SVE_ADDR_RZ:
3353 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3354 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3355 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3356 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3357 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3358 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3359 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3360 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3361 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3362 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3363 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3364 print_register_offset_address
3365 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3366 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3367 break;
3368
3369 case AARCH64_OPND_ADDR_SIMM7:
3370 case AARCH64_OPND_ADDR_SIMM9:
3371 case AARCH64_OPND_ADDR_SIMM9_2:
3372 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3373 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3374 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3375 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3376 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3377 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3378 case AARCH64_OPND_SVE_ADDR_RI_U6:
3379 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3380 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3381 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3382 print_immediate_offset_address
3383 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3384 break;
3385
3386 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3387 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3388 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3389 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3390 print_immediate_offset_address
3391 (buf, size, opnd,
3392 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3393 break;
3394
3395 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3396 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3397 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3398 print_register_offset_address
3399 (buf, size, opnd,
3400 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3401 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3402 break;
3403
3404 case AARCH64_OPND_ADDR_UIMM12:
3405 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3406 if (opnd->addr.offset.imm)
3407 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
3408 else
3409 snprintf (buf, size, "[%s]", name);
3410 break;
3411
3412 case AARCH64_OPND_SYSREG:
3413 for (i = 0; aarch64_sys_regs[i].name; ++i)
3414 if (aarch64_sys_regs[i].value == opnd->sysreg
3415 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
3416 break;
3417 if (aarch64_sys_regs[i].name)
3418 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
3419 else
3420 {
3421 /* Implementation defined system register. */
3422 unsigned int value = opnd->sysreg;
3423 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3424 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3425 value & 0x7);
3426 }
3427 break;
3428
3429 case AARCH64_OPND_PSTATEFIELD:
3430 for (i = 0; aarch64_pstatefields[i].name; ++i)
3431 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3432 break;
3433 assert (aarch64_pstatefields[i].name);
3434 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3435 break;
3436
3437 case AARCH64_OPND_SYSREG_AT:
3438 case AARCH64_OPND_SYSREG_DC:
3439 case AARCH64_OPND_SYSREG_IC:
3440 case AARCH64_OPND_SYSREG_TLBI:
3441 snprintf (buf, size, "%s", opnd->sysins_op->name);
3442 break;
3443
3444 case AARCH64_OPND_BARRIER:
3445 snprintf (buf, size, "%s", opnd->barrier->name);
3446 break;
3447
3448 case AARCH64_OPND_BARRIER_ISB:
3449 /* Operand can be omitted, e.g. in DCPS1. */
3450 if (! optional_operand_p (opcode, idx)
3451 || (opnd->barrier->value
3452 != get_optional_operand_default_value (opcode)))
3453 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3454 break;
3455
3456 case AARCH64_OPND_PRFOP:
3457 if (opnd->prfop->name != NULL)
3458 snprintf (buf, size, "%s", opnd->prfop->name);
3459 else
3460 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3461 break;
3462
3463 case AARCH64_OPND_BARRIER_PSB:
3464 snprintf (buf, size, "%s", opnd->hint_option->name);
3465 break;
3466
3467 default:
3468 assert (0);
3469 }
3470 }
3471 \f
/* Encode a system register or system instruction operand from its
   (op0, op1, CRn, CRm, op2) fields.  The fields are packed into bits
   [19:5] and the result shifted back down by 5, yielding the packed
   value stored in the aarch64_sys_reg/aarch64_sys_ins_reg tables and
   compared against instruction fields during assembly/disassembly.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Shorthand names for the CRn/CRm register-number fields used in the
   tables below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

/* Flag bits for aarch64_sys_reg.flags.  Undefine first in case a host
   header already claimed the names.  */
#ifdef F_DEPRECATED
#undef F_DEPRECATED
#endif
#define F_DEPRECATED 0x1 /* Deprecated system register.  */

#ifdef F_ARCHEXT
#undef F_ARCHEXT
#endif
#define F_ARCHEXT 0x2 /* Architecture dependent system register.  */

#ifdef F_HASXT
#undef F_HASXT
#endif
#define F_HASXT 0x4 /* System instruction register <Xt>
			operand.  */
3511
3512
/* Table of named system registers: each entry maps a lower-case name to
   its packed (op0,op1,CRn,CRm,op2) encoding (see CPENC/CPEN_ above) and
   a set of F_* flags.  The table is terminated by a null-name sentinel
   and is searched linearly (see the AARCH64_OPND_SYSREG case above).

   TODO there are two more issues need to be resolved
   1. handle read-only and write-only system registers
   2. handle cpu-implementation-defined system registers.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
  { "spsr_el1",         CPEN_(0,C0,0),    0 }, /* = spsr_svc */
  { "spsr_el12",        CPEN_ (5, C0, 0), F_ARCHEXT },
  { "elr_el1",          CPEN_(0,C0,1),    0 },
  { "elr_el12",         CPEN_ (5, C0, 1), F_ARCHEXT },
  { "sp_el0",           CPEN_(0,C1,0),    0 },
  { "spsel",            CPEN_(0,C2,0),    0 },
  { "daif",             CPEN_(3,C2,1),    0 },
  { "currentel",        CPEN_(0,C2,2),    0 }, /* RO */
  { "pan",              CPEN_(0,C2,3),    F_ARCHEXT },
  { "uao",              CPEN_ (0, C2, 4), F_ARCHEXT },
  { "nzcv",             CPEN_(3,C2,0),    0 },
  { "fpcr",             CPEN_(3,C4,0),    0 },
  { "fpsr",             CPEN_(3,C4,1),    0 },
  { "dspsr_el0",        CPEN_(3,C5,0),    0 },
  { "dlr_el0",          CPEN_(3,C5,1),    0 },
  { "spsr_el2",         CPEN_(4,C0,0),    0 }, /* = spsr_hyp */
  { "elr_el2",          CPEN_(4,C0,1),    0 },
  { "sp_el1",           CPEN_(4,C1,0),    0 },
  { "spsr_irq",         CPEN_(4,C3,0),    0 },
  { "spsr_abt",         CPEN_(4,C3,1),    0 },
  { "spsr_und",         CPEN_(4,C3,2),    0 },
  { "spsr_fiq",         CPEN_(4,C3,3),    0 },
  { "spsr_el3",         CPEN_(6,C0,0),    0 },
  { "elr_el3",          CPEN_(6,C0,1),    0 },
  { "sp_el2",           CPEN_(6,C1,0),    0 },
  { "spsr_svc",         CPEN_(0,C0,0),    F_DEPRECATED }, /* = spsr_el1 */
  { "spsr_hyp",         CPEN_(4,C0,0),    F_DEPRECATED }, /* = spsr_el2 */
  { "midr_el1",         CPENC(3,0,C0,C0,0),  0 }, /* RO */
  { "ctr_el0",          CPENC(3,3,C0,C0,1),  0 }, /* RO */
  { "mpidr_el1",        CPENC(3,0,C0,C0,5),  0 }, /* RO */
  { "revidr_el1",       CPENC(3,0,C0,C0,6),  0 }, /* RO */
  { "aidr_el1",         CPENC(3,1,C0,C0,7),  0 }, /* RO */
  { "dczid_el0",        CPENC(3,3,C0,C0,7),  0 }, /* RO */
  { "id_dfr0_el1",      CPENC(3,0,C0,C1,2),  0 }, /* RO */
  { "id_pfr0_el1",      CPENC(3,0,C0,C1,0),  0 }, /* RO */
  { "id_pfr1_el1",      CPENC(3,0,C0,C1,1),  0 }, /* RO */
  { "id_afr0_el1",      CPENC(3,0,C0,C1,3),  0 }, /* RO */
  { "id_mmfr0_el1",     CPENC(3,0,C0,C1,4),  0 }, /* RO */
  { "id_mmfr1_el1",     CPENC(3,0,C0,C1,5),  0 }, /* RO */
  { "id_mmfr2_el1",     CPENC(3,0,C0,C1,6),  0 }, /* RO */
  { "id_mmfr3_el1",     CPENC(3,0,C0,C1,7),  0 }, /* RO */
  { "id_mmfr4_el1",     CPENC(3,0,C0,C2,6),  0 }, /* RO */
  { "id_isar0_el1",     CPENC(3,0,C0,C2,0),  0 }, /* RO */
  { "id_isar1_el1",     CPENC(3,0,C0,C2,1),  0 }, /* RO */
  { "id_isar2_el1",     CPENC(3,0,C0,C2,2),  0 }, /* RO */
  { "id_isar3_el1",     CPENC(3,0,C0,C2,3),  0 }, /* RO */
  { "id_isar4_el1",     CPENC(3,0,C0,C2,4),  0 }, /* RO */
  { "id_isar5_el1",     CPENC(3,0,C0,C2,5),  0 }, /* RO */
  { "mvfr0_el1",        CPENC(3,0,C0,C3,0),  0 }, /* RO */
  { "mvfr1_el1",        CPENC(3,0,C0,C3,1),  0 }, /* RO */
  { "mvfr2_el1",        CPENC(3,0,C0,C3,2),  0 }, /* RO */
  { "ccsidr_el1",       CPENC(3,1,C0,C0,0),  0 }, /* RO */
  { "id_aa64pfr0_el1",  CPENC(3,0,C0,C4,0),  0 }, /* RO */
  { "id_aa64pfr1_el1",  CPENC(3,0,C0,C4,1),  0 }, /* RO */
  { "id_aa64dfr0_el1",  CPENC(3,0,C0,C5,0),  0 }, /* RO */
  { "id_aa64dfr1_el1",  CPENC(3,0,C0,C5,1),  0 }, /* RO */
  { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0),  0 }, /* RO */
  { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1),  0 }, /* RO */
  { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0),  0 }, /* RO */
  { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1),  0 }, /* RO */
  { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
  { "id_aa64afr0_el1",  CPENC(3,0,C0,C5,4),  0 }, /* RO */
  { "id_aa64afr1_el1",  CPENC(3,0,C0,C5,5),  0 }, /* RO */
  { "clidr_el1",        CPENC(3,1,C0,C0,1),  0 }, /* RO */
  { "csselr_el1",       CPENC(3,2,C0,C0,0),  0 }, /* RO */
  { "vpidr_el2",        CPENC(3,4,C0,C0,0),  0 },
  { "vmpidr_el2",       CPENC(3,4,C0,C0,5),  0 },
  { "sctlr_el1",        CPENC(3,0,C1,C0,0),  0 },
  { "sctlr_el2",        CPENC(3,4,C1,C0,0),  0 },
  { "sctlr_el3",        CPENC(3,6,C1,C0,0),  0 },
  { "sctlr_el12",       CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
  { "actlr_el1",        CPENC(3,0,C1,C0,1),  0 },
  { "actlr_el2",        CPENC(3,4,C1,C0,1),  0 },
  { "actlr_el3",        CPENC(3,6,C1,C0,1),  0 },
  { "cpacr_el1",        CPENC(3,0,C1,C0,2),  0 },
  { "cpacr_el12",       CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
  { "cptr_el2",         CPENC(3,4,C1,C1,2),  0 },
  { "cptr_el3",         CPENC(3,6,C1,C1,2),  0 },
  { "scr_el3",          CPENC(3,6,C1,C1,0),  0 },
  { "hcr_el2",          CPENC(3,4,C1,C1,0),  0 },
  { "mdcr_el2",         CPENC(3,4,C1,C1,1),  0 },
  { "mdcr_el3",         CPENC(3,6,C1,C3,1),  0 },
  { "hstr_el2",         CPENC(3,4,C1,C1,3),  0 },
  { "hacr_el2",         CPENC(3,4,C1,C1,7),  0 },
  { "ttbr0_el1",        CPENC(3,0,C2,C0,0),  0 },
  { "ttbr1_el1",        CPENC(3,0,C2,C0,1),  0 },
  { "ttbr0_el2",        CPENC(3,4,C2,C0,0),  0 },
  { "ttbr1_el2",        CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
  { "ttbr0_el3",        CPENC(3,6,C2,C0,0),  0 },
  { "ttbr0_el12",       CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
  { "ttbr1_el12",       CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
  { "vttbr_el2",        CPENC(3,4,C2,C1,0),  0 },
  { "tcr_el1",          CPENC(3,0,C2,C0,2),  0 },
  { "tcr_el2",          CPENC(3,4,C2,C0,2),  0 },
  { "tcr_el3",          CPENC(3,6,C2,C0,2),  0 },
  { "tcr_el12",         CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
  { "vtcr_el2",         CPENC(3,4,C2,C1,2),  0 },
  { "afsr0_el1",        CPENC(3,0,C5,C1,0),  0 },
  { "afsr1_el1",        CPENC(3,0,C5,C1,1),  0 },
  { "afsr0_el2",        CPENC(3,4,C5,C1,0),  0 },
  { "afsr1_el2",        CPENC(3,4,C5,C1,1),  0 },
  { "afsr0_el3",        CPENC(3,6,C5,C1,0),  0 },
  { "afsr0_el12",       CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
  { "afsr1_el3",        CPENC(3,6,C5,C1,1),  0 },
  { "afsr1_el12",       CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
  { "esr_el1",          CPENC(3,0,C5,C2,0),  0 },
  { "esr_el2",          CPENC(3,4,C5,C2,0),  0 },
  { "esr_el3",          CPENC(3,6,C5,C2,0),  0 },
  { "esr_el12",         CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
  { "vsesr_el2",        CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
  { "fpexc32_el2",      CPENC(3,4,C5,C3,0),  0 },
  { "erridr_el1",       CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
  { "errselr_el1",      CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
  { "erxfr_el1",        CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
  { "erxctlr_el1",      CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
  { "erxstatus_el1",    CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
  { "erxaddr_el1",      CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
  { "erxmisc0_el1",     CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
  { "erxmisc1_el1",     CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
  { "far_el1",          CPENC(3,0,C6,C0,0),  0 },
  { "far_el2",          CPENC(3,4,C6,C0,0),  0 },
  { "far_el3",          CPENC(3,6,C6,C0,0),  0 },
  { "far_el12",         CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
  { "hpfar_el2",        CPENC(3,4,C6,C0,4),  0 },
  { "par_el1",          CPENC(3,0,C7,C4,0),  0 },
  { "mair_el1",         CPENC(3,0,C10,C2,0), 0 },
  { "mair_el2",         CPENC(3,4,C10,C2,0), 0 },
  { "mair_el3",         CPENC(3,6,C10,C2,0), 0 },
  { "mair_el12",        CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
  { "amair_el1",        CPENC(3,0,C10,C3,0), 0 },
  { "amair_el2",        CPENC(3,4,C10,C3,0), 0 },
  { "amair_el3",        CPENC(3,6,C10,C3,0), 0 },
  { "amair_el12",       CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
  { "vbar_el1",         CPENC(3,0,C12,C0,0), 0 },
  { "vbar_el2",         CPENC(3,4,C12,C0,0), 0 },
  { "vbar_el3",         CPENC(3,6,C12,C0,0), 0 },
  { "vbar_el12",        CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
  { "rvbar_el1",        CPENC(3,0,C12,C0,1), 0 }, /* RO */
  { "rvbar_el2",        CPENC(3,4,C12,C0,1), 0 }, /* RO */
  { "rvbar_el3",        CPENC(3,6,C12,C0,1), 0 }, /* RO */
  { "rmr_el1",          CPENC(3,0,C12,C0,2), 0 },
  { "rmr_el2",          CPENC(3,4,C12,C0,2), 0 },
  { "rmr_el3",          CPENC(3,6,C12,C0,2), 0 },
  { "isr_el1",          CPENC(3,0,C12,C1,0), 0 }, /* RO */
  { "disr_el1",         CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
  { "vdisr_el2",        CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
  { "contextidr_el1",   CPENC(3,0,C13,C0,1), 0 },
  { "contextidr_el2",   CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
  { "contextidr_el12",  CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
  { "tpidr_el0",        CPENC(3,3,C13,C0,2), 0 },
  { "tpidrro_el0",      CPENC(3,3,C13,C0,3), 0 }, /* RO */
  { "tpidr_el1",        CPENC(3,0,C13,C0,4), 0 },
  { "tpidr_el2",        CPENC(3,4,C13,C0,2), 0 },
  { "tpidr_el3",        CPENC(3,6,C13,C0,2), 0 },
  { "teecr32_el1",      CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
  { "cntfrq_el0",       CPENC(3,3,C14,C0,0), 0 }, /* RO */
  { "cntpct_el0",       CPENC(3,3,C14,C0,1), 0 }, /* RO */
  { "cntvct_el0",       CPENC(3,3,C14,C0,2), 0 }, /* RO */
  { "cntvoff_el2",      CPENC(3,4,C14,C0,3), 0 },
  { "cntkctl_el1",      CPENC(3,0,C14,C1,0), 0 },
  { "cntkctl_el12",     CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
  { "cnthctl_el2",      CPENC(3,4,C14,C1,0), 0 },
  { "cntp_tval_el0",    CPENC(3,3,C14,C2,0), 0 },
  { "cntp_tval_el02",   CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
  { "cntp_ctl_el0",     CPENC(3,3,C14,C2,1), 0 },
  { "cntp_ctl_el02",    CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
  { "cntp_cval_el0",    CPENC(3,3,C14,C2,2), 0 },
  { "cntp_cval_el02",   CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
  { "cntv_tval_el0",    CPENC(3,3,C14,C3,0), 0 },
  { "cntv_tval_el02",   CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
  { "cntv_ctl_el0",     CPENC(3,3,C14,C3,1), 0 },
  { "cntv_ctl_el02",    CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
  { "cntv_cval_el0",    CPENC(3,3,C14,C3,2), 0 },
  { "cntv_cval_el02",   CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
  { "cnthp_tval_el2",   CPENC(3,4,C14,C2,0), 0 },
  { "cnthp_ctl_el2",    CPENC(3,4,C14,C2,1), 0 },
  { "cnthp_cval_el2",   CPENC(3,4,C14,C2,2), 0 },
  { "cntps_tval_el1",   CPENC(3,7,C14,C2,0), 0 },
  { "cntps_ctl_el1",    CPENC(3,7,C14,C2,1), 0 },
  { "cntps_cval_el1",   CPENC(3,7,C14,C2,2), 0 },
  { "cnthv_tval_el2",   CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
  { "cnthv_ctl_el2",    CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
  { "cnthv_cval_el2",   CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
  { "dacr32_el2",       CPENC(3,4,C3,C0,0),  0 },
  { "ifsr32_el2",       CPENC(3,4,C5,C0,1),  0 },
  { "teehbr32_el1",     CPENC(2,2,C1,C0,0),  0 },
  { "sder32_el3",       CPENC(3,6,C1,C1,1),  0 },
  { "mdscr_el1",         CPENC(2,0,C0, C2, 2), 0 },
  { "mdccsr_el0",        CPENC(2,3,C0, C1, 0), 0 },  /* r */
  { "mdccint_el1",       CPENC(2,0,C0, C2, 0), 0 },
  { "dbgdtr_el0",        CPENC(2,3,C0, C4, 0), 0 },
  { "dbgdtrrx_el0",      CPENC(2,3,C0, C5, 0), 0 },  /* r */
  { "dbgdtrtx_el0",      CPENC(2,3,C0, C5, 0), 0 },  /* w */
  { "osdtrrx_el1",       CPENC(2,0,C0, C0, 2), 0 },  /* r */
  { "osdtrtx_el1",       CPENC(2,0,C0, C3, 2), 0 },  /* w */
  { "oseccr_el1",        CPENC(2,0,C0, C6, 2), 0 },
  { "dbgvcr32_el2",      CPENC(2,4,C0, C7, 0), 0 },
  { "dbgbvr0_el1",       CPENC(2,0,C0, C0, 4), 0 },
  { "dbgbvr1_el1",       CPENC(2,0,C0, C1, 4), 0 },
  { "dbgbvr2_el1",       CPENC(2,0,C0, C2, 4), 0 },
  { "dbgbvr3_el1",       CPENC(2,0,C0, C3, 4), 0 },
  { "dbgbvr4_el1",       CPENC(2,0,C0, C4, 4), 0 },
  { "dbgbvr5_el1",       CPENC(2,0,C0, C5, 4), 0 },
  { "dbgbvr6_el1",       CPENC(2,0,C0, C6, 4), 0 },
  { "dbgbvr7_el1",       CPENC(2,0,C0, C7, 4), 0 },
  { "dbgbvr8_el1",       CPENC(2,0,C0, C8, 4), 0 },
  { "dbgbvr9_el1",       CPENC(2,0,C0, C9, 4), 0 },
  { "dbgbvr10_el1",      CPENC(2,0,C0, C10,4), 0 },
  { "dbgbvr11_el1",      CPENC(2,0,C0, C11,4), 0 },
  { "dbgbvr12_el1",      CPENC(2,0,C0, C12,4), 0 },
  { "dbgbvr13_el1",      CPENC(2,0,C0, C13,4), 0 },
  { "dbgbvr14_el1",      CPENC(2,0,C0, C14,4), 0 },
  { "dbgbvr15_el1",      CPENC(2,0,C0, C15,4), 0 },
  { "dbgbcr0_el1",       CPENC(2,0,C0, C0, 5), 0 },
  { "dbgbcr1_el1",       CPENC(2,0,C0, C1, 5), 0 },
  { "dbgbcr2_el1",       CPENC(2,0,C0, C2, 5), 0 },
  { "dbgbcr3_el1",       CPENC(2,0,C0, C3, 5), 0 },
  { "dbgbcr4_el1",       CPENC(2,0,C0, C4, 5), 0 },
  { "dbgbcr5_el1",       CPENC(2,0,C0, C5, 5), 0 },
  { "dbgbcr6_el1",       CPENC(2,0,C0, C6, 5), 0 },
  { "dbgbcr7_el1",       CPENC(2,0,C0, C7, 5), 0 },
  { "dbgbcr8_el1",       CPENC(2,0,C0, C8, 5), 0 },
  { "dbgbcr9_el1",       CPENC(2,0,C0, C9, 5), 0 },
  { "dbgbcr10_el1",      CPENC(2,0,C0, C10,5), 0 },
  { "dbgbcr11_el1",      CPENC(2,0,C0, C11,5), 0 },
  { "dbgbcr12_el1",      CPENC(2,0,C0, C12,5), 0 },
  { "dbgbcr13_el1",      CPENC(2,0,C0, C13,5), 0 },
  { "dbgbcr14_el1",      CPENC(2,0,C0, C14,5), 0 },
  { "dbgbcr15_el1",      CPENC(2,0,C0, C15,5), 0 },
  { "dbgwvr0_el1",       CPENC(2,0,C0, C0, 6), 0 },
  { "dbgwvr1_el1",       CPENC(2,0,C0, C1, 6), 0 },
  { "dbgwvr2_el1",       CPENC(2,0,C0, C2, 6), 0 },
  { "dbgwvr3_el1",       CPENC(2,0,C0, C3, 6), 0 },
  { "dbgwvr4_el1",       CPENC(2,0,C0, C4, 6), 0 },
  { "dbgwvr5_el1",       CPENC(2,0,C0, C5, 6), 0 },
  { "dbgwvr6_el1",       CPENC(2,0,C0, C6, 6), 0 },
  { "dbgwvr7_el1",       CPENC(2,0,C0, C7, 6), 0 },
  { "dbgwvr8_el1",       CPENC(2,0,C0, C8, 6), 0 },
  { "dbgwvr9_el1",       CPENC(2,0,C0, C9, 6), 0 },
  { "dbgwvr10_el1",      CPENC(2,0,C0, C10,6), 0 },
  { "dbgwvr11_el1",      CPENC(2,0,C0, C11,6), 0 },
  { "dbgwvr12_el1",      CPENC(2,0,C0, C12,6), 0 },
  { "dbgwvr13_el1",      CPENC(2,0,C0, C13,6), 0 },
  { "dbgwvr14_el1",      CPENC(2,0,C0, C14,6), 0 },
  { "dbgwvr15_el1",      CPENC(2,0,C0, C15,6), 0 },
  { "dbgwcr0_el1",       CPENC(2,0,C0, C0, 7), 0 },
  { "dbgwcr1_el1",       CPENC(2,0,C0, C1, 7), 0 },
  { "dbgwcr2_el1",       CPENC(2,0,C0, C2, 7), 0 },
  { "dbgwcr3_el1",       CPENC(2,0,C0, C3, 7), 0 },
  { "dbgwcr4_el1",       CPENC(2,0,C0, C4, 7), 0 },
  { "dbgwcr5_el1",       CPENC(2,0,C0, C5, 7), 0 },
  { "dbgwcr6_el1",       CPENC(2,0,C0, C6, 7), 0 },
  { "dbgwcr7_el1",       CPENC(2,0,C0, C7, 7), 0 },
  { "dbgwcr8_el1",       CPENC(2,0,C0, C8, 7), 0 },
  { "dbgwcr9_el1",       CPENC(2,0,C0, C9, 7), 0 },
  { "dbgwcr10_el1",      CPENC(2,0,C0, C10,7), 0 },
  { "dbgwcr11_el1",      CPENC(2,0,C0, C11,7), 0 },
  { "dbgwcr12_el1",      CPENC(2,0,C0, C12,7), 0 },
  { "dbgwcr13_el1",      CPENC(2,0,C0, C13,7), 0 },
  { "dbgwcr14_el1",      CPENC(2,0,C0, C14,7), 0 },
  { "dbgwcr15_el1",      CPENC(2,0,C0, C15,7), 0 },
  { "mdrar_el1",         CPENC(2,0,C1, C0, 0), 0 },  /* r */
  { "oslar_el1",         CPENC(2,0,C1, C0, 4), 0 },  /* w */
  { "oslsr_el1",         CPENC(2,0,C1, C1, 4), 0 },  /* r */
  { "osdlr_el1",         CPENC(2,0,C1, C3, 4), 0 },
  { "dbgprcr_el1",       CPENC(2,0,C1, C4, 4), 0 },
  { "dbgclaimset_el1",   CPENC(2,0,C7, C8, 6), 0 },
  { "dbgclaimclr_el1",   CPENC(2,0,C7, C9, 6), 0 },
  { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 },  /* r */
  { "pmblimitr_el1",     CPENC (3, 0, C9, C10, 0), F_ARCHEXT },  /* rw */
  { "pmbptr_el1",        CPENC (3, 0, C9, C10, 1), F_ARCHEXT },  /* rw */
  { "pmbsr_el1",         CPENC (3, 0, C9, C10, 3), F_ARCHEXT },  /* rw */
  { "pmbidr_el1",        CPENC (3, 0, C9, C10, 7), F_ARCHEXT },  /* ro */
  { "pmscr_el1",         CPENC (3, 0, C9, C9, 0),  F_ARCHEXT },  /* rw */
  { "pmsicr_el1",        CPENC (3, 0, C9, C9, 2),  F_ARCHEXT },  /* rw */
  { "pmsirr_el1",        CPENC (3, 0, C9, C9, 3),  F_ARCHEXT },  /* rw */
  { "pmsfcr_el1",        CPENC (3, 0, C9, C9, 4),  F_ARCHEXT },  /* rw */
  { "pmsevfr_el1",       CPENC (3, 0, C9, C9, 5),  F_ARCHEXT },  /* rw */
  { "pmslatfr_el1",      CPENC (3, 0, C9, C9, 6),  F_ARCHEXT },  /* rw */
  { "pmsidr_el1",        CPENC (3, 0, C9, C9, 7),  F_ARCHEXT },  /* ro */
  { "pmscr_el2",         CPENC (3, 4, C9, C9, 0),  F_ARCHEXT },  /* rw */
  { "pmscr_el12",        CPENC (3, 5, C9, C9, 0),  F_ARCHEXT },  /* rw */
  { "pmcr_el0",          CPENC(3,3,C9,C12, 0),  0 },
  { "pmcntenset_el0",    CPENC(3,3,C9,C12, 1),  0 },
  { "pmcntenclr_el0",    CPENC(3,3,C9,C12, 2),  0 },
  { "pmovsclr_el0",      CPENC(3,3,C9,C12, 3),  0 },
  { "pmswinc_el0",       CPENC(3,3,C9,C12, 4),  0 },  /* w */
  { "pmselr_el0",        CPENC(3,3,C9,C12, 5),  0 },
  { "pmceid0_el0",       CPENC(3,3,C9,C12, 6),  0 },  /* r */
  { "pmceid1_el0",       CPENC(3,3,C9,C12, 7),  0 },  /* r */
  { "pmccntr_el0",       CPENC(3,3,C9,C13, 0),  0 },
  { "pmxevtyper_el0",    CPENC(3,3,C9,C13, 1),  0 },
  { "pmxevcntr_el0",     CPENC(3,3,C9,C13, 2),  0 },
  { "pmuserenr_el0",     CPENC(3,3,C9,C14, 0),  0 },
  { "pmintenset_el1",    CPENC(3,0,C9,C14, 1),  0 },
  { "pmintenclr_el1",    CPENC(3,0,C9,C14, 2),  0 },
  { "pmovsset_el0",      CPENC(3,3,C9,C14, 3),  0 },
  { "pmevcntr0_el0",     CPENC(3,3,C14,C8, 0),  0 },
  { "pmevcntr1_el0",     CPENC(3,3,C14,C8, 1),  0 },
  { "pmevcntr2_el0",     CPENC(3,3,C14,C8, 2),  0 },
  { "pmevcntr3_el0",     CPENC(3,3,C14,C8, 3),  0 },
  { "pmevcntr4_el0",     CPENC(3,3,C14,C8, 4),  0 },
  { "pmevcntr5_el0",     CPENC(3,3,C14,C8, 5),  0 },
  { "pmevcntr6_el0",     CPENC(3,3,C14,C8, 6),  0 },
  { "pmevcntr7_el0",     CPENC(3,3,C14,C8, 7),  0 },
  { "pmevcntr8_el0",     CPENC(3,3,C14,C9, 0),  0 },
  { "pmevcntr9_el0",     CPENC(3,3,C14,C9, 1),  0 },
  { "pmevcntr10_el0",    CPENC(3,3,C14,C9, 2),  0 },
  { "pmevcntr11_el0",    CPENC(3,3,C14,C9, 3),  0 },
  { "pmevcntr12_el0",    CPENC(3,3,C14,C9, 4),  0 },
  { "pmevcntr13_el0",    CPENC(3,3,C14,C9, 5),  0 },
  { "pmevcntr14_el0",    CPENC(3,3,C14,C9, 6),  0 },
  { "pmevcntr15_el0",    CPENC(3,3,C14,C9, 7),  0 },
  { "pmevcntr16_el0",    CPENC(3,3,C14,C10,0),  0 },
  { "pmevcntr17_el0",    CPENC(3,3,C14,C10,1),  0 },
  { "pmevcntr18_el0",    CPENC(3,3,C14,C10,2),  0 },
  { "pmevcntr19_el0",    CPENC(3,3,C14,C10,3),  0 },
  { "pmevcntr20_el0",    CPENC(3,3,C14,C10,4),  0 },
  { "pmevcntr21_el0",    CPENC(3,3,C14,C10,5),  0 },
  { "pmevcntr22_el0",    CPENC(3,3,C14,C10,6),  0 },
  { "pmevcntr23_el0",    CPENC(3,3,C14,C10,7),  0 },
  { "pmevcntr24_el0",    CPENC(3,3,C14,C11,0),  0 },
  { "pmevcntr25_el0",    CPENC(3,3,C14,C11,1),  0 },
  { "pmevcntr26_el0",    CPENC(3,3,C14,C11,2),  0 },
  { "pmevcntr27_el0",    CPENC(3,3,C14,C11,3),  0 },
  { "pmevcntr28_el0",    CPENC(3,3,C14,C11,4),  0 },
  { "pmevcntr29_el0",    CPENC(3,3,C14,C11,5),  0 },
  { "pmevcntr30_el0",    CPENC(3,3,C14,C11,6),  0 },
  { "pmevtyper0_el0",    CPENC(3,3,C14,C12,0),  0 },
  { "pmevtyper1_el0",    CPENC(3,3,C14,C12,1),  0 },
  { "pmevtyper2_el0",    CPENC(3,3,C14,C12,2),  0 },
  { "pmevtyper3_el0",    CPENC(3,3,C14,C12,3),  0 },
  { "pmevtyper4_el0",    CPENC(3,3,C14,C12,4),  0 },
  { "pmevtyper5_el0",    CPENC(3,3,C14,C12,5),  0 },
  { "pmevtyper6_el0",    CPENC(3,3,C14,C12,6),  0 },
  { "pmevtyper7_el0",    CPENC(3,3,C14,C12,7),  0 },
  { "pmevtyper8_el0",    CPENC(3,3,C14,C13,0),  0 },
  { "pmevtyper9_el0",    CPENC(3,3,C14,C13,1),  0 },
  { "pmevtyper10_el0",   CPENC(3,3,C14,C13,2),  0 },
  { "pmevtyper11_el0",   CPENC(3,3,C14,C13,3),  0 },
  { "pmevtyper12_el0",   CPENC(3,3,C14,C13,4),  0 },
  { "pmevtyper13_el0",   CPENC(3,3,C14,C13,5),  0 },
  { "pmevtyper14_el0",   CPENC(3,3,C14,C13,6),  0 },
  { "pmevtyper15_el0",   CPENC(3,3,C14,C13,7),  0 },
  { "pmevtyper16_el0",   CPENC(3,3,C14,C14,0),  0 },
  { "pmevtyper17_el0",   CPENC(3,3,C14,C14,1),  0 },
  { "pmevtyper18_el0",   CPENC(3,3,C14,C14,2),  0 },
  { "pmevtyper19_el0",   CPENC(3,3,C14,C14,3),  0 },
  { "pmevtyper20_el0",   CPENC(3,3,C14,C14,4),  0 },
  { "pmevtyper21_el0",   CPENC(3,3,C14,C14,5),  0 },
  { "pmevtyper22_el0",   CPENC(3,3,C14,C14,6),  0 },
  { "pmevtyper23_el0",   CPENC(3,3,C14,C14,7),  0 },
  { "pmevtyper24_el0",   CPENC(3,3,C14,C15,0),  0 },
  { "pmevtyper25_el0",   CPENC(3,3,C14,C15,1),  0 },
  { "pmevtyper26_el0",   CPENC(3,3,C14,C15,2),  0 },
  { "pmevtyper27_el0",   CPENC(3,3,C14,C15,3),  0 },
  { "pmevtyper28_el0",   CPENC(3,3,C14,C15,4),  0 },
  { "pmevtyper29_el0",   CPENC(3,3,C14,C15,5),  0 },
  { "pmevtyper30_el0",   CPENC(3,3,C14,C15,6),  0 },
  { "pmccfiltr_el0",     CPENC(3,3,C14,C15,7),  0 },
  { 0,          CPENC(0,0,0,0,0), 0 },  /* Sentinel: end of table.  */
};
3880
3881 bfd_boolean
3882 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3883 {
3884 return (reg->flags & F_DEPRECATED) != 0;
3885 }
3886
/* Return TRUE if system register REG is available when assembling for
   the feature set FEATURES.  Registers not tagged with F_ARCHEXT are
   part of the base architecture and are always supported; the checks
   below gate only the architecture-extension registers.  */
bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
     ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  return TRUE;
}
3993
/* PSTATE field names and their operand encodings.  Entries tagged
   F_ARCHEXT are only available for certain architecture extensions;
   see aarch64_pstatefield_supported_p.  The all-zero entry terminates
   the table.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
    { "spsel", 0x05, 0 },
    { "daifset", 0x1e, 0 },
    { "daifclr", 0x1f, 0 },
    { "pan", 0x04, F_ARCHEXT },		/* Requires the PAN extension.  */
    { "uao", 0x03, F_ARCHEXT },		/* Requires ARMv8.2.  */
    { 0, CPENC(0,0,0,0,0), 0 },		/* Sentinel.  */
};
4003
4004 bfd_boolean
4005 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4006 const aarch64_sys_reg *reg)
4007 {
4008 if (!(reg->flags & F_ARCHEXT))
4009 return TRUE;
4010
4011 /* PAN. Values are from aarch64_pstatefields. */
4012 if (reg->value == 0x04
4013 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4014 return FALSE;
4015
4016 /* UAO. Values are from aarch64_pstatefields. */
4017 if (reg->value == 0x03
4018 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4019 return FALSE;
4020
4021 return TRUE;
4022 }
4023
/* Operands accepted by the IC (instruction cache maintenance)
   instruction.  Entries flagged F_HASXT take an Xt register operand.
   The null-name entry terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu", CPENS(0,C7,C5,0), 0 },
    { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
4031
/* Operands accepted by the DC (data cache maintenance) instruction.
   Entries flagged F_HASXT take an Xt register operand; "cvap" is
   additionally gated on ARMv8.2 via F_ARCHEXT (see
   aarch64_sys_ins_reg_supported_p).  The null-name entry terminates
   the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva", CPENS (3, C7, C4, 1), F_HASXT },
    { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
    { "isw", CPENS (0, C7, C6, 2), F_HASXT },
    { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
    { "csw", CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "civac", CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
4045
/* Operands accepted by the AT (address translation) instruction.  All
   entries take an Xt register operand (F_HASXT); "s1e1rp" and "s1e1wp"
   are additionally gated on ARMv8.2 via F_ARCHEXT (see
   aarch64_sys_ins_reg_supported_p).  The null-name entry terminates
   the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0, CPENS(0,0,0,0), 0 }
};
4064
/* Operands accepted by the TLBI (TLB invalidate) instruction.  Entries
   flagged F_HASXT take an Xt register operand; the rest take none.
   The null-name entry terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1", CPENS(0,C8,C7,0), 0 },
    { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2", CPENS(4,C8,C7,0), 0 },
    { "alle2is", CPENS(4,C8,C3,0), 0 },
    { "alle1", CPENS(4,C8,C7,4), 0 },
    { "alle1is", CPENS(4,C8,C3,4), 0 },
    { "alle3", CPENS(6,C8,C7,0), 0 },
    { "alle3is", CPENS(6,C8,C3,0), 0 },
    { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
4101
4102 bfd_boolean
4103 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4104 {
4105 return (sys_ins_reg->flags & F_HASXT) != 0;
4106 }
4107
4108 extern bfd_boolean
4109 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4110 const aarch64_sys_ins_reg *reg)
4111 {
4112 if (!(reg->flags & F_ARCHEXT))
4113 return TRUE;
4114
4115 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4116 if (reg->value == CPENS (3, C7, C12, 1)
4117 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4118 return FALSE;
4119
4120 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4121 if ((reg->value == CPENS (0, C7, C9, 0)
4122 || reg->value == CPENS (0, C7, C9, 1))
4123 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4124 return FALSE;
4125
4126 return TRUE;
4127 }
4128
4129 #undef C0
4130 #undef C1
4131 #undef C2
4132 #undef C3
4133 #undef C4
4134 #undef C5
4135 #undef C6
4136 #undef C7
4137 #undef C8
4138 #undef C9
4139 #undef C10
4140 #undef C11
4141 #undef C12
4142 #undef C13
4143 #undef C14
4144 #undef C15
4145
4146 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
4147 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4148
4149 static bfd_boolean
4150 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4151 const aarch64_insn insn)
4152 {
4153 int t = BITS (insn, 4, 0);
4154 int n = BITS (insn, 9, 5);
4155 int t2 = BITS (insn, 14, 10);
4156
4157 if (BIT (insn, 23))
4158 {
4159 /* Write back enabled. */
4160 if ((t == n || t2 == n) && n != 31)
4161 return FALSE;
4162 }
4163
4164 if (BIT (insn, 22))
4165 {
4166 /* Load */
4167 if (t == t2)
4168 return FALSE;
4169 }
4170
4171 return TRUE;
4172 }
4173
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bfd_boolean
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  /* Mask of the bits above the low ESIZE-byte element.  Shifting twice
     by ESIZE * 4 rather than once by ESIZE * 8 avoids an undefined
     64-bit shift when ESIZE is 8 (in which case UPPER becomes 0).  */
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* The bits above the element must be either all zero or all one,
     i.e. the value must be the zero- or sign-extension of its low
     ESIZE bytes.  */
  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return FALSE;
  /* Narrow SVALUE to the smallest replicated unit of the value: first
     to 32 bits if the two 32-bit halves are equal (or the element is
     at most 32 bits wide), then likewise to 16 bits.  If the value
     replicates down to a single byte, DUP can encode it for the byte
     element size, so DUPM is not needed.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return FALSE;
	}
    }
  /* DUP accepts a signed byte, optionally shifted left by 8; divide
     out that shift so the range test below covers both forms.  Report
     DUPM-only when the result does not fit in a signed byte.  */
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  return svalue < -128 || svalue >= 128;
}
4200
4201 /* Include the opcode description table as well as the operand description
4202 table. */
4203 #define VERIFIER(x) verify_##x
4204 #include "aarch64-tbl.h"
This page took 0.121966 seconds and 5 git commands to generate.