47c50797fb5068c221d093827cadfe8bb21bd9ab
[deliverable/binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Non-zero to enable the verbose qualifier-matching dumps emitted by the
   DEBUG_TRACE users below.  Only built when DEBUG_AARCH64 is defined.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  Unlisted (reserved) encodings default to NULL.  */
const char *const aarch64_sve_pattern_array[32] = {
  [0] = "pow2",
  [1] = "vl1",
  [2] = "vl2",
  [3] = "vl3",
  [4] = "vl4",
  [5] = "vl5",
  [6] = "vl6",
  [7] = "vl7",
  [8] = "vl8",
  [9] = "vl16",
  [10] = "vl32",
  [11] = "vl64",
  [12] = "vl128",
  [13] = "vl256",
  /* Encodings 14-28 are reserved.  */
  [29] = "mul4",
  [30] = "mul3",
  [31] = "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  Unlisted (reserved) encodings default to NULL.  */
const char *const aarch64_sve_prfop_array[16] = {
  [0] = "pldl1keep",
  [1] = "pldl1strm",
  [2] = "pldl2keep",
  [3] = "pldl2strm",
  [4] = "pldl3keep",
  [5] = "pldl3strm",
  /* 6 and 7 are reserved.  */
  [8] = "pstl1keep",
  [9] = "pstl1strm",
  [10] = "pstl2keep",
  [11] = "pstl2strm",
  [12] = "pstl3keep",
  [13] = "pstl3strm"
  /* 14 and 15 are reserved.  */
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Classification of a qualifier sequence, used to pick which operand
   carries the significant size:Q information (see the table below).  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};

/* For each data_pattern value, the index of the operand whose qualifier
   determines the size:Q encoding.  Indexed by enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
138
/* Given a sequence of qualifiers in QUALIFIERS, determine and return
   the data pattern.
   N.B. QUALIFIERS is a possible sequence of qualifiers each of which
   corresponds to one of a sequence of operands.
   The checks are order-sensitive: 3SAME is tried before LONG/WIDE so
   that equal-size forms win.  */

static enum data_pattern
get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
{
  if (vector_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* All three operands are vectors of the same element size,
	 e.g. v.4s, v.4s, v.4s
	 or   v.4h, v.4h, v.h[3].  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]))
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2])))
	return DP_VECTOR_3SAME;
      /* Widening: the destination element is twice the source element,
	 e.g. v.8h, v.8b, v.8b.
	 or   v.4s, v.4h, v.h[2].
	 or   v.8h, v.16b.  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
	return DP_VECTOR_LONG;
      /* Wide: first source matches the (wide) destination, second source
	 has half-size elements, e.g. v.8h, v.8h, v.8b.  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1])))
	return DP_VECTOR_WIDE;
    }
  else if (fp_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* Scalar destination, vector source: an across-lanes reduction,
	 e.g. SADDLV <V><d>, <Vn>.<T>.  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
	return DP_VECTOR_ACROSS_LANES;
    }

  return DP_UNKNOWN;
}
186
/* Select the operand to do the encoding/decoding of the 'size:Q' fields in
   the AdvSIMD instructions.  */
/* N.B. it is possible to do some optimization that doesn't call
   get_data_pattern each time when we need to select an operand.  We can
   either cache the calculated result or statically generate the data,
   however, it is not obvious that the optimization will bring significant
   benefit.  */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
201 \f
/* Description of every instruction field referenced by the encoder and
   decoder.  Each entry appears to be a { lsb, width } pair — the position
   of the field's least significant bit and its width in bits (e.g. imm19
   is { 5, 19 }: bits [23,5]).  NOTE(review): entry order must match the
   field enumerators declared in aarch64-opc.h — confirm against that
   header before inserting entries.  */
const aarch64_field fields[] =
{
    {  0,  0 },	/* NIL.  */
    {  0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    {  5, 19 },	/* imm19: e.g. in CBZ.  */
    {  5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29,  2 },	/* immlo: e.g. in ADRP.  */
    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
    {  0,  5 },	/* Rt: in load/store instructions.  */
    {  0,  5 },	/* Rd: in many integer instructions.  */
    {  5,  5 },	/* Rn: in many integer instructions.  */
    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
    { 10,  5 },	/* Ra: in fp instructions.  */
    {  5,  3 },	/* op2: in the system instructions.  */
    {  8,  4 },	/* CRm: in the system instructions.  */
    { 12,  4 },	/* CRn: in the system instructions.  */
    { 16,  3 },	/* op1: in the system instructions.  */
    { 19,  2 },	/* op0: in the system instructions.  */
    { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12,  4 },	/* cond: condition flags as a source operand.  */
    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12,  1 },	/* S: in load/store reg offset instructions.  */
    { 21,  2 },	/* hw: in move wide constant instructions.  */
    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22,  2 },	/* type: floating point type field in fp data inst.  */
    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
    {  5, 16 },	/* imm16: in exception instructions.  */
    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22,  1 },	/* N: in logical (immediate) instructions.  */
    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31,  1 },	/* sf: in integer data processing instructions.  */
    { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 17,  1 },	/* SVE_N: SVE equivalent of N.  */
    {  0,  4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10,  3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    {  5,  4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10,  4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16,  4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16,  4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    {  5,  4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    {  0,  4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    {  5,  5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16,  5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    {  0,  5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    {  5,  5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    {  5,  5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16,  5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    {  0,  5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    {  5,  5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16,  5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    {  5,  5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    {  0,  5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    {  5,  1 },	/* SVE_i1: single-bit immediate.  */
    { 16,  3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16,  4 },	/* SVE_imm4: 4-bit immediate field.  */
    {  5,  5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16,  5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16,  6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14,  7 },	/* SVE_imm7: 7-bit immediate field.  */
    {  5,  8 },	/* SVE_imm8: 8-bit immediate field.  */
    {  5,  9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11,  6 },	/* SVE_immr: SVE equivalent of immr.  */
    {  5,  6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10,  2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    {  5,  5 },	/* SVE_pattern: vector pattern enumeration.  */
    {  0,  4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 22,  2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    { 14,  1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22,  1 }	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
};
306
307 enum aarch64_operand_class
308 aarch64_get_operand_class (enum aarch64_opnd type)
309 {
310 return aarch64_operands[type].op_class;
311 }
312
313 const char *
314 aarch64_get_operand_name (enum aarch64_opnd type)
315 {
316 return aarch64_operands[type].name;
317 }
318
319 /* Get operand description string.
320 This is usually for the diagnosis purpose. */
321 const char *
322 aarch64_get_operand_desc (enum aarch64_opnd type)
323 {
324 return aarch64_operands[type].desc;
325 }
326
/* Table of all conditional affixes.  Each entry pairs one or more accepted
   spellings with the 4-bit condition-code encoding; indexed by that value
   (0x0-0xf).  Multiple names in one entry are assembler aliases for the
   same encoding (e.g. "cs"/"hs").  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq"}, 0x0},
  {{"ne"}, 0x1},
  {{"cs", "hs"}, 0x2},
  {{"cc", "lo", "ul"}, 0x3},
  {{"mi"}, 0x4},
  {{"pl"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi"}, 0x8},
  {{"ls"}, 0x9},
  {{"ge"}, 0xa},
  {{"lt"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
347
348 const aarch64_cond *
349 get_cond_from_value (aarch64_insn value)
350 {
351 assert (value < 16);
352 return &aarch64_conds[(unsigned int) value];
353 }
354
355 const aarch64_cond *
356 get_inverted_cond (const aarch64_cond *cond)
357 {
358 return &aarch64_conds[cond->value ^ 0x1];
359 }
360
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   The list is NULL-terminated.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
386
387 enum aarch64_modifier_kind
388 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
389 {
390 return desc - aarch64_operand_modifiers;
391 }
392
393 aarch64_insn
394 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
395 {
396 return aarch64_operand_modifiers[kind].value;
397 }
398
399 enum aarch64_modifier_kind
400 aarch64_get_operand_modifier_from_value (aarch64_insn value,
401 bfd_boolean extend_p)
402 {
403 if (extend_p == TRUE)
404 return AARCH64_MOD_UXTB + value;
405 else
406 return AARCH64_MOD_LSL - value;
407 }
408
409 bfd_boolean
410 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
411 {
412 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
413 ? TRUE : FALSE;
414 }
415
416 static inline bfd_boolean
417 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
418 {
419 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
420 ? TRUE : FALSE;
421 }
422
/* Names of the memory-barrier options, indexed by their 4-bit encoding
   (each entry's value equals its index).  Encodings without an
   architectural name are spelled as plain immediates ("#0x00" etc.).
   NOTE(review): presumably the CRm field of DSB/DMB — confirm against
   the users of this table.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
442
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },    /* PSB CSYNC.  */
  { NULL, 0x0 },
};
455
/* PRFM prefetch operation names, indexed by their 5-bit encoding.
   The encoding packs three sub-fields:
   op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1
   Unnamed (reserved) encodings carry a NULL name.  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
496 \f
/* Utilities on value constraint.  */

/* Return 1 if VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  return value <= high ? 1 : 0;
}
504
/* Return true if VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return value % align == 0;
}
511
/* Return 1 if VALUE is representable as a WIDTH-bit two's-complement
   signed field, i.e. lies in [-2^(WIDTH-1), 2^(WIDTH-1) - 1].  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t bound = (int64_t) 1 << (width - 1);
    return (value >= -bound && value < bound) ? 1 : 0;
  }
}
525
/* Return 1 if VALUE is representable as a WIDTH-bit unsigned field,
   i.e. lies in [0, 2^WIDTH - 1].  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t bound = (int64_t) 1 << width;
    return (value >= 0 && value < bound) ? 1 : 0;
  }
}
539
540 /* Return 1 if OPERAND is SP or WSP. */
541 int
542 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
543 {
544 return ((aarch64_get_operand_class (operand->type)
545 == AARCH64_OPND_CLASS_INT_REG)
546 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
547 && operand->reg.regno == 31);
548 }
549
550 /* Return 1 if OPERAND is XZR or WZP. */
551 int
552 aarch64_zero_register_p (const aarch64_opnd_info *operand)
553 {
554 return ((aarch64_get_operand_class (operand->type)
555 == AARCH64_OPND_CLASS_INT_REG)
556 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
557 && operand->reg.regno == 31);
558 }
559
/* Return true if the operand *OPERAND that has the operand code
   OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
   qualified by the qualifier TARGET.

   This handles the W/WSP and X/SP aliasing: register 31 can be spelled
   either way when the operand position allows the stack pointer.  */

static inline int
operand_also_qualified_p (const struct aarch64_opnd_info *operand,
			  aarch64_opnd_qualifier_t target)
{
  switch (operand->qualifier)
    {
    case AARCH64_OPND_QLF_W:
      /* A W operand that is actually WSP also matches the WSP qualifier.  */
      if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_X:
      /* An X operand that is actually SP also matches the SP qualifier.  */
      if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_WSP:
      /* WSP also matches W when the operand position may take SP.  */
      if (target == AARCH64_OPND_QLF_W
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    case AARCH64_OPND_QLF_SP:
      /* SP also matches X when the operand position may take SP.  */
      if (target == AARCH64_OPND_QLF_X
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    default:
      break;
    }

  return 0;
}
594
595 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
596 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
597
598 Return NIL if more than one expected qualifiers are found. */
599
600 aarch64_opnd_qualifier_t
601 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
602 int idx,
603 const aarch64_opnd_qualifier_t known_qlf,
604 int known_idx)
605 {
606 int i, saved_i;
607
608 /* Special case.
609
610 When the known qualifier is NIL, we have to assume that there is only
611 one qualifier sequence in the *QSEQ_LIST and return the corresponding
612 qualifier directly. One scenario is that for instruction
613 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
614 which has only one possible valid qualifier sequence
615 NIL, S_D
616 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
617 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
618
619 Because the qualifier NIL has dual roles in the qualifier sequence:
620 it can mean no qualifier for the operand, or the qualifer sequence is
621 not in use (when all qualifiers in the sequence are NILs), we have to
622 handle this special case here. */
623 if (known_qlf == AARCH64_OPND_NIL)
624 {
625 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
626 return qseq_list[0][idx];
627 }
628
629 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
630 {
631 if (qseq_list[i][known_idx] == known_qlf)
632 {
633 if (saved_i != -1)
634 /* More than one sequences are found to have KNOWN_QLF at
635 KNOWN_IDX. */
636 return AARCH64_OPND_NIL;
637 saved_i = i;
638 }
639 }
640
641 return qseq_list[saved_i][idx];
642 }
643
/* Broad category of an operand qualifier; decides how the three data
   fields of struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     OQK_OPD_VARIANT: element size, number of elements, encoding value;
     OQK_VALUE_IN_RANGE: lower bound, upper bound, unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
664
665 /* Indexed by the operand qualifier enumerators. */
666 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
667 {
668 {0, 0, 0, "NIL", OQK_NIL},
669
670 /* Operand variant qualifiers.
671 First 3 fields:
672 element size, number of elements and common value for encoding. */
673
674 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
675 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
676 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
677 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
678
679 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
680 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
681 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
682 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
683 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
684
685 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
686 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
687 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
688 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
689 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
690 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
691 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
692 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
693 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
694 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
695
696 {0, 0, 0, "z", OQK_OPD_VARIANT},
697 {0, 0, 0, "m", OQK_OPD_VARIANT},
698
699 /* Qualifiers constraining the value range.
700 First 3 fields:
701 Lower bound, higher bound, unused. */
702
703 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
704 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
705 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
706 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
707 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
708 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
709
710 /* Qualifiers for miscellaneous purpose.
711 First 3 fields:
712 unused, unused and unused. */
713
714 {0, 0, 0, "lsl", 0},
715 {0, 0, 0, "msl", 0},
716
717 {0, 0, 0, "retrieving", 0},
718 };
719
720 static inline bfd_boolean
721 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
722 {
723 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
724 ? TRUE : FALSE;
725 }
726
727 static inline bfd_boolean
728 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
729 {
730 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
731 ? TRUE : FALSE;
732 }
733
734 const char*
735 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
736 {
737 return aarch64_opnd_qualifiers[qualifier].desc;
738 }
739
740 /* Given an operand qualifier, return the expected data element size
741 of a qualified operand. */
742 unsigned char
743 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
744 {
745 assert (operand_variant_qualifier_p (qualifier) == TRUE);
746 return aarch64_opnd_qualifiers[qualifier].data0;
747 }
748
749 unsigned char
750 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
751 {
752 assert (operand_variant_qualifier_p (qualifier) == TRUE);
753 return aarch64_opnd_qualifiers[qualifier].data1;
754 }
755
756 aarch64_insn
757 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
758 {
759 assert (operand_variant_qualifier_p (qualifier) == TRUE);
760 return aarch64_opnd_qualifiers[qualifier].data2;
761 }
762
763 static int
764 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
765 {
766 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
767 return aarch64_opnd_qualifiers[qualifier].data0;
768 }
769
770 static int
771 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
772 {
773 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
774 return aarch64_opnd_qualifiers[qualifier].data1;
775 }
776
#ifdef DEBUG_AARCH64
/* printf-style debug output: the message is prefixed with "#### " and
   followed by a newline.  Only built when DEBUG_AARCH64 is defined.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}

/* Print the AARCH64_MAX_OPND_NUM qualifiers starting at QUALIFIER as a
   comma-separated debug line.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}

/* Dump the qualifiers currently attached to the operands OPND next to the
   candidate sequence QUALIFIER, for debugging the matcher.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
814
815 /* TODO improve this, we can have an extra field at the runtime to
816 store the number of operands rather than calculating it every time. */
817
818 int
819 aarch64_num_of_operands (const aarch64_opcode *opcode)
820 {
821 int i = 0;
822 const enum aarch64_opnd *opnds = opcode->operands;
823 while (opnds[i++] != AARCH64_OPND_NIL)
824 ;
825 --i;
826 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
827 return i;
828 }
829
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers been established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   A couple of examples of the matching algorithm:

   X,W,NIL should match
   X,W,NIL

   NIL,NIL should match
   X  ,NIL

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.  */

int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each candidate qualifier sequence (pattern) in the list.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes have much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      /* Compare each operand's established qualifier with the pattern.  */
      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched sequence up to STOP_AT; pad the rest with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
954
/* Operand qualifier matching and resolving.

   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.

   if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
   succeeds.  */

static int
match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
{
  int i, nops;
  aarch64_opnd_qualifier_seq_t qualifiers;

  /* Resolve the full sequence; on success QUALIFIERS holds the matched
     (and completed) qualifier for every operand.  */
  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
			       qualifiers))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  if (inst->opcode->flags & F_STRICT)
    {
      /* Require an exact qualifier match, even for NIL qualifiers.  */
      nops = aarch64_num_of_operands (inst->opcode);
      for (i = 0; i < nops; ++i)
	if (inst->operands[i].qualifier != qualifiers[i])
	  return FALSE;
    }

  /* Update the qualifiers.  */
  if (update_p == TRUE)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
1001
1002 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1003 register by MOVZ.
1004
1005 IS32 indicates whether value is a 32-bit immediate or not.
1006 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1007 amount will be returned in *SHIFT_AMOUNT. */
1008
1009 bfd_boolean
1010 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1011 {
1012 int amount;
1013
1014 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1015
1016 if (is32)
1017 {
1018 /* Allow all zeros or all ones in top 32-bits, so that
1019 32-bit constant expressions like ~0x80000000 are
1020 permitted. */
1021 uint64_t ext = value;
1022 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1023 /* Immediate out of range. */
1024 return FALSE;
1025 value &= (int64_t) 0xffffffff;
1026 }
1027
1028 /* first, try movz then movn */
1029 amount = -1;
1030 if ((value & ((int64_t) 0xffff << 0)) == value)
1031 amount = 0;
1032 else if ((value & ((int64_t) 0xffff << 16)) == value)
1033 amount = 16;
1034 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1035 amount = 32;
1036 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1037 amount = 48;
1038
1039 if (amount == -1)
1040 {
1041 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1042 return FALSE;
1043 }
1044
1045 if (shift_amount != NULL)
1046 *shift_amount = amount;
1047
1048 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1049
1050 return TRUE;
1051 }
1052
1053 /* Build the accepted values for immediate logical SIMD instructions.
1054
1055 The standard encodings of the immediate value are:
1056 N imms immr SIMD size R S
1057 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1058 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1059 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1060 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1061 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1062 0 11110s 00000r 2 UInt(r) UInt(s)
1063 where all-ones value of S is reserved.
1064
1065 Let's call E the SIMD size.
1066
1067 The immediate value is: S+1 bits '1' rotated to the right by R.
1068
1069 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1070 (remember S != E - 1). */
1071
/* Total number of valid logical-immediate encodings; see the derivation
   in the comment above (64*63 + 32*31 + ... + 2*1 = 5334).  */
#define TOTAL_IMM_NB 5334

/* One table entry: the fully expanded 64-bit immediate value and its
   13-bit standard encoding (N:immr:imms).  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Lookup table of all valid logical immediates.  Built lazily by
   build_immediate_table and kept sorted by IMM so that
   aarch64_logical_immediate_p can bsearch it.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1081
1082 static int
1083 simd_imm_encoding_cmp(const void *i1, const void *i2)
1084 {
1085 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1086 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1087
1088 if (imm1->imm < imm2->imm)
1089 return -1;
1090 if (imm1->imm > imm2->imm)
1091 return +1;
1092 return 0;
1093 }
1094
/* Pack (IS64, S, R) into the 13-bit standard immediate bitfield encoding:
   imm13<12>  imm13<5:0>  imm13<11:6>  SIMD size  R             S
   1          ssssss      rrrrrr       64         rrrrrr        ssssss
   0          0sssss      0rrrrr       32         rrrrr         sssss
   0          10ssss      00rrrr       16         rrrr          ssss
   0          110sss      000rrr       8          rrr           sss
   0          1110ss      0000rr       4          rr            ss
   0          11110s      00000r       2          r             s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;		/* imms in bits 5:0.  */
  encoding |= r << 6;		/* immr in bits 11:6.  */
  encoding |= is64 << 12;	/* N in bit 12.  */
  return encoding;
}
1108
/* Populate simd_immediates[] with every valid logical immediate and its
   standard encoding, then sort the table by immediate value so it can be
   binary-searched.  See the block comment above for the encoding scheme.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* Replicate the element across the full 64 bits.  Each case
	       deliberately falls through to the next, doubling the
	       replicated width at every step.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  /* The count must match the closed-form total computed above.  */
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1169
1170 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1171 be accepted by logical (immediate) instructions
1172 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1173
1174 ESIZE is the number of bytes in the decoded immediate value.
1175 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1176 VALUE will be returned in *ENCODING. */
1177
1178 bfd_boolean
1179 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1180 {
1181 simd_imm_encoding imm_enc;
1182 const simd_imm_encoding *imm_encoding;
1183 static bfd_boolean initialized = FALSE;
1184 uint64_t upper;
1185 int i;
1186
1187 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1188 value, is32);
1189
1190 if (initialized == FALSE)
1191 {
1192 build_immediate_table ();
1193 initialized = TRUE;
1194 }
1195
1196 /* Allow all zeros or all ones in top bits, so that
1197 constant expressions like ~1 are permitted. */
1198 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1199 if ((value & ~upper) != value && (value | upper) != value)
1200 return FALSE;
1201
1202 /* Replicate to a full 64-bit value. */
1203 value &= ~upper;
1204 for (i = esize * 8; i < 64; i *= 2)
1205 value |= (value << i);
1206
1207 imm_enc.imm = value;
1208 imm_encoding = (const simd_imm_encoding *)
1209 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1210 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1211 if (imm_encoding == NULL)
1212 {
1213 DEBUG_TRACE ("exit with FALSE");
1214 return FALSE;
1215 }
1216 if (encoding != NULL)
1217 *encoding = imm_encoding->encoding;
1218 DEBUG_TRACE ("exit with TRUE");
1219 return TRUE;
1220 }
1221
1222 /* If 64-bit immediate IMM is in the format of
1223 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1224 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1225 of value "abcdefgh". Otherwise return -1. */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int bit, shrunk;
  uint32_t byte;

  /* Walk the eight bytes from least to most significant: 0xff contributes
     a 1 bit, 0x00 a 0 bit, and any other byte value means IMM is not in
     the expanded-imm8 format.  */
  shrunk = 0;
  for (bit = 0; bit < 8; bit++, imm >>= 8)
    {
      byte = imm & 0xff;
      if (byte == 0xff)
	shrunk |= 1 << bit;
      else if (byte != 0x00)
	return -1;
    }
  return shrunk;
}
1243
1244 /* Utility inline functions for operand_general_constraint_met_p. */
1245
1246 static inline void
1247 set_error (aarch64_operand_error *mismatch_detail,
1248 enum aarch64_operand_error_kind kind, int idx,
1249 const char* error)
1250 {
1251 if (mismatch_detail == NULL)
1252 return;
1253 mismatch_detail->kind = kind;
1254 mismatch_detail->index = idx;
1255 mismatch_detail->error = error;
1256 }
1257
1258 static inline void
1259 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1260 const char* error)
1261 {
1262 if (mismatch_detail == NULL)
1263 return;
1264 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1265 }
1266
1267 static inline void
1268 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1269 int idx, int lower_bound, int upper_bound,
1270 const char* error)
1271 {
1272 if (mismatch_detail == NULL)
1273 return;
1274 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1275 mismatch_detail->data[0] = lower_bound;
1276 mismatch_detail->data[1] = upper_bound;
1277 }
1278
1279 static inline void
1280 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1281 int idx, int lower_bound, int upper_bound)
1282 {
1283 if (mismatch_detail == NULL)
1284 return;
1285 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1286 _("immediate value"));
1287 }
1288
1289 static inline void
1290 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1291 int idx, int lower_bound, int upper_bound)
1292 {
1293 if (mismatch_detail == NULL)
1294 return;
1295 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1296 _("immediate offset"));
1297 }
1298
1299 static inline void
1300 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1301 int idx, int lower_bound, int upper_bound)
1302 {
1303 if (mismatch_detail == NULL)
1304 return;
1305 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1306 _("register number"));
1307 }
1308
1309 static inline void
1310 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1311 int idx, int lower_bound, int upper_bound)
1312 {
1313 if (mismatch_detail == NULL)
1314 return;
1315 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1316 _("register element index"));
1317 }
1318
1319 static inline void
1320 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1321 int idx, int lower_bound, int upper_bound)
1322 {
1323 if (mismatch_detail == NULL)
1324 return;
1325 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1326 _("shift amount"));
1327 }
1328
1329 /* Report that the MUL modifier in operand IDX should be in the range
1330 [LOWER_BOUND, UPPER_BOUND]. */
1331 static inline void
1332 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1333 int idx, int lower_bound, int upper_bound)
1334 {
1335 if (mismatch_detail == NULL)
1336 return;
1337 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1338 _("multiplier"));
1339 }
1340
1341 static inline void
1342 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1343 int alignment)
1344 {
1345 if (mismatch_detail == NULL)
1346 return;
1347 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1348 mismatch_detail->data[0] = alignment;
1349 }
1350
1351 static inline void
1352 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1353 int expected_num)
1354 {
1355 if (mismatch_detail == NULL)
1356 return;
1357 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1358 mismatch_detail->data[0] = expected_num;
1359 }
1360
1361 static inline void
1362 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1363 const char* error)
1364 {
1365 if (mismatch_detail == NULL)
1366 return;
1367 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1368 }
1369
1370 /* General constraint checking based on operand code.
1371
1372 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1373 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1374
1375 This function has to be called after the qualifiers for all operands
1376 have been resolved.
1377
1378 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1379 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1380 of error message during the disassembling where error message is not
1381 wanted. We avoid the dynamic construction of strings of error messages
1382 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1383 use a combination of error code, static string and some integer data to
1384 represent an error. */
1385
1386 static int
1387 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1388 enum aarch64_opnd type,
1389 const aarch64_opcode *opcode,
1390 aarch64_operand_error *mismatch_detail)
1391 {
1392 unsigned num, modifiers, shift;
1393 unsigned char size;
1394 int64_t imm, min_value, max_value;
1395 uint64_t uvalue, mask;
1396 const aarch64_opnd_info *opnd = opnds + idx;
1397 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1398
1399 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1400
1401 switch (aarch64_operands[type].op_class)
1402 {
1403 case AARCH64_OPND_CLASS_INT_REG:
1404 /* Check pair reg constraints for cas* instructions. */
1405 if (type == AARCH64_OPND_PAIRREG)
1406 {
1407 assert (idx == 1 || idx == 3);
1408 if (opnds[idx - 1].reg.regno % 2 != 0)
1409 {
1410 set_syntax_error (mismatch_detail, idx - 1,
1411 _("reg pair must start from even reg"));
1412 return 0;
1413 }
1414 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1415 {
1416 set_syntax_error (mismatch_detail, idx,
1417 _("reg pair must be contiguous"));
1418 return 0;
1419 }
1420 break;
1421 }
1422
1423 /* <Xt> may be optional in some IC and TLBI instructions. */
1424 if (type == AARCH64_OPND_Rt_SYS)
1425 {
1426 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1427 == AARCH64_OPND_CLASS_SYSTEM));
1428 if (opnds[1].present
1429 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1430 {
1431 set_other_error (mismatch_detail, idx, _("extraneous register"));
1432 return 0;
1433 }
1434 if (!opnds[1].present
1435 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1436 {
1437 set_other_error (mismatch_detail, idx, _("missing register"));
1438 return 0;
1439 }
1440 }
1441 switch (qualifier)
1442 {
1443 case AARCH64_OPND_QLF_WSP:
1444 case AARCH64_OPND_QLF_SP:
1445 if (!aarch64_stack_pointer_p (opnd))
1446 {
1447 set_other_error (mismatch_detail, idx,
1448 _("stack pointer register expected"));
1449 return 0;
1450 }
1451 break;
1452 default:
1453 break;
1454 }
1455 break;
1456
1457 case AARCH64_OPND_CLASS_SVE_REG:
1458 switch (type)
1459 {
1460 case AARCH64_OPND_SVE_Zn_INDEX:
1461 size = aarch64_get_qualifier_esize (opnd->qualifier);
1462 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1463 {
1464 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1465 0, 64 / size - 1);
1466 return 0;
1467 }
1468 break;
1469
1470 case AARCH64_OPND_SVE_ZnxN:
1471 case AARCH64_OPND_SVE_ZtxN:
1472 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1473 {
1474 set_other_error (mismatch_detail, idx,
1475 _("invalid register list"));
1476 return 0;
1477 }
1478 break;
1479
1480 default:
1481 break;
1482 }
1483 break;
1484
1485 case AARCH64_OPND_CLASS_PRED_REG:
1486 if (opnd->reg.regno >= 8
1487 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1488 {
1489 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1490 return 0;
1491 }
1492 break;
1493
1494 case AARCH64_OPND_CLASS_COND:
1495 if (type == AARCH64_OPND_COND1
1496 && (opnds[idx].cond->value & 0xe) == 0xe)
1497 {
1498 /* Not allow AL or NV. */
1499 set_syntax_error (mismatch_detail, idx, NULL);
1500 }
1501 break;
1502
1503 case AARCH64_OPND_CLASS_ADDRESS:
1504 /* Check writeback. */
1505 switch (opcode->iclass)
1506 {
1507 case ldst_pos:
1508 case ldst_unscaled:
1509 case ldstnapair_offs:
1510 case ldstpair_off:
1511 case ldst_unpriv:
1512 if (opnd->addr.writeback == 1)
1513 {
1514 set_syntax_error (mismatch_detail, idx,
1515 _("unexpected address writeback"));
1516 return 0;
1517 }
1518 break;
1519 case ldst_imm9:
1520 case ldstpair_indexed:
1521 case asisdlsep:
1522 case asisdlsop:
1523 if (opnd->addr.writeback == 0)
1524 {
1525 set_syntax_error (mismatch_detail, idx,
1526 _("address writeback expected"));
1527 return 0;
1528 }
1529 break;
1530 default:
1531 assert (opnd->addr.writeback == 0);
1532 break;
1533 }
1534 switch (type)
1535 {
1536 case AARCH64_OPND_ADDR_SIMM7:
1537 /* Scaled signed 7 bits immediate offset. */
1538 /* Get the size of the data element that is accessed, which may be
1539 different from that of the source register size,
1540 e.g. in strb/ldrb. */
1541 size = aarch64_get_qualifier_esize (opnd->qualifier);
1542 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1543 {
1544 set_offset_out_of_range_error (mismatch_detail, idx,
1545 -64 * size, 63 * size);
1546 return 0;
1547 }
1548 if (!value_aligned_p (opnd->addr.offset.imm, size))
1549 {
1550 set_unaligned_error (mismatch_detail, idx, size);
1551 return 0;
1552 }
1553 break;
1554 case AARCH64_OPND_ADDR_SIMM9:
1555 /* Unscaled signed 9 bits immediate offset. */
1556 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1557 {
1558 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1559 return 0;
1560 }
1561 break;
1562
1563 case AARCH64_OPND_ADDR_SIMM9_2:
1564 /* Unscaled signed 9 bits immediate offset, which has to be negative
1565 or unaligned. */
1566 size = aarch64_get_qualifier_esize (qualifier);
1567 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1568 && !value_aligned_p (opnd->addr.offset.imm, size))
1569 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1570 return 1;
1571 set_other_error (mismatch_detail, idx,
1572 _("negative or unaligned offset expected"));
1573 return 0;
1574
1575 case AARCH64_OPND_SIMD_ADDR_POST:
1576 /* AdvSIMD load/store multiple structures, post-index. */
1577 assert (idx == 1);
1578 if (opnd->addr.offset.is_reg)
1579 {
1580 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1581 return 1;
1582 else
1583 {
1584 set_other_error (mismatch_detail, idx,
1585 _("invalid register offset"));
1586 return 0;
1587 }
1588 }
1589 else
1590 {
1591 const aarch64_opnd_info *prev = &opnds[idx-1];
1592 unsigned num_bytes; /* total number of bytes transferred. */
1593 /* The opcode dependent area stores the number of elements in
1594 each structure to be loaded/stored. */
1595 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1596 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1597 /* Special handling of loading single structure to all lane. */
1598 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1599 * aarch64_get_qualifier_esize (prev->qualifier);
1600 else
1601 num_bytes = prev->reglist.num_regs
1602 * aarch64_get_qualifier_esize (prev->qualifier)
1603 * aarch64_get_qualifier_nelem (prev->qualifier);
1604 if ((int) num_bytes != opnd->addr.offset.imm)
1605 {
1606 set_other_error (mismatch_detail, idx,
1607 _("invalid post-increment amount"));
1608 return 0;
1609 }
1610 }
1611 break;
1612
1613 case AARCH64_OPND_ADDR_REGOFF:
1614 /* Get the size of the data element that is accessed, which may be
1615 different from that of the source register size,
1616 e.g. in strb/ldrb. */
1617 size = aarch64_get_qualifier_esize (opnd->qualifier);
1618 /* It is either no shift or shift by the binary logarithm of SIZE. */
1619 if (opnd->shifter.amount != 0
1620 && opnd->shifter.amount != (int)get_logsz (size))
1621 {
1622 set_other_error (mismatch_detail, idx,
1623 _("invalid shift amount"));
1624 return 0;
1625 }
1626 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1627 operators. */
1628 switch (opnd->shifter.kind)
1629 {
1630 case AARCH64_MOD_UXTW:
1631 case AARCH64_MOD_LSL:
1632 case AARCH64_MOD_SXTW:
1633 case AARCH64_MOD_SXTX: break;
1634 default:
1635 set_other_error (mismatch_detail, idx,
1636 _("invalid extend/shift operator"));
1637 return 0;
1638 }
1639 break;
1640
1641 case AARCH64_OPND_ADDR_UIMM12:
1642 imm = opnd->addr.offset.imm;
1643 /* Get the size of the data element that is accessed, which may be
1644 different from that of the source register size,
1645 e.g. in strb/ldrb. */
1646 size = aarch64_get_qualifier_esize (qualifier);
1647 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1648 {
1649 set_offset_out_of_range_error (mismatch_detail, idx,
1650 0, 4095 * size);
1651 return 0;
1652 }
1653 if (!value_aligned_p (opnd->addr.offset.imm, size))
1654 {
1655 set_unaligned_error (mismatch_detail, idx, size);
1656 return 0;
1657 }
1658 break;
1659
1660 case AARCH64_OPND_ADDR_PCREL14:
1661 case AARCH64_OPND_ADDR_PCREL19:
1662 case AARCH64_OPND_ADDR_PCREL21:
1663 case AARCH64_OPND_ADDR_PCREL26:
1664 imm = opnd->imm.value;
1665 if (operand_need_shift_by_two (get_operand_from_code (type)))
1666 {
1667 /* The offset value in a PC-relative branch instruction is alway
1668 4-byte aligned and is encoded without the lowest 2 bits. */
1669 if (!value_aligned_p (imm, 4))
1670 {
1671 set_unaligned_error (mismatch_detail, idx, 4);
1672 return 0;
1673 }
1674 /* Right shift by 2 so that we can carry out the following check
1675 canonically. */
1676 imm >>= 2;
1677 }
1678 size = get_operand_fields_width (get_operand_from_code (type));
1679 if (!value_fit_signed_field_p (imm, size))
1680 {
1681 set_other_error (mismatch_detail, idx,
1682 _("immediate out of range"));
1683 return 0;
1684 }
1685 break;
1686
1687 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1688 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1689 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1690 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1691 min_value = -8;
1692 max_value = 7;
1693 sve_imm_offset_vl:
1694 assert (!opnd->addr.offset.is_reg);
1695 assert (opnd->addr.preind);
1696 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1697 min_value *= num;
1698 max_value *= num;
1699 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1700 || (opnd->shifter.operator_present
1701 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1702 {
1703 set_other_error (mismatch_detail, idx,
1704 _("invalid addressing mode"));
1705 return 0;
1706 }
1707 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1708 {
1709 set_offset_out_of_range_error (mismatch_detail, idx,
1710 min_value, max_value);
1711 return 0;
1712 }
1713 if (!value_aligned_p (opnd->addr.offset.imm, num))
1714 {
1715 set_unaligned_error (mismatch_detail, idx, num);
1716 return 0;
1717 }
1718 break;
1719
1720 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1721 min_value = -32;
1722 max_value = 31;
1723 goto sve_imm_offset_vl;
1724
1725 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1726 min_value = -256;
1727 max_value = 255;
1728 goto sve_imm_offset_vl;
1729
1730 case AARCH64_OPND_SVE_ADDR_RI_U6:
1731 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1732 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1733 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1734 min_value = 0;
1735 max_value = 63;
1736 sve_imm_offset:
1737 assert (!opnd->addr.offset.is_reg);
1738 assert (opnd->addr.preind);
1739 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1740 min_value *= num;
1741 max_value *= num;
1742 if (opnd->shifter.operator_present
1743 || opnd->shifter.amount_present)
1744 {
1745 set_other_error (mismatch_detail, idx,
1746 _("invalid addressing mode"));
1747 return 0;
1748 }
1749 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1750 {
1751 set_offset_out_of_range_error (mismatch_detail, idx,
1752 min_value, max_value);
1753 return 0;
1754 }
1755 if (!value_aligned_p (opnd->addr.offset.imm, num))
1756 {
1757 set_unaligned_error (mismatch_detail, idx, num);
1758 return 0;
1759 }
1760 break;
1761
1762 case AARCH64_OPND_SVE_ADDR_RR:
1763 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1764 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1765 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1766 case AARCH64_OPND_SVE_ADDR_RX:
1767 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1768 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1769 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1770 case AARCH64_OPND_SVE_ADDR_RZ:
1771 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1772 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1773 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1774 modifiers = 1 << AARCH64_MOD_LSL;
1775 sve_rr_operand:
1776 assert (opnd->addr.offset.is_reg);
1777 assert (opnd->addr.preind);
1778 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1779 && opnd->addr.offset.regno == 31)
1780 {
1781 set_other_error (mismatch_detail, idx,
1782 _("index register xzr is not allowed"));
1783 return 0;
1784 }
1785 if (((1 << opnd->shifter.kind) & modifiers) == 0
1786 || (opnd->shifter.amount
1787 != get_operand_specific_data (&aarch64_operands[type])))
1788 {
1789 set_other_error (mismatch_detail, idx,
1790 _("invalid addressing mode"));
1791 return 0;
1792 }
1793 break;
1794
1795 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1796 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1797 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1798 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1799 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1800 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1801 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1802 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1803 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1804 goto sve_rr_operand;
1805
1806 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1807 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1808 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1809 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1810 min_value = 0;
1811 max_value = 31;
1812 goto sve_imm_offset;
1813
1814 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1815 modifiers = 1 << AARCH64_MOD_LSL;
1816 sve_zz_operand:
1817 assert (opnd->addr.offset.is_reg);
1818 assert (opnd->addr.preind);
1819 if (((1 << opnd->shifter.kind) & modifiers) == 0
1820 || opnd->shifter.amount < 0
1821 || opnd->shifter.amount > 3)
1822 {
1823 set_other_error (mismatch_detail, idx,
1824 _("invalid addressing mode"));
1825 return 0;
1826 }
1827 break;
1828
1829 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1830 modifiers = (1 << AARCH64_MOD_SXTW);
1831 goto sve_zz_operand;
1832
1833 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1834 modifiers = 1 << AARCH64_MOD_UXTW;
1835 goto sve_zz_operand;
1836
1837 default:
1838 break;
1839 }
1840 break;
1841
1842 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1843 if (type == AARCH64_OPND_LEt)
1844 {
1845 /* Get the upper bound for the element index. */
1846 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1847 if (!value_in_range_p (opnd->reglist.index, 0, num))
1848 {
1849 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1850 return 0;
1851 }
1852 }
1853 /* The opcode dependent area stores the number of elements in
1854 each structure to be loaded/stored. */
1855 num = get_opcode_dependent_value (opcode);
1856 switch (type)
1857 {
1858 case AARCH64_OPND_LVt:
1859 assert (num >= 1 && num <= 4);
1860 /* Unless LD1/ST1, the number of registers should be equal to that
1861 of the structure elements. */
1862 if (num != 1 && opnd->reglist.num_regs != num)
1863 {
1864 set_reg_list_error (mismatch_detail, idx, num);
1865 return 0;
1866 }
1867 break;
1868 case AARCH64_OPND_LVt_AL:
1869 case AARCH64_OPND_LEt:
1870 assert (num >= 1 && num <= 4);
1871 /* The number of registers should be equal to that of the structure
1872 elements. */
1873 if (opnd->reglist.num_regs != num)
1874 {
1875 set_reg_list_error (mismatch_detail, idx, num);
1876 return 0;
1877 }
1878 break;
1879 default:
1880 break;
1881 }
1882 break;
1883
1884 case AARCH64_OPND_CLASS_IMMEDIATE:
1885 /* Constraint check on immediate operand. */
1886 imm = opnd->imm.value;
1887 /* E.g. imm_0_31 constrains value to be 0..31. */
1888 if (qualifier_value_in_range_constraint_p (qualifier)
1889 && !value_in_range_p (imm, get_lower_bound (qualifier),
1890 get_upper_bound (qualifier)))
1891 {
1892 set_imm_out_of_range_error (mismatch_detail, idx,
1893 get_lower_bound (qualifier),
1894 get_upper_bound (qualifier));
1895 return 0;
1896 }
1897
1898 switch (type)
1899 {
1900 case AARCH64_OPND_AIMM:
1901 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1902 {
1903 set_other_error (mismatch_detail, idx,
1904 _("invalid shift operator"));
1905 return 0;
1906 }
1907 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1908 {
1909 set_other_error (mismatch_detail, idx,
1910 _("shift amount expected to be 0 or 12"));
1911 return 0;
1912 }
1913 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1914 {
1915 set_other_error (mismatch_detail, idx,
1916 _("immediate out of range"));
1917 return 0;
1918 }
1919 break;
1920
1921 case AARCH64_OPND_HALF:
1922 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1923 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1924 {
1925 set_other_error (mismatch_detail, idx,
1926 _("invalid shift operator"));
1927 return 0;
1928 }
1929 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1930 if (!value_aligned_p (opnd->shifter.amount, 16))
1931 {
1932 set_other_error (mismatch_detail, idx,
1933 _("shift amount should be a multiple of 16"));
1934 return 0;
1935 }
1936 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1937 {
1938 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1939 0, size * 8 - 16);
1940 return 0;
1941 }
1942 if (opnd->imm.value < 0)
1943 {
1944 set_other_error (mismatch_detail, idx,
1945 _("negative immediate value not allowed"));
1946 return 0;
1947 }
1948 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1949 {
1950 set_other_error (mismatch_detail, idx,
1951 _("immediate out of range"));
1952 return 0;
1953 }
1954 break;
1955
1956 case AARCH64_OPND_IMM_MOV:
1957 {
1958 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1959 imm = opnd->imm.value;
1960 assert (idx == 1);
1961 switch (opcode->op)
1962 {
1963 case OP_MOV_IMM_WIDEN:
1964 imm = ~imm;
1965 /* Fall through... */
1966 case OP_MOV_IMM_WIDE:
1967 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
1968 {
1969 set_other_error (mismatch_detail, idx,
1970 _("immediate out of range"));
1971 return 0;
1972 }
1973 break;
1974 case OP_MOV_IMM_LOG:
1975 if (!aarch64_logical_immediate_p (imm, esize, NULL))
1976 {
1977 set_other_error (mismatch_detail, idx,
1978 _("immediate out of range"));
1979 return 0;
1980 }
1981 break;
1982 default:
1983 assert (0);
1984 return 0;
1985 }
1986 }
1987 break;
1988
1989 case AARCH64_OPND_NZCV:
1990 case AARCH64_OPND_CCMP_IMM:
1991 case AARCH64_OPND_EXCEPTION:
1992 case AARCH64_OPND_UIMM4:
1993 case AARCH64_OPND_UIMM7:
1994 case AARCH64_OPND_UIMM3_OP1:
1995 case AARCH64_OPND_UIMM3_OP2:
1996 case AARCH64_OPND_SVE_UIMM3:
1997 case AARCH64_OPND_SVE_UIMM7:
1998 case AARCH64_OPND_SVE_UIMM8:
1999 case AARCH64_OPND_SVE_UIMM8_53:
2000 size = get_operand_fields_width (get_operand_from_code (type));
2001 assert (size < 32);
2002 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2003 {
2004 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2005 (1 << size) - 1);
2006 return 0;
2007 }
2008 break;
2009
2010 case AARCH64_OPND_SIMM5:
2011 case AARCH64_OPND_SVE_SIMM5:
2012 case AARCH64_OPND_SVE_SIMM5B:
2013 case AARCH64_OPND_SVE_SIMM6:
2014 case AARCH64_OPND_SVE_SIMM8:
2015 size = get_operand_fields_width (get_operand_from_code (type));
2016 assert (size < 32);
2017 if (!value_fit_signed_field_p (opnd->imm.value, size))
2018 {
2019 set_imm_out_of_range_error (mismatch_detail, idx,
2020 -(1 << (size - 1)),
2021 (1 << (size - 1)) - 1);
2022 return 0;
2023 }
2024 break;
2025
2026 case AARCH64_OPND_WIDTH:
2027 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2028 && opnds[0].type == AARCH64_OPND_Rd);
2029 size = get_upper_bound (qualifier);
2030 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2031 /* lsb+width <= reg.size */
2032 {
2033 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2034 size - opnds[idx-1].imm.value);
2035 return 0;
2036 }
2037 break;
2038
2039 case AARCH64_OPND_LIMM:
2040 case AARCH64_OPND_SVE_LIMM:
2041 {
2042 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2043 uint64_t uimm = opnd->imm.value;
2044 if (opcode->op == OP_BIC)
2045 uimm = ~uimm;
2046 if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
2047 {
2048 set_other_error (mismatch_detail, idx,
2049 _("immediate out of range"));
2050 return 0;
2051 }
2052 }
2053 break;
2054
2055 case AARCH64_OPND_IMM0:
2056 case AARCH64_OPND_FPIMM0:
2057 if (opnd->imm.value != 0)
2058 {
2059 set_other_error (mismatch_detail, idx,
2060 _("immediate zero expected"));
2061 return 0;
2062 }
2063 break;
2064
2065 case AARCH64_OPND_SHLL_IMM:
2066 assert (idx == 2);
2067 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2068 if (opnd->imm.value != size)
2069 {
2070 set_other_error (mismatch_detail, idx,
2071 _("invalid shift amount"));
2072 return 0;
2073 }
2074 break;
2075
2076 case AARCH64_OPND_IMM_VLSL:
2077 size = aarch64_get_qualifier_esize (qualifier);
2078 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2079 {
2080 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2081 size * 8 - 1);
2082 return 0;
2083 }
2084 break;
2085
2086 case AARCH64_OPND_IMM_VLSR:
2087 size = aarch64_get_qualifier_esize (qualifier);
2088 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2089 {
2090 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2091 return 0;
2092 }
2093 break;
2094
2095 case AARCH64_OPND_SIMD_IMM:
2096 case AARCH64_OPND_SIMD_IMM_SFT:
2097 /* Qualifier check. */
2098 switch (qualifier)
2099 {
2100 case AARCH64_OPND_QLF_LSL:
2101 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2102 {
2103 set_other_error (mismatch_detail, idx,
2104 _("invalid shift operator"));
2105 return 0;
2106 }
2107 break;
2108 case AARCH64_OPND_QLF_MSL:
2109 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2110 {
2111 set_other_error (mismatch_detail, idx,
2112 _("invalid shift operator"));
2113 return 0;
2114 }
2115 break;
2116 case AARCH64_OPND_QLF_NIL:
2117 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2118 {
2119 set_other_error (mismatch_detail, idx,
2120 _("shift is not permitted"));
2121 return 0;
2122 }
2123 break;
2124 default:
2125 assert (0);
2126 return 0;
2127 }
2128 /* Is the immediate valid? */
2129 assert (idx == 1);
2130 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2131 {
2132 /* uimm8 or simm8 */
2133 if (!value_in_range_p (opnd->imm.value, -128, 255))
2134 {
2135 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2136 return 0;
2137 }
2138 }
2139 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2140 {
2141 /* uimm64 is not
2142 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2143 ffffffffgggggggghhhhhhhh'. */
2144 set_other_error (mismatch_detail, idx,
2145 _("invalid value for immediate"));
2146 return 0;
2147 }
2148 /* Is the shift amount valid? */
2149 switch (opnd->shifter.kind)
2150 {
2151 case AARCH64_MOD_LSL:
2152 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2153 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2154 {
2155 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2156 (size - 1) * 8);
2157 return 0;
2158 }
2159 if (!value_aligned_p (opnd->shifter.amount, 8))
2160 {
2161 set_unaligned_error (mismatch_detail, idx, 8);
2162 return 0;
2163 }
2164 break;
2165 case AARCH64_MOD_MSL:
2166 /* Only 8 and 16 are valid shift amount. */
2167 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2168 {
2169 set_other_error (mismatch_detail, idx,
2170 _("shift amount expected to be 0 or 16"));
2171 return 0;
2172 }
2173 break;
2174 default:
2175 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2176 {
2177 set_other_error (mismatch_detail, idx,
2178 _("invalid shift operator"));
2179 return 0;
2180 }
2181 break;
2182 }
2183 break;
2184
2185 case AARCH64_OPND_FPIMM:
2186 case AARCH64_OPND_SIMD_FPIMM:
2187 case AARCH64_OPND_SVE_FPIMM8:
2188 if (opnd->imm.is_fp == 0)
2189 {
2190 set_other_error (mismatch_detail, idx,
2191 _("floating-point immediate expected"));
2192 return 0;
2193 }
2194 /* The value is expected to be an 8-bit floating-point constant with
2195 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2196 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2197 instruction). */
2198 if (!value_in_range_p (opnd->imm.value, 0, 255))
2199 {
2200 set_other_error (mismatch_detail, idx,
2201 _("immediate out of range"));
2202 return 0;
2203 }
2204 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2205 {
2206 set_other_error (mismatch_detail, idx,
2207 _("invalid shift operator"));
2208 return 0;
2209 }
2210 break;
2211
2212 case AARCH64_OPND_SVE_AIMM:
2213 min_value = 0;
2214 sve_aimm:
2215 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2216 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2217 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2218 uvalue = opnd->imm.value;
2219 shift = opnd->shifter.amount;
2220 if (size == 1)
2221 {
2222 if (shift != 0)
2223 {
2224 set_other_error (mismatch_detail, idx,
2225 _("no shift amount allowed for"
2226 " 8-bit constants"));
2227 return 0;
2228 }
2229 }
2230 else
2231 {
2232 if (shift != 0 && shift != 8)
2233 {
2234 set_other_error (mismatch_detail, idx,
2235 _("shift amount must be 0 or 8"));
2236 return 0;
2237 }
2238 if (shift == 0 && (uvalue & 0xff) == 0)
2239 {
2240 shift = 8;
2241 uvalue = (int64_t) uvalue / 256;
2242 }
2243 }
2244 mask >>= shift;
2245 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2246 {
2247 set_other_error (mismatch_detail, idx,
2248 _("immediate too big for element size"));
2249 return 0;
2250 }
2251 uvalue = (uvalue - min_value) & mask;
2252 if (uvalue > 0xff)
2253 {
2254 set_other_error (mismatch_detail, idx,
2255 _("invalid arithmetic immediate"));
2256 return 0;
2257 }
2258 break;
2259
2260 case AARCH64_OPND_SVE_ASIMM:
2261 min_value = -128;
2262 goto sve_aimm;
2263
2264 case AARCH64_OPND_SVE_I1_HALF_ONE:
2265 assert (opnd->imm.is_fp);
2266 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2267 {
2268 set_other_error (mismatch_detail, idx,
2269 _("floating-point value must be 0.5 or 1.0"));
2270 return 0;
2271 }
2272 break;
2273
2274 case AARCH64_OPND_SVE_I1_HALF_TWO:
2275 assert (opnd->imm.is_fp);
2276 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2277 {
2278 set_other_error (mismatch_detail, idx,
2279 _("floating-point value must be 0.5 or 2.0"));
2280 return 0;
2281 }
2282 break;
2283
2284 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2285 assert (opnd->imm.is_fp);
2286 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2287 {
2288 set_other_error (mismatch_detail, idx,
2289 _("floating-point value must be 0.0 or 1.0"));
2290 return 0;
2291 }
2292 break;
2293
2294 case AARCH64_OPND_SVE_INV_LIMM:
2295 {
2296 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2297 uint64_t uimm = ~opnd->imm.value;
2298 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2299 {
2300 set_other_error (mismatch_detail, idx,
2301 _("immediate out of range"));
2302 return 0;
2303 }
2304 }
2305 break;
2306
2307 case AARCH64_OPND_SVE_LIMM_MOV:
2308 {
2309 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2310 uint64_t uimm = opnd->imm.value;
2311 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2312 {
2313 set_other_error (mismatch_detail, idx,
2314 _("immediate out of range"));
2315 return 0;
2316 }
2317 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2318 {
2319 set_other_error (mismatch_detail, idx,
2320 _("invalid replicated MOV immediate"));
2321 return 0;
2322 }
2323 }
2324 break;
2325
2326 case AARCH64_OPND_SVE_PATTERN_SCALED:
2327 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2328 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2329 {
2330 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2331 return 0;
2332 }
2333 break;
2334
2335 case AARCH64_OPND_SVE_SHLIMM_PRED:
2336 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2337 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2338 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2339 {
2340 set_imm_out_of_range_error (mismatch_detail, idx,
2341 0, 8 * size - 1);
2342 return 0;
2343 }
2344 break;
2345
2346 case AARCH64_OPND_SVE_SHRIMM_PRED:
2347 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2348 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2349 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2350 {
2351 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2352 return 0;
2353 }
2354 break;
2355
2356 default:
2357 break;
2358 }
2359 break;
2360
2361 case AARCH64_OPND_CLASS_CP_REG:
2362 /* Cn or Cm: 4-bit opcode field named for historical reasons.
2363 valid range: C0 - C15. */
2364 if (opnd->reg.regno > 15)
2365 {
2366 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2367 return 0;
2368 }
2369 break;
2370
2371 case AARCH64_OPND_CLASS_SYSTEM:
2372 switch (type)
2373 {
2374 case AARCH64_OPND_PSTATEFIELD:
2375 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2376 /* MSR UAO, #uimm4
2377 MSR PAN, #uimm4
2378 The immediate must be #0 or #1. */
2379 if ((opnd->pstatefield == 0x03 /* UAO. */
2380 || opnd->pstatefield == 0x04) /* PAN. */
2381 && opnds[1].imm.value > 1)
2382 {
2383 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2384 return 0;
2385 }
2386 /* MSR SPSel, #uimm4
2387 Uses uimm4 as a control value to select the stack pointer: if
2388 bit 0 is set it selects the current exception level's stack
2389 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2390 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2391 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2392 {
2393 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2394 return 0;
2395 }
2396 break;
2397 default:
2398 break;
2399 }
2400 break;
2401
2402 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2403 /* Get the upper bound for the element index. */
2404 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2405 /* Index out-of-range. */
2406 if (!value_in_range_p (opnd->reglane.index, 0, num))
2407 {
2408 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2409 return 0;
2410 }
2411 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2412 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2413 number is encoded in "size:M:Rm":
2414 size <Vm>
2415 00 RESERVED
2416 01 0:Rm
2417 10 M:Rm
2418 11 RESERVED */
2419 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2420 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2421 {
2422 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2423 return 0;
2424 }
2425 break;
2426
2427 case AARCH64_OPND_CLASS_MODIFIED_REG:
2428 assert (idx == 1 || idx == 2);
2429 switch (type)
2430 {
2431 case AARCH64_OPND_Rm_EXT:
2432 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
2433 && opnd->shifter.kind != AARCH64_MOD_LSL)
2434 {
2435 set_other_error (mismatch_detail, idx,
2436 _("extend operator expected"));
2437 return 0;
2438 }
2439 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2440 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2441 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2442 case. */
2443 if (!aarch64_stack_pointer_p (opnds + 0)
2444 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2445 {
2446 if (!opnd->shifter.operator_present)
2447 {
2448 set_other_error (mismatch_detail, idx,
2449 _("missing extend operator"));
2450 return 0;
2451 }
2452 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2453 {
2454 set_other_error (mismatch_detail, idx,
2455 _("'LSL' operator not allowed"));
2456 return 0;
2457 }
2458 }
2459 assert (opnd->shifter.operator_present /* Default to LSL. */
2460 || opnd->shifter.kind == AARCH64_MOD_LSL);
2461 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2462 {
2463 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2464 return 0;
2465 }
2466 /* In the 64-bit form, the final register operand is written as Wm
2467 for all but the (possibly omitted) UXTX/LSL and SXTX
2468 operators.
2469 N.B. GAS allows X register to be used with any operator as a
2470 programming convenience. */
2471 if (qualifier == AARCH64_OPND_QLF_X
2472 && opnd->shifter.kind != AARCH64_MOD_LSL
2473 && opnd->shifter.kind != AARCH64_MOD_UXTX
2474 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2475 {
2476 set_other_error (mismatch_detail, idx, _("W register expected"));
2477 return 0;
2478 }
2479 break;
2480
2481 case AARCH64_OPND_Rm_SFT:
2482 /* ROR is not available to the shifted register operand in
2483 arithmetic instructions. */
2484 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2485 {
2486 set_other_error (mismatch_detail, idx,
2487 _("shift operator expected"));
2488 return 0;
2489 }
2490 if (opnd->shifter.kind == AARCH64_MOD_ROR
2491 && opcode->iclass != log_shift)
2492 {
2493 set_other_error (mismatch_detail, idx,
2494 _("'ROR' operator not allowed"));
2495 return 0;
2496 }
2497 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2498 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2499 {
2500 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2501 return 0;
2502 }
2503 break;
2504
2505 default:
2506 break;
2507 }
2508 break;
2509
2510 default:
2511 break;
2512 }
2513
2514 return 1;
2515 }
2516
2517 /* Main entrypoint for the operand constraint checking.
2518
2519 Return 1 if operands of *INST meet the constraint applied by the operand
2520 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2521 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2522 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2523 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2524 error kind when it is notified that an instruction does not pass the check).
2525
2526 Un-determined operand qualifiers may get established during the process. */
2527
2528 int
2529 aarch64_match_operands_constraint (aarch64_inst *inst,
2530 aarch64_operand_error *mismatch_detail)
2531 {
2532 int i;
2533
2534 DEBUG_TRACE ("enter");
2535
2536 /* Check for cases where a source register needs to be the same as the
2537 destination register. Do this before matching qualifiers since if
2538 an instruction has both invalid tying and invalid qualifiers,
2539 the error about qualifiers would suggest several alternative
2540 instructions that also have invalid tying. */
2541 i = inst->opcode->tied_operand;
2542 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2543 {
2544 if (mismatch_detail)
2545 {
2546 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2547 mismatch_detail->index = i;
2548 mismatch_detail->error = NULL;
2549 }
2550 return 0;
2551 }
2552
2553 /* Match operands' qualifier.
2554 *INST has already had qualifier establish for some, if not all, of
2555 its operands; we need to find out whether these established
2556 qualifiers match one of the qualifier sequence in
2557 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2558 with the corresponding qualifier in such a sequence.
2559 Only basic operand constraint checking is done here; the more thorough
2560 constraint checking will carried out by operand_general_constraint_met_p,
2561 which has be to called after this in order to get all of the operands'
2562 qualifiers established. */
2563 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2564 {
2565 DEBUG_TRACE ("FAIL on operand qualifier matching");
2566 if (mismatch_detail)
2567 {
2568 /* Return an error type to indicate that it is the qualifier
2569 matching failure; we don't care about which operand as there
2570 are enough information in the opcode table to reproduce it. */
2571 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2572 mismatch_detail->index = -1;
2573 mismatch_detail->error = NULL;
2574 }
2575 return 0;
2576 }
2577
2578 /* Match operands' constraint. */
2579 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2580 {
2581 enum aarch64_opnd type = inst->opcode->operands[i];
2582 if (type == AARCH64_OPND_NIL)
2583 break;
2584 if (inst->operands[i].skip)
2585 {
2586 DEBUG_TRACE ("skip the incomplete operand %d", i);
2587 continue;
2588 }
2589 if (operand_general_constraint_met_p (inst->operands, i, type,
2590 inst->opcode, mismatch_detail) == 0)
2591 {
2592 DEBUG_TRACE ("FAIL on operand %d", i);
2593 return 0;
2594 }
2595 }
2596
2597 DEBUG_TRACE ("PASS");
2598
2599 return 1;
2600 }
2601
2602 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2603 Also updates the TYPE of each INST->OPERANDS with the corresponding
2604 value of OPCODE->OPERANDS.
2605
2606 Note that some operand qualifiers may need to be manually cleared by
2607 the caller before it further calls the aarch64_opcode_encode; by
2608 doing this, it helps the qualifier matching facilities work
2609 properly. */
2610
2611 const aarch64_opcode*
2612 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2613 {
2614 int i;
2615 const aarch64_opcode *old = inst->opcode;
2616
2617 inst->opcode = opcode;
2618
2619 /* Update the operand types. */
2620 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2621 {
2622 inst->operands[i].type = opcode->operands[i];
2623 if (opcode->operands[i] == AARCH64_OPND_NIL)
2624 break;
2625 }
2626
2627 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2628
2629 return old;
2630 }
2631
2632 int
2633 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2634 {
2635 int i;
2636 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2637 if (operands[i] == operand)
2638 return i;
2639 else if (operands[i] == AARCH64_OPND_NIL)
2640 break;
2641 return -1;
2642 }
2643 \f
/* R0...R30, followed by FOR31.  Expands the 31 numbered register names
   via the R macro and appends the caller-supplied name for R31 (which
   is either the stack pointer or the zero register, depending on the
   bank).  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7),	\
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15),	\
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23),	\
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* Integer register name table, indexed [has_zr][is_64][regno]:
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  Indexed [is_d][regno].  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2674
2675 /* Return the integer register name.
2676 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2677
2678 static inline const char *
2679 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2680 {
2681 const int has_zr = sp_reg_p ? 0 : 1;
2682 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2683 return int_reg[has_zr][is_64][regno];
2684 }
2685
2686 /* Like get_int_reg_name, but IS_64 is always 1. */
2687
2688 static inline const char *
2689 get_64bit_int_reg_name (int regno, int sp_reg_p)
2690 {
2691 const int has_zr = sp_reg_p ? 0 : 1;
2692 return int_reg[has_zr][1][regno];
2693 }
2694
2695 /* Get the name of the integer offset register in OPND, using the shift type
2696 to decide whether it's a word or doubleword. */
2697
2698 static inline const char *
2699 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2700 {
2701 switch (opnd->shifter.kind)
2702 {
2703 case AARCH64_MOD_UXTW:
2704 case AARCH64_MOD_SXTW:
2705 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2706
2707 case AARCH64_MOD_LSL:
2708 case AARCH64_MOD_SXTX:
2709 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2710
2711 default:
2712 abort ();
2713 }
2714 }
2715
2716 /* Get the name of the SVE vector offset register in OPND, using the operand
2717 qualifier to decide whether the suffix should be .S or .D. */
2718
2719 static inline const char *
2720 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2721 {
2722 assert (qualifier == AARCH64_OPND_QLF_S_S
2723 || qualifier == AARCH64_OPND_QLF_S_D);
2724 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2725 }
2726
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union allows the bit pattern produced by expand_fp_imm to be
   reinterpreted as the corresponding floating-point type.  */

typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision (see
   expand_fp_imm), so this deliberately mirrors single_conv_t rather
   than using a 16-bit representation.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2746
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Initialize so that a release build (asserts disabled) cannot return
     an indeterminate value on the unsupported-size path below.  */
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      /* Assemble the upper 32 bits of the double, then shift them into
	 place; the low 32 bits of the expanded value are zero.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)  */
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>  */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7>  */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)  */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4)  */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>  */
    }
  else
    {
      /* An unsupported size; IMM remains zero.  */
      assert (0);
    }

  return imm;
}
2790
2791 /* Produce the string representation of the register list operand *OPND
2792 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2793 the register name that comes before the register number, such as "v". */
2794 static void
2795 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2796 const char *prefix)
2797 {
2798 const int num_regs = opnd->reglist.num_regs;
2799 const int first_reg = opnd->reglist.first_regno;
2800 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2801 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2802 char tb[8]; /* Temporary buffer. */
2803
2804 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2805 assert (num_regs >= 1 && num_regs <= 4);
2806
2807 /* Prepare the index if any. */
2808 if (opnd->reglist.has_index)
2809 snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
2810 else
2811 tb[0] = '\0';
2812
2813 /* The hyphenated form is preferred for disassembly if there are
2814 more than two registers in the list, and the register numbers
2815 are monotonically increasing in increments of one. */
2816 if (num_regs > 2 && last_reg > first_reg)
2817 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2818 prefix, last_reg, qlf_name, tb);
2819 else
2820 {
2821 const int reg0 = first_reg;
2822 const int reg1 = (first_reg + 1) & 0x1f;
2823 const int reg2 = (first_reg + 2) & 0x1f;
2824 const int reg3 = (first_reg + 3) & 0x1f;
2825
2826 switch (num_regs)
2827 {
2828 case 1:
2829 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2830 break;
2831 case 2:
2832 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2833 prefix, reg1, qlf_name, tb);
2834 break;
2835 case 3:
2836 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2837 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2838 prefix, reg2, qlf_name, tb);
2839 break;
2840 case 4:
2841 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2842 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2843 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2844 break;
2845 }
2846 }
2847 }
2848
2849 /* Print the register+immediate address in OPND to BUF, which has SIZE
2850 characters. BASE is the name of the base register. */
2851
2852 static void
2853 print_immediate_offset_address (char *buf, size_t size,
2854 const aarch64_opnd_info *opnd,
2855 const char *base)
2856 {
2857 if (opnd->addr.writeback)
2858 {
2859 if (opnd->addr.preind)
2860 snprintf (buf, size, "[%s,#%d]!", base, opnd->addr.offset.imm);
2861 else
2862 snprintf (buf, size, "[%s],#%d", base, opnd->addr.offset.imm);
2863 }
2864 else
2865 {
2866 if (opnd->shifter.operator_present)
2867 {
2868 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2869 snprintf (buf, size, "[%s,#%d,mul vl]",
2870 base, opnd->addr.offset.imm);
2871 }
2872 else if (opnd->addr.offset.imm)
2873 snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm);
2874 else
2875 snprintf (buf, size, "[%s]", base);
2876 }
2877 }
2878
2879 /* Produce the string representation of the register offset address operand
2880 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2881 the names of the base and offset registers. */
2882 static void
2883 print_register_offset_address (char *buf, size_t size,
2884 const aarch64_opnd_info *opnd,
2885 const char *base, const char *offset)
2886 {
2887 char tb[16]; /* Temporary buffer. */
2888 bfd_boolean print_extend_p = TRUE;
2889 bfd_boolean print_amount_p = TRUE;
2890 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2891
2892 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2893 || !opnd->shifter.amount_present))
2894 {
2895 /* Not print the shift/extend amount when the amount is zero and
2896 when it is not the special case of 8-bit load/store instruction. */
2897 print_amount_p = FALSE;
2898 /* Likewise, no need to print the shift operator LSL in such a
2899 situation. */
2900 if (opnd->shifter.kind == AARCH64_MOD_LSL)
2901 print_extend_p = FALSE;
2902 }
2903
2904 /* Prepare for the extend/shift. */
2905 if (print_extend_p)
2906 {
2907 if (print_amount_p)
2908 snprintf (tb, sizeof (tb), ",%s #%" PRIi64, shift_name,
2909 opnd->shifter.amount);
2910 else
2911 snprintf (tb, sizeof (tb), ",%s", shift_name);
2912 }
2913 else
2914 tb[0] = '\0';
2915
2916 snprintf (buf, size, "[%s,%s%s]", base, offset, tb);
2917 }
2918
2919 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2920 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2921 PC, PCREL_P and ADDRESS are used to pass in and return information about
2922 the PC-relative address calculation, where the PC value is passed in
2923 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2924 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2925 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2926
2927 The function serves both the disassembler and the assembler diagnostics
2928 issuer, which is the reason why it lives in this file. */
2929
2930 void
2931 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2932 const aarch64_opcode *opcode,
2933 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2934 bfd_vma *address)
2935 {
2936 int i;
2937 const char *name = NULL;
2938 const aarch64_opnd_info *opnd = opnds + idx;
2939 enum aarch64_modifier_kind kind;
2940 uint64_t addr, enum_value;
2941
2942 buf[0] = '\0';
2943 if (pcrel_p)
2944 *pcrel_p = 0;
2945
2946 switch (opnd->type)
2947 {
2948 case AARCH64_OPND_Rd:
2949 case AARCH64_OPND_Rn:
2950 case AARCH64_OPND_Rm:
2951 case AARCH64_OPND_Rt:
2952 case AARCH64_OPND_Rt2:
2953 case AARCH64_OPND_Rs:
2954 case AARCH64_OPND_Ra:
2955 case AARCH64_OPND_Rt_SYS:
2956 case AARCH64_OPND_PAIRREG:
2957 case AARCH64_OPND_SVE_Rm:
2958 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2959 the <ic_op>, therefore we we use opnd->present to override the
2960 generic optional-ness information. */
2961 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2962 break;
2963 /* Omit the operand, e.g. RET. */
2964 if (optional_operand_p (opcode, idx)
2965 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2966 break;
2967 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2968 || opnd->qualifier == AARCH64_OPND_QLF_X);
2969 snprintf (buf, size, "%s",
2970 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2971 break;
2972
2973 case AARCH64_OPND_Rd_SP:
2974 case AARCH64_OPND_Rn_SP:
2975 case AARCH64_OPND_SVE_Rn_SP:
2976 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2977 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2978 || opnd->qualifier == AARCH64_OPND_QLF_X
2979 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2980 snprintf (buf, size, "%s",
2981 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2982 break;
2983
2984 case AARCH64_OPND_Rm_EXT:
2985 kind = opnd->shifter.kind;
2986 assert (idx == 1 || idx == 2);
2987 if ((aarch64_stack_pointer_p (opnds)
2988 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2989 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2990 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2991 && kind == AARCH64_MOD_UXTW)
2992 || (opnd->qualifier == AARCH64_OPND_QLF_X
2993 && kind == AARCH64_MOD_UXTX)))
2994 {
2995 /* 'LSL' is the preferred form in this case. */
2996 kind = AARCH64_MOD_LSL;
2997 if (opnd->shifter.amount == 0)
2998 {
2999 /* Shifter omitted. */
3000 snprintf (buf, size, "%s",
3001 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3002 break;
3003 }
3004 }
3005 if (opnd->shifter.amount)
3006 snprintf (buf, size, "%s, %s #%" PRIi64,
3007 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3008 aarch64_operand_modifiers[kind].name,
3009 opnd->shifter.amount);
3010 else
3011 snprintf (buf, size, "%s, %s",
3012 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3013 aarch64_operand_modifiers[kind].name);
3014 break;
3015
3016 case AARCH64_OPND_Rm_SFT:
3017 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3018 || opnd->qualifier == AARCH64_OPND_QLF_X);
3019 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3020 snprintf (buf, size, "%s",
3021 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3022 else
3023 snprintf (buf, size, "%s, %s #%" PRIi64,
3024 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3025 aarch64_operand_modifiers[opnd->shifter.kind].name,
3026 opnd->shifter.amount);
3027 break;
3028
3029 case AARCH64_OPND_Fd:
3030 case AARCH64_OPND_Fn:
3031 case AARCH64_OPND_Fm:
3032 case AARCH64_OPND_Fa:
3033 case AARCH64_OPND_Ft:
3034 case AARCH64_OPND_Ft2:
3035 case AARCH64_OPND_Sd:
3036 case AARCH64_OPND_Sn:
3037 case AARCH64_OPND_Sm:
3038 case AARCH64_OPND_SVE_VZn:
3039 case AARCH64_OPND_SVE_Vd:
3040 case AARCH64_OPND_SVE_Vm:
3041 case AARCH64_OPND_SVE_Vn:
3042 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3043 opnd->reg.regno);
3044 break;
3045
3046 case AARCH64_OPND_Vd:
3047 case AARCH64_OPND_Vn:
3048 case AARCH64_OPND_Vm:
3049 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3050 aarch64_get_qualifier_name (opnd->qualifier));
3051 break;
3052
3053 case AARCH64_OPND_Ed:
3054 case AARCH64_OPND_En:
3055 case AARCH64_OPND_Em:
3056 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3057 aarch64_get_qualifier_name (opnd->qualifier),
3058 opnd->reglane.index);
3059 break;
3060
3061 case AARCH64_OPND_VdD1:
3062 case AARCH64_OPND_VnD1:
3063 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3064 break;
3065
3066 case AARCH64_OPND_LVn:
3067 case AARCH64_OPND_LVt:
3068 case AARCH64_OPND_LVt_AL:
3069 case AARCH64_OPND_LEt:
3070 print_register_list (buf, size, opnd, "v");
3071 break;
3072
3073 case AARCH64_OPND_SVE_Pd:
3074 case AARCH64_OPND_SVE_Pg3:
3075 case AARCH64_OPND_SVE_Pg4_5:
3076 case AARCH64_OPND_SVE_Pg4_10:
3077 case AARCH64_OPND_SVE_Pg4_16:
3078 case AARCH64_OPND_SVE_Pm:
3079 case AARCH64_OPND_SVE_Pn:
3080 case AARCH64_OPND_SVE_Pt:
3081 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3082 snprintf (buf, size, "p%d", opnd->reg.regno);
3083 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3084 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3085 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3086 aarch64_get_qualifier_name (opnd->qualifier));
3087 else
3088 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3089 aarch64_get_qualifier_name (opnd->qualifier));
3090 break;
3091
3092 case AARCH64_OPND_SVE_Za_5:
3093 case AARCH64_OPND_SVE_Za_16:
3094 case AARCH64_OPND_SVE_Zd:
3095 case AARCH64_OPND_SVE_Zm_5:
3096 case AARCH64_OPND_SVE_Zm_16:
3097 case AARCH64_OPND_SVE_Zn:
3098 case AARCH64_OPND_SVE_Zt:
3099 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3100 snprintf (buf, size, "z%d", opnd->reg.regno);
3101 else
3102 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3103 aarch64_get_qualifier_name (opnd->qualifier));
3104 break;
3105
3106 case AARCH64_OPND_SVE_ZnxN:
3107 case AARCH64_OPND_SVE_ZtxN:
3108 print_register_list (buf, size, opnd, "z");
3109 break;
3110
3111 case AARCH64_OPND_SVE_Zn_INDEX:
3112 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3113 aarch64_get_qualifier_name (opnd->qualifier),
3114 opnd->reglane.index);
3115 break;
3116
3117 case AARCH64_OPND_Cn:
3118 case AARCH64_OPND_Cm:
3119 snprintf (buf, size, "C%d", opnd->reg.regno);
3120 break;
3121
3122 case AARCH64_OPND_IDX:
3123 case AARCH64_OPND_IMM:
3124 case AARCH64_OPND_WIDTH:
3125 case AARCH64_OPND_UIMM3_OP1:
3126 case AARCH64_OPND_UIMM3_OP2:
3127 case AARCH64_OPND_BIT_NUM:
3128 case AARCH64_OPND_IMM_VLSL:
3129 case AARCH64_OPND_IMM_VLSR:
3130 case AARCH64_OPND_SHLL_IMM:
3131 case AARCH64_OPND_IMM0:
3132 case AARCH64_OPND_IMMR:
3133 case AARCH64_OPND_IMMS:
3134 case AARCH64_OPND_FBITS:
3135 case AARCH64_OPND_SIMM5:
3136 case AARCH64_OPND_SVE_SHLIMM_PRED:
3137 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3138 case AARCH64_OPND_SVE_SHRIMM_PRED:
3139 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3140 case AARCH64_OPND_SVE_SIMM5:
3141 case AARCH64_OPND_SVE_SIMM5B:
3142 case AARCH64_OPND_SVE_SIMM6:
3143 case AARCH64_OPND_SVE_SIMM8:
3144 case AARCH64_OPND_SVE_UIMM3:
3145 case AARCH64_OPND_SVE_UIMM7:
3146 case AARCH64_OPND_SVE_UIMM8:
3147 case AARCH64_OPND_SVE_UIMM8_53:
3148 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3149 break;
3150
3151 case AARCH64_OPND_SVE_I1_HALF_ONE:
3152 case AARCH64_OPND_SVE_I1_HALF_TWO:
3153 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3154 {
3155 single_conv_t c;
3156 c.i = opnd->imm.value;
3157 snprintf (buf, size, "#%.1f", c.f);
3158 break;
3159 }
3160
3161 case AARCH64_OPND_SVE_PATTERN:
3162 if (optional_operand_p (opcode, idx)
3163 && opnd->imm.value == get_optional_operand_default_value (opcode))
3164 break;
3165 enum_value = opnd->imm.value;
3166 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3167 if (aarch64_sve_pattern_array[enum_value])
3168 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3169 else
3170 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3171 break;
3172
3173 case AARCH64_OPND_SVE_PATTERN_SCALED:
3174 if (optional_operand_p (opcode, idx)
3175 && !opnd->shifter.operator_present
3176 && opnd->imm.value == get_optional_operand_default_value (opcode))
3177 break;
3178 enum_value = opnd->imm.value;
3179 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3180 if (aarch64_sve_pattern_array[opnd->imm.value])
3181 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3182 else
3183 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3184 if (opnd->shifter.operator_present)
3185 {
3186 size_t len = strlen (buf);
3187 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3188 aarch64_operand_modifiers[opnd->shifter.kind].name,
3189 opnd->shifter.amount);
3190 }
3191 break;
3192
3193 case AARCH64_OPND_SVE_PRFOP:
3194 enum_value = opnd->imm.value;
3195 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3196 if (aarch64_sve_prfop_array[enum_value])
3197 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3198 else
3199 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3200 break;
3201
3202 case AARCH64_OPND_IMM_MOV:
3203 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3204 {
3205 case 4: /* e.g. MOV Wd, #<imm32>. */
3206 {
3207 int imm32 = opnd->imm.value;
3208 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3209 }
3210 break;
3211 case 8: /* e.g. MOV Xd, #<imm64>. */
3212 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3213 opnd->imm.value, opnd->imm.value);
3214 break;
3215 default: assert (0);
3216 }
3217 break;
3218
3219 case AARCH64_OPND_FPIMM0:
3220 snprintf (buf, size, "#0.0");
3221 break;
3222
3223 case AARCH64_OPND_LIMM:
3224 case AARCH64_OPND_AIMM:
3225 case AARCH64_OPND_HALF:
3226 case AARCH64_OPND_SVE_INV_LIMM:
3227 case AARCH64_OPND_SVE_LIMM:
3228 case AARCH64_OPND_SVE_LIMM_MOV:
3229 if (opnd->shifter.amount)
3230 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3231 opnd->shifter.amount);
3232 else
3233 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3234 break;
3235
3236 case AARCH64_OPND_SIMD_IMM:
3237 case AARCH64_OPND_SIMD_IMM_SFT:
3238 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3239 || opnd->shifter.kind == AARCH64_MOD_NONE)
3240 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3241 else
3242 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3243 aarch64_operand_modifiers[opnd->shifter.kind].name,
3244 opnd->shifter.amount);
3245 break;
3246
3247 case AARCH64_OPND_SVE_AIMM:
3248 case AARCH64_OPND_SVE_ASIMM:
3249 if (opnd->shifter.amount)
3250 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3251 opnd->shifter.amount);
3252 else
3253 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3254 break;
3255
3256 case AARCH64_OPND_FPIMM:
3257 case AARCH64_OPND_SIMD_FPIMM:
3258 case AARCH64_OPND_SVE_FPIMM8:
3259 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3260 {
3261 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3262 {
3263 half_conv_t c;
3264 c.i = expand_fp_imm (2, opnd->imm.value);
3265 snprintf (buf, size, "#%.18e", c.f);
3266 }
3267 break;
3268 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3269 {
3270 single_conv_t c;
3271 c.i = expand_fp_imm (4, opnd->imm.value);
3272 snprintf (buf, size, "#%.18e", c.f);
3273 }
3274 break;
3275 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3276 {
3277 double_conv_t c;
3278 c.i = expand_fp_imm (8, opnd->imm.value);
3279 snprintf (buf, size, "#%.18e", c.d);
3280 }
3281 break;
3282 default: assert (0);
3283 }
3284 break;
3285
3286 case AARCH64_OPND_CCMP_IMM:
3287 case AARCH64_OPND_NZCV:
3288 case AARCH64_OPND_EXCEPTION:
3289 case AARCH64_OPND_UIMM4:
3290 case AARCH64_OPND_UIMM7:
3291 if (optional_operand_p (opcode, idx) == TRUE
3292 && (opnd->imm.value ==
3293 (int64_t) get_optional_operand_default_value (opcode)))
3294 /* Omit the operand, e.g. DCPS1. */
3295 break;
3296 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3297 break;
3298
3299 case AARCH64_OPND_COND:
3300 case AARCH64_OPND_COND1:
3301 snprintf (buf, size, "%s", opnd->cond->names[0]);
3302 break;
3303
3304 case AARCH64_OPND_ADDR_ADRP:
3305 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3306 + opnd->imm.value;
3307 if (pcrel_p)
3308 *pcrel_p = 1;
3309 if (address)
3310 *address = addr;
3311 /* This is not necessary during the disassembling, as print_address_func
3312 in the disassemble_info will take care of the printing. But some
3313 other callers may be still interested in getting the string in *STR,
3314 so here we do snprintf regardless. */
3315 snprintf (buf, size, "#0x%" PRIx64, addr);
3316 break;
3317
3318 case AARCH64_OPND_ADDR_PCREL14:
3319 case AARCH64_OPND_ADDR_PCREL19:
3320 case AARCH64_OPND_ADDR_PCREL21:
3321 case AARCH64_OPND_ADDR_PCREL26:
3322 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3323 if (pcrel_p)
3324 *pcrel_p = 1;
3325 if (address)
3326 *address = addr;
3327 /* This is not necessary during the disassembling, as print_address_func
3328 in the disassemble_info will take care of the printing. But some
3329 other callers may be still interested in getting the string in *STR,
3330 so here we do snprintf regardless. */
3331 snprintf (buf, size, "#0x%" PRIx64, addr);
3332 break;
3333
3334 case AARCH64_OPND_ADDR_SIMPLE:
3335 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3336 case AARCH64_OPND_SIMD_ADDR_POST:
3337 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3338 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3339 {
3340 if (opnd->addr.offset.is_reg)
3341 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3342 else
3343 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3344 }
3345 else
3346 snprintf (buf, size, "[%s]", name);
3347 break;
3348
3349 case AARCH64_OPND_ADDR_REGOFF:
3350 case AARCH64_OPND_SVE_ADDR_RR:
3351 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3352 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3353 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3354 case AARCH64_OPND_SVE_ADDR_RX:
3355 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3356 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3357 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3358 print_register_offset_address
3359 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3360 get_offset_int_reg_name (opnd));
3361 break;
3362
3363 case AARCH64_OPND_SVE_ADDR_RZ:
3364 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3365 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3366 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3367 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3368 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3369 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3370 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3371 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3372 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3373 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3374 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3375 print_register_offset_address
3376 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3377 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3378 break;
3379
3380 case AARCH64_OPND_ADDR_SIMM7:
3381 case AARCH64_OPND_ADDR_SIMM9:
3382 case AARCH64_OPND_ADDR_SIMM9_2:
3383 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3384 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3385 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3386 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3387 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3388 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3389 case AARCH64_OPND_SVE_ADDR_RI_U6:
3390 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3391 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3392 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3393 print_immediate_offset_address
3394 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3395 break;
3396
3397 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3398 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3399 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3400 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3401 print_immediate_offset_address
3402 (buf, size, opnd,
3403 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3404 break;
3405
3406 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3407 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3408 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3409 print_register_offset_address
3410 (buf, size, opnd,
3411 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3412 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3413 break;
3414
3415 case AARCH64_OPND_ADDR_UIMM12:
3416 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3417 if (opnd->addr.offset.imm)
3418 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
3419 else
3420 snprintf (buf, size, "[%s]", name);
3421 break;
3422
3423 case AARCH64_OPND_SYSREG:
3424 for (i = 0; aarch64_sys_regs[i].name; ++i)
3425 if (aarch64_sys_regs[i].value == opnd->sysreg
3426 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
3427 break;
3428 if (aarch64_sys_regs[i].name)
3429 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
3430 else
3431 {
3432 /* Implementation defined system register. */
3433 unsigned int value = opnd->sysreg;
3434 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3435 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3436 value & 0x7);
3437 }
3438 break;
3439
3440 case AARCH64_OPND_PSTATEFIELD:
3441 for (i = 0; aarch64_pstatefields[i].name; ++i)
3442 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3443 break;
3444 assert (aarch64_pstatefields[i].name);
3445 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3446 break;
3447
3448 case AARCH64_OPND_SYSREG_AT:
3449 case AARCH64_OPND_SYSREG_DC:
3450 case AARCH64_OPND_SYSREG_IC:
3451 case AARCH64_OPND_SYSREG_TLBI:
3452 snprintf (buf, size, "%s", opnd->sysins_op->name);
3453 break;
3454
3455 case AARCH64_OPND_BARRIER:
3456 snprintf (buf, size, "%s", opnd->barrier->name);
3457 break;
3458
3459 case AARCH64_OPND_BARRIER_ISB:
3460 /* Operand can be omitted, e.g. in DCPS1. */
3461 if (! optional_operand_p (opcode, idx)
3462 || (opnd->barrier->value
3463 != get_optional_operand_default_value (opcode)))
3464 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3465 break;
3466
3467 case AARCH64_OPND_PRFOP:
3468 if (opnd->prfop->name != NULL)
3469 snprintf (buf, size, "%s", opnd->prfop->name);
3470 else
3471 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3472 break;
3473
3474 case AARCH64_OPND_BARRIER_PSB:
3475 snprintf (buf, size, "%s", opnd->hint_option->name);
3476 break;
3477
3478 default:
3479 assert (0);
3480 }
3481 }
3482 \f
/* Pack a system register / system instruction encoding (op0, op1, CRn,
   CRm, op2) into a single integer value.  After the final >> 5 the
   fields sit at: op0 -> bits [15:14], op1 -> bits [13:11], CRn ->
   bits [10:7], CRm -> bits [6:3], op2 -> bits [2:0].  This layout
   matches the field extraction used when printing an
   implementation-defined system register as s<op0>_<op1>_c<CRn>_c<CRm>_<op2>.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* For 3.9.3 Instructions for Accessing Special Purpose Registers:
   these all have op0 == 3 and CRn == 4.  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* For 3.9.10 System Instructions: these all have op0 == 1.  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Shorthand for the CRn/CRm register numbers used in the encoding
   tables below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

/* Ensure our local definition wins over any like-named macro that may
   have leaked in from an included header.  */
#ifdef F_DEPRECATED
#undef F_DEPRECATED
#endif
#define F_DEPRECATED 0x1 /* Deprecated system register.  */

#ifdef F_ARCHEXT
#undef F_ARCHEXT
#endif
#define F_ARCHEXT 0x2 /* Architecture dependent system register.  */

#ifdef F_HASXT
#undef F_HASXT
#endif
#define F_HASXT 0x4 /* System instruction register <Xt>
			operand.  */
3522
3523
/* TODO: there are two more issues that need to be resolved:
   1. handle read-only and write-only system registers;
   2. handle cpu-implementation-defined system registers.  */
/* Table of known system registers: each entry maps the register name to
   its packed (op0,op1,CRn,CRm,op2) encoding (see CPENC/CPEN_ above) and
   a set of F_* flags.  Entries flagged F_ARCHEXT are only available
   when the relevant architecture extension is present (checked by
   aarch64_sys_reg_supported_p); entries flagged F_DEPRECATED are still
   accepted but are skipped when choosing a name for disassembly (see
   the AARCH64_OPND_SYSREG printing code).  The table ends with a
   null-name sentinel entry.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
  { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
  { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
  { "elr_el1", CPEN_(0,C0,1), 0 },
  { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
  { "sp_el0", CPEN_(0,C1,0), 0 },
  { "spsel", CPEN_(0,C2,0), 0 },
  { "daif", CPEN_(3,C2,1), 0 },
  { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
  { "pan", CPEN_(0,C2,3), F_ARCHEXT },
  { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
  { "nzcv", CPEN_(3,C2,0), 0 },
  { "fpcr", CPEN_(3,C4,0), 0 },
  { "fpsr", CPEN_(3,C4,1), 0 },
  { "dspsr_el0", CPEN_(3,C5,0), 0 },
  { "dlr_el0", CPEN_(3,C5,1), 0 },
  { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
  { "elr_el2", CPEN_(4,C0,1), 0 },
  { "sp_el1", CPEN_(4,C1,0), 0 },
  { "spsr_irq", CPEN_(4,C3,0), 0 },
  { "spsr_abt", CPEN_(4,C3,1), 0 },
  { "spsr_und", CPEN_(4,C3,2), 0 },
  { "spsr_fiq", CPEN_(4,C3,3), 0 },
  { "spsr_el3", CPEN_(6,C0,0), 0 },
  { "elr_el3", CPEN_(6,C0,1), 0 },
  { "sp_el2", CPEN_(6,C1,0), 0 },
  { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
  { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
  { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
  { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
  { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
  { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
  { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
  { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
  { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
  { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
  { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
  { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
  { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
  { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
  { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
  { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
  { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
  { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
  { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
  { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
  { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
  { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
  { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
  { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
  { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
  { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
  { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
  { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
  { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
  { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
  { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
  { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
  { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
  { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
  { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
  { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
  { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
  { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
  { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
  { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
  { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
  { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
  { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
  { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
  { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
  { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
  { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
  { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
  { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
  { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
  { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
  { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
  { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
  { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
  { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
  { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
  { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
  { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
  { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
  { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
  { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
  { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
  { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
  { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
  { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
  { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
  { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
  { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
  { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
  { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
  { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
  { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
  { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
  { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
  { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
  { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
  { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
  { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
  { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
  { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
  { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
  { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
  { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
  { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
  { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
  { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
  { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
  { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
  { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
  { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
  { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
  { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
  { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
  { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
  { "far_el1", CPENC(3,0,C6,C0,0), 0 },
  { "far_el2", CPENC(3,4,C6,C0,0), 0 },
  { "far_el3", CPENC(3,6,C6,C0,0), 0 },
  { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
  { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
  { "par_el1", CPENC(3,0,C7,C4,0), 0 },
  { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
  { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
  { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
  { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
  { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
  { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
  { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
  { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
  { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
  { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
  { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
  { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
  { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
  { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
  { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
  { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
  { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
  { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
  { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
  { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
  { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
  { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
  { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
  { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
  { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
  { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
  { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
  { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
  { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
  { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
  { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
  { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
  { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
  { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
  { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
  { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
  { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
  { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
  { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
  { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
  { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
  { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
  { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
  { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
  { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
  { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
  { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
  { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
  { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
  { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
  { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
  { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
  { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
  { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
  { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
  { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
  { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
  { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
  { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
  { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
  { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
  { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
  { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
  { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
  { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
  { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
  { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
  { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
  { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
  { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
  { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
  { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
  { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
  { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
  { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
  { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
  { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
  { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
  { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
  { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
  { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
  { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
  { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
  { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
  { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
  { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
  { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
  { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
  { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
  { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
  { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
  { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
  { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
  { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
  { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
  { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
  { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
  { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
  { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
  { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
  { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
  { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
  { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
  { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
  { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
  { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
  { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
  { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
  { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
  { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
  { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
  { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
  { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
  { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
  { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
  { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
  { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
  { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
  { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
  { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
  { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
  { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
  { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
  { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
  { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
  { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
  { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
  { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
  { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
  { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
  { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
  { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
  { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
  { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
  { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
  { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
  { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
  { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
  { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
  { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
  { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
  { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
  { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
  { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
  { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
  { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
  { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
  { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
  { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
  { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
  { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
  { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
  { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
  { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
  { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
  { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
  { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
  { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
  { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
  { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
  { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
  { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
  { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
  { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
  { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
  { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
  { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
  { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
  { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
  { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
  { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
  { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
  { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
  { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
  { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
  { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
  { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
  { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
  { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
  { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
  { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
  { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
  { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
  { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
  { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
  { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
  { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
  { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
  { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
  { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
  { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
  { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
  { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
  { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
  { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
  { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
  { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
  { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
  { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
  { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
  { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
  { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
  { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
  { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
  { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
  { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
  { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
  { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
  { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
  { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
  { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
  { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
  { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
  { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
  { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
  { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
  { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
  { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
  { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
  { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
  { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
  { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
  { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
  { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
  { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
  { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
  { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
  { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
  { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
  { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
  { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
  { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
  { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
  { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
  { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
  /* Null-name sentinel marking the end of the table.  */
  { 0, CPENC(0,0,0,0,0), 0 },
};
3891
3892 bfd_boolean
3893 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3894 {
3895 return (reg->flags & F_DEPRECATED) != 0;
3896 }
3897
/* Return TRUE if the system register described by REG is available under
   the architecture feature set FEATURES.  Registers without F_ARCHEXT
   belong to the base architecture and are always supported; otherwise
   REG->value (a CPENC/CPEN_ encoding, matching the aarch64_sys_regs
   table) is compared against the encodings of each optional extension's
   registers.  */
bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  /* Base-architecture registers need no feature check.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
     ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  /* No matching extension restriction: the register is supported.  */
  return TRUE;
}
4004
/* PSTATE field names accepted by MSR (immediate), with their operand
   encodings.  Entries flagged F_ARCHEXT are only valid when the matching
   architecture extension is enabled; see aarch64_pstatefield_supported_p.
   A null name terminates the table.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",   0x05, 0 },
  { "daifset", 0x1e, 0 },
  { "daifclr", 0x1f, 0 },
  { "pan",     0x04, F_ARCHEXT },  /* Gated on AARCH64_FEATURE_PAN.  */
  { "uao",     0x03, F_ARCHEXT },  /* Gated on AARCH64_FEATURE_V8_2.  */
  { 0,         CPENC(0,0,0,0,0), 0 },
};
4014
4015 bfd_boolean
4016 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4017 const aarch64_sys_reg *reg)
4018 {
4019 if (!(reg->flags & F_ARCHEXT))
4020 return TRUE;
4021
4022 /* PAN. Values are from aarch64_pstatefields. */
4023 if (reg->value == 0x04
4024 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4025 return FALSE;
4026
4027 /* UAO. Values are from aarch64_pstatefields. */
4028 if (reg->value == 0x03
4029 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4030 return FALSE;
4031
4032 return TRUE;
4033 }
4034
/* IC (instruction-cache maintenance) operation names and encodings.
   F_HASXT marks operations that take an address operand in Xt.
   A null name terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
4042
/* DC (data-cache maintenance) operation names and encodings.  F_HASXT
   marks operations that take an address operand in Xt; "cvap" is
   additionally gated on ARMv8.2 (F_ARCHEXT) — see
   aarch64_sys_ins_reg_supported_p.  A null name terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",        CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",        CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",        CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
4056
/* AT (address translation) operation names and encodings.  All entries
   take an address operand in Xt (F_HASXT); "s1e1rp"/"s1e1wp" are
   additionally gated on ARMv8.2 (F_ARCHEXT) — see
   aarch64_sys_ins_reg_supported_p.  A null name terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
4075
/* TLBI (TLB invalidation) operation names and encodings.  F_HASXT marks
   operations that take an argument in Xt; entries without it take no
   register operand.  A null name terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
4112
4113 bfd_boolean
4114 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4115 {
4116 return (sys_ins_reg->flags & F_HASXT) != 0;
4117 }
4118
4119 extern bfd_boolean
4120 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4121 const aarch64_sys_ins_reg *reg)
4122 {
4123 if (!(reg->flags & F_ARCHEXT))
4124 return TRUE;
4125
4126 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4127 if (reg->value == CPENS (3, C7, C12, 1)
4128 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4129 return FALSE;
4130
4131 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4132 if ((reg->value == CPENS (0, C7, C9, 0)
4133 || reg->value == CPENS (0, C7, C9, 1))
4134 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4135 return FALSE;
4136
4137 return TRUE;
4138 }
4139
/* The Cn register-name macros were only needed by the system register
   and system instruction tables above; retire them before including the
   generated opcode tables.  */
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* Extract bit BT of INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4159
4160 static bfd_boolean
4161 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4162 const aarch64_insn insn)
4163 {
4164 int t = BITS (insn, 4, 0);
4165 int n = BITS (insn, 9, 5);
4166 int t2 = BITS (insn, 14, 10);
4167
4168 if (BIT (insn, 23))
4169 {
4170 /* Write back enabled. */
4171 if ((t == n || t2 == n) && n != 31)
4172 return FALSE;
4173 }
4174
4175 if (BIT (insn, 22))
4176 {
4177 /* Load */
4178 if (t == t2)
4179 return FALSE;
4180 }
4181
4182 return TRUE;
4183 }
4184
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bfd_boolean
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  /* Mask covering the bits above the low ESIZE bytes.  The double shift
     by ESIZE * 4 avoids the undefined behaviour of a single 64-bit shift
     when ESIZE == 8.  */
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* Reject values that are neither zero-extended nor sign-extended from
     the low ESIZE bytes.  */
  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return FALSE;
  /* Narrow SVALUE to the smallest element width at which the value
     replicates: 64 -> 32 -> 16 bits.  If it replicates all the way down
     to single bytes, DUP can encode it (presumably via a byte-element
     DUP — confirm against the SVE DUP immediate encoding), so DUPM is
     not needed.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return FALSE;
	}
    }
  /* DUP's immediate is an 8-bit signed value, optionally shifted left by
     8 bits; fold out the shift, then report TRUE only for values outside
     the 8-bit signed range (i.e. those DUP cannot encode).  */
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  return svalue < -128 || svalue >= 128;
}
4211
4212 /* Include the opcode description table as well as the operand description
4213 table. */
4214 #define VERIFIER(x) verify_##x
4215 #include "aarch64-tbl.h"
This page took 0.132312 seconds and 4 git commands to generate.