c85abc63cd8a3a6e7a05805cdb3b050fdc90a721
[deliverable/binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Non-zero to enable the verbose dumping of encode/decode debug output
   (see the DEBUG_TRACE/dump_* helpers below).  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning (no
   assembler mnemonic for that encoding).  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning (no
   assembler mnemonic for that encoding).  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Classification of an AdvSIMD instruction's operand-qualifier sequence,
   used to choose which operand carries the significant qualifier for the
   size:Q field encoding (see significant_operand_index below).  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,		/* e.g. v.4s, v.4s, v.4s.  */
  DP_VECTOR_LONG,		/* e.g. v.8h, v.8b, v.8b.  */
  DP_VECTOR_WIDE,		/* e.g. v.8h, v.8h, v.8b.  */
  DP_VECTOR_ACROSS_LANES,	/* e.g. SADDLV <V><d>, <Vn>.<T>.  */
};
129
/* For each data pattern, the index of the operand whose qualifier is used
   to encode/decode the size:Q fields; indexed by enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
/* Select the operand to do the encoding/decoding of the 'size:Q' fields in
   the AdvSIMD instructions.  Returns an index into OPCODE->operands,
   derived from the opcode's first qualifier sequence.  */
/* N.B. it is possible to do some optimization that doesn't call
   get_data_pattern each time when we need to select an operand.  We can
   either buffer the calculated result or statically generate the data,
   however, it is not obvious that the optimization will bring significant
   benefit.  */

int
aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
{
  return
    significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
}
201 \f
/* Position of each named bitfield within the 32-bit instruction word:
   each entry is { lsb, width }.  NOTE(review): entries appear to be
   indexed by the field enumerators declared in the corresponding header;
   the order here must be kept in sync with that enumeration -- confirm.  */
const aarch64_field fields[] =
{
    { 0,  0 },	/* NIL.  */
    { 0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    { 0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 5, 19 },	/* imm19: e.g. in CBZ.  */
    { 5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29, 2 },	/* immlo: e.g. in ADRP.  */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
    { 0,  5 },	/* Rt: in load/store instructions.  */
    { 0,  5 },	/* Rd: in many integer instructions.  */
    { 5,  5 },	/* Rn: in many integer instructions.  */
    { 10, 5 },	/* Rt2: in load/store pair instructions.  */
    { 10, 5 },	/* Ra: in fp instructions.  */
    { 5,  3 },	/* op2: in the system instructions.  */
    { 8,  4 },	/* CRm: in the system instructions.  */
    { 12, 4 },	/* CRn: in the system instructions.  */
    { 16, 3 },	/* op1: in the system instructions.  */
    { 19, 2 },	/* op0: in the system instructions.  */
    { 10, 3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12, 4 },	/* cond: condition flags as a source operand.  */
    { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12, 1 },	/* S: in load/store reg offset instructions.  */
    { 21, 2 },	/* hw: in move wide constant instructions.  */
    { 22, 2 },	/* opc: in load/store reg offset instructions.  */
    { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22, 2 },	/* type: floating point type field in fp data inst.  */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10, 6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    { 5, 14 },	/* imm14: in test bit and branch instructions.  */
    { 5, 16 },	/* imm16: in exception instructions.  */
    { 0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22, 1 },	/* S: in LDRAA and LDRAB instructions.  */
    { 22, 1 },	/* N: in logical (immediate) instructions.  */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31, 1 },	/* sf: in integer data processing instructions.  */
    { 30, 1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31, 1 },	/* b5: in the test bit and branch instructions.  */
    { 19, 5 },	/* b40: in the test bit and branch instructions.  */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 4, 1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
    { 14, 1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
    { 16, 1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
    { 17, 1 },	/* SVE_N: SVE equivalent of N.  */
    { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    { 5, 5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16, 5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    { 0, 5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    { 5, 5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    { 5, 1 },	/* SVE_i1: single-bit immediate.  */
    { 16, 3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16, 4 },	/* SVE_imm4: 4-bit immediate field.  */
    { 5, 5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16, 5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16, 6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14, 7 },	/* SVE_imm7: 7-bit immediate field.  */
    { 5, 8 },	/* SVE_imm8: 8-bit immediate field.  */
    { 5, 9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11, 6 },	/* SVE_immr: SVE equivalent of immr.  */
    { 5, 6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    { 5, 5 },	/* SVE_pattern: vector pattern enumeration.  */
    { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 22, 1 },	/* SVE_sz: 1-bit element size select.  */
    { 16, 4 },	/* SVE_tsz: triangular size select.  */
    { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    { 8, 2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19, 2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22, 1 }	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
};
314
/* Return the operand class (register, immediate, address, ...) of the
   operand code TYPE.  */
enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd type)
{
  return aarch64_operands[type].op_class;
}
320
/* Return the name string of the operand code TYPE.  */
const char *
aarch64_get_operand_name (enum aarch64_opnd type)
{
  return aarch64_operands[type].name;
}
326
/* Get the operand description string of the operand code TYPE.
   This is usually for diagnostic purposes.  */
const char *
aarch64_get_operand_desc (enum aarch64_opnd type)
{
  return aarch64_operands[type].desc;
}
334
/* Table of all conditional affixes, indexed by the 4-bit condition code.
   Each entry lists the primary name followed by alternative spellings
   (presumably the SVE predicate-condition aliases -- confirm); VALUE is
   the encoded condition.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
355
356 const aarch64_cond *
357 get_cond_from_value (aarch64_insn value)
358 {
359 assert (value < 16);
360 return &aarch64_conds[(unsigned int) value];
361 }
362
363 const aarch64_cond *
364 get_inverted_cond (const aarch64_cond *cond)
365 {
366 return &aarch64_conds[cond->value ^ 0x1];
367 }
368
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   The list is terminated by a NULL name.  Note the layout the lookup
   helpers below depend on: the shift operators ROR..LSL appear in
   decreasing encoding order, and the extend operators UXTB..SXTX appear
   consecutively in increasing encoding order.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
394
/* Return the modifier kind corresponding to DESC, which must point into
   the aarch64_operand_modifiers table (pointer arithmetic gives the
   enumerator index).  */
enum aarch64_modifier_kind
aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
{
  return desc - aarch64_operand_modifiers;
}
400
/* Return the common encoding value of the modifier KIND.  */
aarch64_insn
aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
{
  return aarch64_operand_modifiers[kind].value;
}
406
407 enum aarch64_modifier_kind
408 aarch64_get_operand_modifier_from_value (aarch64_insn value,
409 bfd_boolean extend_p)
410 {
411 if (extend_p == TRUE)
412 return AARCH64_MOD_UXTB + value;
413 else
414 return AARCH64_MOD_LSL - value;
415 }
416
417 bfd_boolean
418 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
419 {
420 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
421 ? TRUE : FALSE;
422 }
423
424 static inline bfd_boolean
425 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
426 {
427 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
428 ? TRUE : FALSE;
429 }
430
/* Table of barrier option names, indexed by the 4-bit encoded value.
   Entries whose name is a "#0xNN" literal are encodings with no symbolic
   name.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
450
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias (i.e. the HINT immediate).  The
   list of operands is terminated by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },    /* PSB CSYNC.  */
  { NULL, 0x0 },
};
463
/* Encoding of the 5-bit PRFM prefetch operation operand, built as op:l:t:
   op -> op: load = 0 instruction = 1 store = 2
   l  -> level: 1-3
   t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1
   Entries with a NULL name are encodings with no symbolic prfop name.  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
504 \f
505 /* Utilities on value constraint. */
506
/* Return 1 if VALUE lies within the inclusive range [LOW, HIGH],
   0 otherwise.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  if (value > high)
    return 0;
  return 1;
}
512
/* Return non-zero if VALUE is a multiple of ALIGN (ALIGN must be
   non-zero).  */
static inline int
value_aligned_p (int64_t value, int align)
{
  int64_t remainder = value % align;

  return remainder == 0;
}
519
/* Return 1 if the signed VALUE fits in a WIDTH-bit two's-complement
   field, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1); 0 otherwise.  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  int64_t limit;

  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  limit = (int64_t) 1 << (width - 1);
  return (value >= -limit && value < limit) ? 1 : 0;
}
533
/* Return 1 if VALUE is a valid WIDTH-bit unsigned field value,
   i.e. 0 <= VALUE < 2^WIDTH; 0 otherwise.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  int64_t limit;

  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  limit = (int64_t) 1 << width;
  return (value >= 0 && value < limit) ? 1 : 0;
}
547
/* Return 1 if OPERAND is SP or WSP: an integer register operand whose
   operand code permits the stack pointer and whose register number
   is 31.  */
int
aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
557
/* Return 1 if OPERAND is XZR or WZR: an integer register operand whose
   operand code does NOT permit the stack pointer and whose register
   number is 31.  */
int
aarch64_zero_register_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
567
568 /* Return true if the operand *OPERAND that has the operand code
569 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
570 qualified by the qualifier TARGET. */
571
572 static inline int
573 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
574 aarch64_opnd_qualifier_t target)
575 {
576 switch (operand->qualifier)
577 {
578 case AARCH64_OPND_QLF_W:
579 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
580 return 1;
581 break;
582 case AARCH64_OPND_QLF_X:
583 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
584 return 1;
585 break;
586 case AARCH64_OPND_QLF_WSP:
587 if (target == AARCH64_OPND_QLF_W
588 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
589 return 1;
590 break;
591 case AARCH64_OPND_QLF_SP:
592 if (target == AARCH64_OPND_QLF_X
593 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
594 return 1;
595 break;
596 default:
597 break;
598 }
599
600 return 0;
601 }
602
603 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
604 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
605
606 Return NIL if more than one expected qualifiers are found. */
607
608 aarch64_opnd_qualifier_t
609 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
610 int idx,
611 const aarch64_opnd_qualifier_t known_qlf,
612 int known_idx)
613 {
614 int i, saved_i;
615
616 /* Special case.
617
618 When the known qualifier is NIL, we have to assume that there is only
619 one qualifier sequence in the *QSEQ_LIST and return the corresponding
620 qualifier directly. One scenario is that for instruction
621 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
622 which has only one possible valid qualifier sequence
623 NIL, S_D
624 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
625 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
626
627 Because the qualifier NIL has dual roles in the qualifier sequence:
628 it can mean no qualifier for the operand, or the qualifer sequence is
629 not in use (when all qualifiers in the sequence are NILs), we have to
630 handle this special case here. */
631 if (known_qlf == AARCH64_OPND_NIL)
632 {
633 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
634 return qseq_list[0][idx];
635 }
636
637 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
638 {
639 if (qseq_list[i][known_idx] == known_qlf)
640 {
641 if (saved_i != -1)
642 /* More than one sequences are found to have KNOWN_QLF at
643 KNOWN_IDX. */
644 return AARCH64_OPND_NIL;
645 saved_i = i;
646 }
647 }
648
649 return qseq_list[saved_i][idx];
650 }
651
/* The kind of an operand qualifier; selects how the data fields of
   struct operand_qualifier_data below are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,	/* Register/vector operand variant, e.g. "4s".  */
  OQK_VALUE_IN_RANGE,	/* Constrains an immediate to [data0, data1].  */
  OQK_MISC,
};
659
/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size, number of elements and the
     common encoding value; for OQK_VALUE_IN_RANGE they are the lower and
     upper bounds (data2 unused); otherwise all three are unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
672
/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.  */

  /* NOTE(review): these entries use a literal kind of 0, i.e. OQK_NIL,
     rather than OQK_MISC -- confirm whether OQK_MISC was intended (the
     kind predicates below only test for OQK_OPD_VARIANT and
     OQK_VALUE_IN_RANGE, so behavior is unaffected either way).  */
  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
727
728 static inline bfd_boolean
729 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
730 {
731 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
732 ? TRUE : FALSE;
733 }
734
735 static inline bfd_boolean
736 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
737 {
738 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
739 ? TRUE : FALSE;
740 }
741
/* Return the description string (e.g. "4s") of QUALIFIER.  */
const char*
aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].desc;
}
747
/* Given an operand qualifier, return the expected data element size
   (in bytes) of a qualified operand.  QUALIFIER must be an operand
   variant qualifier (asserted).  */
unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}
756
/* Given an operand variant qualifier (asserted), return the number of
   data elements in the qualified operand.  */
unsigned char
aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}
763
/* Given an operand variant qualifier (asserted), return its common
   encoding value (see the data2 column of aarch64_opnd_qualifiers).  */
aarch64_insn
aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data2;
}
770
/* Return the minimum value allowed by a value-in-range qualifier
   (asserted).  */
static int
get_lower_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}
777
/* Return the maximum value allowed by a value-in-range qualifier
   (asserted).  */
static int
get_upper_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}
784
785 #ifdef DEBUG_AARCH64
/* printf-style debug output: prints STR formatted with the remaining
   arguments, prefixed with "#### " and terminated with a newline.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}
796
/* Dump the names of the AARCH64_MAX_OPND_NUM qualifiers starting at
   QUALIFIER to stdout, comma-separated, on one "#### "-prefixed line.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}
806
/* Debug helper: dump the qualifiers currently attached to the operands
   OPND alongside the candidate sequence QUALIFIER they are being matched
   against.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
821 #endif /* DEBUG_AARCH64 */
822
823 /* TODO improve this, we can have an extra field at the runtime to
824 store the number of operands rather than calculating it every time. */
825
826 int
827 aarch64_num_of_operands (const aarch64_opcode *opcode)
828 {
829 int i = 0;
830 const enum aarch64_opnd *opnds = opcode->operands;
831 while (opnds[i++] != AARCH64_OPND_NIL)
832 ;
833 --i;
834 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
835 return i;
836 }
837
838 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
839 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
840
841 N.B. on the entry, it is very likely that only some operands in *INST
842 have had their qualifiers been established.
843
844 If STOP_AT is not -1, the function will only try to match
845 the qualifier sequence for operands before and including the operand
846 of index STOP_AT; and on success *RET will only be filled with the first
847 (STOP_AT+1) qualifiers.
848
849 A couple examples of the matching algorithm:
850
851 X,W,NIL should match
852 X,W,NIL
853
854 NIL,NIL should match
855 X ,NIL
856
857 Apart from serving the main encoding routine, this can also be called
858 during or after the operand decoding. */
859
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  /* Clamp STOP_AT into [0, num_opnds).  -1 means "match all operands".  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes have much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established; QUALIFIERS_LIST still points at the
	 matching sequence when we break out here.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched qualifiers up to STOP_AT; pad the rest of *RET
	 with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
962
963 /* Operand qualifier matching and resolving.
964
965 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
966 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
967
968 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
969 succeeds. */
970
971 static int
972 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
973 {
974 int i, nops;
975 aarch64_opnd_qualifier_seq_t qualifiers;
976
977 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
978 qualifiers))
979 {
980 DEBUG_TRACE ("matching FAIL");
981 return 0;
982 }
983
984 if (inst->opcode->flags & F_STRICT)
985 {
986 /* Require an exact qualifier match, even for NIL qualifiers. */
987 nops = aarch64_num_of_operands (inst->opcode);
988 for (i = 0; i < nops; ++i)
989 if (inst->operands[i].qualifier != qualifiers[i])
990 return FALSE;
991 }
992
993 /* Update the qualifiers. */
994 if (update_p == TRUE)
995 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
996 {
997 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
998 break;
999 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1000 "update %s with %s for operand %d",
1001 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1002 aarch64_get_qualifier_name (qualifiers[i]), i);
1003 inst->operands[i].qualifier = qualifiers[i];
1004 }
1005
1006 DEBUG_TRACE ("matching SUCCESS");
1007 return 1;
1008 }
1009
1010 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1011 register by MOVZ.
1012
1013 IS32 indicates whether value is a 32-bit immediate or not.
1014 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1015 amount will be returned in *SHIFT_AMOUNT. */
1016
1017 bfd_boolean
1018 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1019 {
1020 int amount;
1021
1022 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1023
1024 if (is32)
1025 {
1026 /* Allow all zeros or all ones in top 32-bits, so that
1027 32-bit constant expressions like ~0x80000000 are
1028 permitted. */
1029 uint64_t ext = value;
1030 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1031 /* Immediate out of range. */
1032 return FALSE;
1033 value &= (int64_t) 0xffffffff;
1034 }
1035
1036 /* first, try movz then movn */
1037 amount = -1;
1038 if ((value & ((int64_t) 0xffff << 0)) == value)
1039 amount = 0;
1040 else if ((value & ((int64_t) 0xffff << 16)) == value)
1041 amount = 16;
1042 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1043 amount = 32;
1044 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1045 amount = 48;
1046
1047 if (amount == -1)
1048 {
1049 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1050 return FALSE;
1051 }
1052
1053 if (shift_amount != NULL)
1054 *shift_amount = amount;
1055
1056 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1057
1058 return TRUE;
1059 }
1060
1061 /* Build the accepted values for immediate logical SIMD instructions.
1062
1063 The standard encodings of the immediate value are:
1064 N imms immr SIMD size R S
1065 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1066 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1067 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1068 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1069 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1070 0 11110s 00000r 2 UInt(r) UInt(s)
1071 where all-ones value of S is reserved.
1072
1073 Let's call E the SIMD size.
1074
1075 The immediate value is: S+1 bits '1' rotated to the right by R.
1076
1077 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1078 (remember S != E - 1). */
1079
/* Number of valid logical-immediate encodings: 64*63 + 32*31 + 16*15
   + 8*7 + 4*3 + 2*1 = 5334 (see the derivation in the comment above).  */
#define TOTAL_IMM_NB  5334

/* One table entry per valid logical immediate: the replicated 64-bit
   immediate value and its standard imm13 (N:immr:imms) encoding.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Filled and sorted by IMM in build_immediate_table, then binary-searched
   in aarch64_logical_immediate_p.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1089
1090 static int
1091 simd_imm_encoding_cmp(const void *i1, const void *i2)
1092 {
1093 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1094 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1095
1096 if (imm1->imm < imm2->imm)
1097 return -1;
1098 if (imm1->imm > imm2->imm)
1099 return +1;
1100 return 0;
1101 }
1102
1103 /* immediate bitfield standard encoding
1104 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1105 1 ssssss rrrrrr 64 rrrrrr ssssss
1106 0 0sssss 0rrrrr 32 rrrrr sssss
1107 0 10ssss 00rrrr 16 rrrr ssss
1108 0 110sss 000rrr 8 rrr sss
1109 0 1110ss 0000rr 4 rr ss
1110 0 11110s 00000r 2 r s */
/* Pack the imm13 standard encoding from its fields: IS64 in bit 12,
   R (immr) in bits 11..6 and S (imms) in bits 5..0, per the table above.  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int n_field = is64 << 12;
  int r_field = r << 6;

  return n_field | r_field | s;
}
1116
/* Populate simd_immediates[] with every valid logical-immediate value and
   its standard encoding, then sort the table by immediate value so that
   aarch64_logical_immediate_p can binary-search it.  For each element size
   E = 2^log_e (2..64), every rotation R of every run of S+1 ones (S < E-1)
   is generated and replicated across the full 64 bits.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    /* Each case deliberately falls through to the next, doubling
	       the replicated width until all 64 bits are covered.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm <<  2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm <<  4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm <<  8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  /* The generation above must produce exactly the number of encodings
     derived in the comment preceding TOTAL_IMM_NB.  */
  assert (nb_imms == TOTAL_IMM_NB);
  /* Sort by immediate value so the table can be searched with bsearch.  */
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1182
1183 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1184 be accepted by logical (immediate) instructions
1185 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1186
1187 ESIZE is the number of bytes in the decoded immediate value.
1188 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1189 VALUE will be returned in *ENCODING. */
1190
1191 bfd_boolean
1192 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1193 {
1194 simd_imm_encoding imm_enc;
1195 const simd_imm_encoding *imm_encoding;
1196 static bfd_boolean initialized = FALSE;
1197 uint64_t upper;
1198 int i;
1199
1200 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1201 value, is32);
1202
1203 if (initialized == FALSE)
1204 {
1205 build_immediate_table ();
1206 initialized = TRUE;
1207 }
1208
1209 /* Allow all zeros or all ones in top bits, so that
1210 constant expressions like ~1 are permitted. */
1211 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1212 if ((value & ~upper) != value && (value | upper) != value)
1213 return FALSE;
1214
1215 /* Replicate to a full 64-bit value. */
1216 value &= ~upper;
1217 for (i = esize * 8; i < 64; i *= 2)
1218 value |= (value << i);
1219
1220 imm_enc.imm = value;
1221 imm_encoding = (const simd_imm_encoding *)
1222 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1223 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1224 if (imm_encoding == NULL)
1225 {
1226 DEBUG_TRACE ("exit with FALSE");
1227 return FALSE;
1228 }
1229 if (encoding != NULL)
1230 *encoding = imm_encoding->encoding;
1231 DEBUG_TRACE ("exit with TRUE");
1232 return TRUE;
1233 }
1234
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int byte_idx;

  /* Walk the eight bytes from least to most significant; every byte must
     be all-ones (contributing a 1 bit) or all-zeros (a 0 bit).  */
  for (byte_idx = 0; byte_idx < 8; byte_idx++)
    {
      uint32_t b = (imm >> (8 * byte_idx)) & 0xff;

      switch (b)
	{
	case 0xff:
	  result |= 1 << byte_idx;
	  break;
	case 0x00:
	  break;
	default:
	  /* A mixed byte means IMM is not an expanded imm8.  */
	  return -1;
	}
    }
  return result;
}
1256
1257 /* Utility inline functions for operand_general_constraint_met_p. */
1258
1259 static inline void
1260 set_error (aarch64_operand_error *mismatch_detail,
1261 enum aarch64_operand_error_kind kind, int idx,
1262 const char* error)
1263 {
1264 if (mismatch_detail == NULL)
1265 return;
1266 mismatch_detail->kind = kind;
1267 mismatch_detail->index = idx;
1268 mismatch_detail->error = error;
1269 }
1270
1271 static inline void
1272 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1273 const char* error)
1274 {
1275 if (mismatch_detail == NULL)
1276 return;
1277 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1278 }
1279
1280 static inline void
1281 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1282 int idx, int lower_bound, int upper_bound,
1283 const char* error)
1284 {
1285 if (mismatch_detail == NULL)
1286 return;
1287 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1288 mismatch_detail->data[0] = lower_bound;
1289 mismatch_detail->data[1] = upper_bound;
1290 }
1291
1292 static inline void
1293 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1294 int idx, int lower_bound, int upper_bound)
1295 {
1296 if (mismatch_detail == NULL)
1297 return;
1298 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1299 _("immediate value"));
1300 }
1301
1302 static inline void
1303 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1304 int idx, int lower_bound, int upper_bound)
1305 {
1306 if (mismatch_detail == NULL)
1307 return;
1308 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1309 _("immediate offset"));
1310 }
1311
1312 static inline void
1313 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1314 int idx, int lower_bound, int upper_bound)
1315 {
1316 if (mismatch_detail == NULL)
1317 return;
1318 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1319 _("register number"));
1320 }
1321
1322 static inline void
1323 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1324 int idx, int lower_bound, int upper_bound)
1325 {
1326 if (mismatch_detail == NULL)
1327 return;
1328 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1329 _("register element index"));
1330 }
1331
1332 static inline void
1333 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1334 int idx, int lower_bound, int upper_bound)
1335 {
1336 if (mismatch_detail == NULL)
1337 return;
1338 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1339 _("shift amount"));
1340 }
1341
1342 /* Report that the MUL modifier in operand IDX should be in the range
1343 [LOWER_BOUND, UPPER_BOUND]. */
1344 static inline void
1345 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1346 int idx, int lower_bound, int upper_bound)
1347 {
1348 if (mismatch_detail == NULL)
1349 return;
1350 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1351 _("multiplier"));
1352 }
1353
1354 static inline void
1355 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1356 int alignment)
1357 {
1358 if (mismatch_detail == NULL)
1359 return;
1360 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1361 mismatch_detail->data[0] = alignment;
1362 }
1363
1364 static inline void
1365 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1366 int expected_num)
1367 {
1368 if (mismatch_detail == NULL)
1369 return;
1370 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1371 mismatch_detail->data[0] = expected_num;
1372 }
1373
1374 static inline void
1375 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1376 const char* error)
1377 {
1378 if (mismatch_detail == NULL)
1379 return;
1380 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1381 }
1382
1383 /* General constraint checking based on operand code.
1384
1385 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1386 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1387
1388 This function has to be called after the qualifiers for all operands
1389 have been resolved.
1390
1391 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1392 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1393 of error message during the disassembling where error message is not
1394 wanted. We avoid the dynamic construction of strings of error messages
1395 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1396 use a combination of error code, static string and some integer data to
1397 represent an error. */
1398
1399 static int
1400 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1401 enum aarch64_opnd type,
1402 const aarch64_opcode *opcode,
1403 aarch64_operand_error *mismatch_detail)
1404 {
1405 unsigned num, modifiers, shift;
1406 unsigned char size;
1407 int64_t imm, min_value, max_value;
1408 uint64_t uvalue, mask;
1409 const aarch64_opnd_info *opnd = opnds + idx;
1410 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1411
1412 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1413
1414 switch (aarch64_operands[type].op_class)
1415 {
1416 case AARCH64_OPND_CLASS_INT_REG:
1417 /* Check pair reg constraints for cas* instructions. */
1418 if (type == AARCH64_OPND_PAIRREG)
1419 {
1420 assert (idx == 1 || idx == 3);
1421 if (opnds[idx - 1].reg.regno % 2 != 0)
1422 {
1423 set_syntax_error (mismatch_detail, idx - 1,
1424 _("reg pair must start from even reg"));
1425 return 0;
1426 }
1427 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1428 {
1429 set_syntax_error (mismatch_detail, idx,
1430 _("reg pair must be contiguous"));
1431 return 0;
1432 }
1433 break;
1434 }
1435
1436 /* <Xt> may be optional in some IC and TLBI instructions. */
1437 if (type == AARCH64_OPND_Rt_SYS)
1438 {
1439 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1440 == AARCH64_OPND_CLASS_SYSTEM));
1441 if (opnds[1].present
1442 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1443 {
1444 set_other_error (mismatch_detail, idx, _("extraneous register"));
1445 return 0;
1446 }
1447 if (!opnds[1].present
1448 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1449 {
1450 set_other_error (mismatch_detail, idx, _("missing register"));
1451 return 0;
1452 }
1453 }
1454 switch (qualifier)
1455 {
1456 case AARCH64_OPND_QLF_WSP:
1457 case AARCH64_OPND_QLF_SP:
1458 if (!aarch64_stack_pointer_p (opnd))
1459 {
1460 set_other_error (mismatch_detail, idx,
1461 _("stack pointer register expected"));
1462 return 0;
1463 }
1464 break;
1465 default:
1466 break;
1467 }
1468 break;
1469
1470 case AARCH64_OPND_CLASS_SVE_REG:
1471 switch (type)
1472 {
1473 case AARCH64_OPND_SVE_Zn_INDEX:
1474 size = aarch64_get_qualifier_esize (opnd->qualifier);
1475 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1476 {
1477 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1478 0, 64 / size - 1);
1479 return 0;
1480 }
1481 break;
1482
1483 case AARCH64_OPND_SVE_ZnxN:
1484 case AARCH64_OPND_SVE_ZtxN:
1485 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1486 {
1487 set_other_error (mismatch_detail, idx,
1488 _("invalid register list"));
1489 return 0;
1490 }
1491 break;
1492
1493 default:
1494 break;
1495 }
1496 break;
1497
1498 case AARCH64_OPND_CLASS_PRED_REG:
1499 if (opnd->reg.regno >= 8
1500 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1501 {
1502 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1503 return 0;
1504 }
1505 break;
1506
1507 case AARCH64_OPND_CLASS_COND:
1508 if (type == AARCH64_OPND_COND1
1509 && (opnds[idx].cond->value & 0xe) == 0xe)
1510 {
1511 /* Not allow AL or NV. */
1512 set_syntax_error (mismatch_detail, idx, NULL);
1513 }
1514 break;
1515
1516 case AARCH64_OPND_CLASS_ADDRESS:
1517 /* Check writeback. */
1518 switch (opcode->iclass)
1519 {
1520 case ldst_pos:
1521 case ldst_unscaled:
1522 case ldstnapair_offs:
1523 case ldstpair_off:
1524 case ldst_unpriv:
1525 if (opnd->addr.writeback == 1)
1526 {
1527 set_syntax_error (mismatch_detail, idx,
1528 _("unexpected address writeback"));
1529 return 0;
1530 }
1531 break;
1532 case ldst_imm10:
1533 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1534 {
1535 set_syntax_error (mismatch_detail, idx,
1536 _("unexpected address writeback"));
1537 return 0;
1538 }
1539 break;
1540 case ldst_imm9:
1541 case ldstpair_indexed:
1542 case asisdlsep:
1543 case asisdlsop:
1544 if (opnd->addr.writeback == 0)
1545 {
1546 set_syntax_error (mismatch_detail, idx,
1547 _("address writeback expected"));
1548 return 0;
1549 }
1550 break;
1551 default:
1552 assert (opnd->addr.writeback == 0);
1553 break;
1554 }
1555 switch (type)
1556 {
1557 case AARCH64_OPND_ADDR_SIMM7:
1558 /* Scaled signed 7 bits immediate offset. */
1559 /* Get the size of the data element that is accessed, which may be
1560 different from that of the source register size,
1561 e.g. in strb/ldrb. */
1562 size = aarch64_get_qualifier_esize (opnd->qualifier);
1563 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1564 {
1565 set_offset_out_of_range_error (mismatch_detail, idx,
1566 -64 * size, 63 * size);
1567 return 0;
1568 }
1569 if (!value_aligned_p (opnd->addr.offset.imm, size))
1570 {
1571 set_unaligned_error (mismatch_detail, idx, size);
1572 return 0;
1573 }
1574 break;
1575 case AARCH64_OPND_ADDR_SIMM9:
1576 /* Unscaled signed 9 bits immediate offset. */
1577 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1578 {
1579 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1580 return 0;
1581 }
1582 break;
1583
1584 case AARCH64_OPND_ADDR_SIMM9_2:
1585 /* Unscaled signed 9 bits immediate offset, which has to be negative
1586 or unaligned. */
1587 size = aarch64_get_qualifier_esize (qualifier);
1588 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1589 && !value_aligned_p (opnd->addr.offset.imm, size))
1590 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1591 return 1;
1592 set_other_error (mismatch_detail, idx,
1593 _("negative or unaligned offset expected"));
1594 return 0;
1595
1596 case AARCH64_OPND_ADDR_SIMM10:
1597 /* Scaled signed 10 bits immediate offset. */
1598 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1599 {
1600 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1601 return 0;
1602 }
1603 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1604 {
1605 set_unaligned_error (mismatch_detail, idx, 8);
1606 return 0;
1607 }
1608 break;
1609
1610 case AARCH64_OPND_SIMD_ADDR_POST:
1611 /* AdvSIMD load/store multiple structures, post-index. */
1612 assert (idx == 1);
1613 if (opnd->addr.offset.is_reg)
1614 {
1615 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1616 return 1;
1617 else
1618 {
1619 set_other_error (mismatch_detail, idx,
1620 _("invalid register offset"));
1621 return 0;
1622 }
1623 }
1624 else
1625 {
1626 const aarch64_opnd_info *prev = &opnds[idx-1];
1627 unsigned num_bytes; /* total number of bytes transferred. */
1628 /* The opcode dependent area stores the number of elements in
1629 each structure to be loaded/stored. */
1630 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1631 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1632 /* Special handling of loading single structure to all lane. */
1633 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1634 * aarch64_get_qualifier_esize (prev->qualifier);
1635 else
1636 num_bytes = prev->reglist.num_regs
1637 * aarch64_get_qualifier_esize (prev->qualifier)
1638 * aarch64_get_qualifier_nelem (prev->qualifier);
1639 if ((int) num_bytes != opnd->addr.offset.imm)
1640 {
1641 set_other_error (mismatch_detail, idx,
1642 _("invalid post-increment amount"));
1643 return 0;
1644 }
1645 }
1646 break;
1647
1648 case AARCH64_OPND_ADDR_REGOFF:
1649 /* Get the size of the data element that is accessed, which may be
1650 different from that of the source register size,
1651 e.g. in strb/ldrb. */
1652 size = aarch64_get_qualifier_esize (opnd->qualifier);
1653 /* It is either no shift or shift by the binary logarithm of SIZE. */
1654 if (opnd->shifter.amount != 0
1655 && opnd->shifter.amount != (int)get_logsz (size))
1656 {
1657 set_other_error (mismatch_detail, idx,
1658 _("invalid shift amount"));
1659 return 0;
1660 }
1661 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1662 operators. */
1663 switch (opnd->shifter.kind)
1664 {
1665 case AARCH64_MOD_UXTW:
1666 case AARCH64_MOD_LSL:
1667 case AARCH64_MOD_SXTW:
1668 case AARCH64_MOD_SXTX: break;
1669 default:
1670 set_other_error (mismatch_detail, idx,
1671 _("invalid extend/shift operator"));
1672 return 0;
1673 }
1674 break;
1675
1676 case AARCH64_OPND_ADDR_UIMM12:
1677 imm = opnd->addr.offset.imm;
1678 /* Get the size of the data element that is accessed, which may be
1679 different from that of the source register size,
1680 e.g. in strb/ldrb. */
1681 size = aarch64_get_qualifier_esize (qualifier);
1682 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1683 {
1684 set_offset_out_of_range_error (mismatch_detail, idx,
1685 0, 4095 * size);
1686 return 0;
1687 }
1688 if (!value_aligned_p (opnd->addr.offset.imm, size))
1689 {
1690 set_unaligned_error (mismatch_detail, idx, size);
1691 return 0;
1692 }
1693 break;
1694
1695 case AARCH64_OPND_ADDR_PCREL14:
1696 case AARCH64_OPND_ADDR_PCREL19:
1697 case AARCH64_OPND_ADDR_PCREL21:
1698 case AARCH64_OPND_ADDR_PCREL26:
1699 imm = opnd->imm.value;
1700 if (operand_need_shift_by_two (get_operand_from_code (type)))
1701 {
1702 /* The offset value in a PC-relative branch instruction is alway
1703 4-byte aligned and is encoded without the lowest 2 bits. */
1704 if (!value_aligned_p (imm, 4))
1705 {
1706 set_unaligned_error (mismatch_detail, idx, 4);
1707 return 0;
1708 }
1709 /* Right shift by 2 so that we can carry out the following check
1710 canonically. */
1711 imm >>= 2;
1712 }
1713 size = get_operand_fields_width (get_operand_from_code (type));
1714 if (!value_fit_signed_field_p (imm, size))
1715 {
1716 set_other_error (mismatch_detail, idx,
1717 _("immediate out of range"));
1718 return 0;
1719 }
1720 break;
1721
1722 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1723 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1724 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1725 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1726 min_value = -8;
1727 max_value = 7;
1728 sve_imm_offset_vl:
1729 assert (!opnd->addr.offset.is_reg);
1730 assert (opnd->addr.preind);
1731 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1732 min_value *= num;
1733 max_value *= num;
1734 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1735 || (opnd->shifter.operator_present
1736 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1737 {
1738 set_other_error (mismatch_detail, idx,
1739 _("invalid addressing mode"));
1740 return 0;
1741 }
1742 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1743 {
1744 set_offset_out_of_range_error (mismatch_detail, idx,
1745 min_value, max_value);
1746 return 0;
1747 }
1748 if (!value_aligned_p (opnd->addr.offset.imm, num))
1749 {
1750 set_unaligned_error (mismatch_detail, idx, num);
1751 return 0;
1752 }
1753 break;
1754
1755 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1756 min_value = -32;
1757 max_value = 31;
1758 goto sve_imm_offset_vl;
1759
1760 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1761 min_value = -256;
1762 max_value = 255;
1763 goto sve_imm_offset_vl;
1764
1765 case AARCH64_OPND_SVE_ADDR_RI_U6:
1766 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1767 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1768 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1769 min_value = 0;
1770 max_value = 63;
1771 sve_imm_offset:
1772 assert (!opnd->addr.offset.is_reg);
1773 assert (opnd->addr.preind);
1774 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1775 min_value *= num;
1776 max_value *= num;
1777 if (opnd->shifter.operator_present
1778 || opnd->shifter.amount_present)
1779 {
1780 set_other_error (mismatch_detail, idx,
1781 _("invalid addressing mode"));
1782 return 0;
1783 }
1784 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1785 {
1786 set_offset_out_of_range_error (mismatch_detail, idx,
1787 min_value, max_value);
1788 return 0;
1789 }
1790 if (!value_aligned_p (opnd->addr.offset.imm, num))
1791 {
1792 set_unaligned_error (mismatch_detail, idx, num);
1793 return 0;
1794 }
1795 break;
1796
1797 case AARCH64_OPND_SVE_ADDR_RR:
1798 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1799 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1800 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1801 case AARCH64_OPND_SVE_ADDR_RX:
1802 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1803 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1804 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1805 case AARCH64_OPND_SVE_ADDR_RZ:
1806 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1807 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1808 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1809 modifiers = 1 << AARCH64_MOD_LSL;
1810 sve_rr_operand:
1811 assert (opnd->addr.offset.is_reg);
1812 assert (opnd->addr.preind);
1813 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1814 && opnd->addr.offset.regno == 31)
1815 {
1816 set_other_error (mismatch_detail, idx,
1817 _("index register xzr is not allowed"));
1818 return 0;
1819 }
1820 if (((1 << opnd->shifter.kind) & modifiers) == 0
1821 || (opnd->shifter.amount
1822 != get_operand_specific_data (&aarch64_operands[type])))
1823 {
1824 set_other_error (mismatch_detail, idx,
1825 _("invalid addressing mode"));
1826 return 0;
1827 }
1828 break;
1829
1830 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1831 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1832 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1833 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1834 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1835 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1836 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1837 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1838 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1839 goto sve_rr_operand;
1840
1841 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1842 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1843 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1844 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1845 min_value = 0;
1846 max_value = 31;
1847 goto sve_imm_offset;
1848
1849 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1850 modifiers = 1 << AARCH64_MOD_LSL;
1851 sve_zz_operand:
1852 assert (opnd->addr.offset.is_reg);
1853 assert (opnd->addr.preind);
1854 if (((1 << opnd->shifter.kind) & modifiers) == 0
1855 || opnd->shifter.amount < 0
1856 || opnd->shifter.amount > 3)
1857 {
1858 set_other_error (mismatch_detail, idx,
1859 _("invalid addressing mode"));
1860 return 0;
1861 }
1862 break;
1863
1864 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1865 modifiers = (1 << AARCH64_MOD_SXTW);
1866 goto sve_zz_operand;
1867
1868 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1869 modifiers = 1 << AARCH64_MOD_UXTW;
1870 goto sve_zz_operand;
1871
1872 default:
1873 break;
1874 }
1875 break;
1876
1877 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1878 if (type == AARCH64_OPND_LEt)
1879 {
1880 /* Get the upper bound for the element index. */
1881 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1882 if (!value_in_range_p (opnd->reglist.index, 0, num))
1883 {
1884 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1885 return 0;
1886 }
1887 }
1888 /* The opcode dependent area stores the number of elements in
1889 each structure to be loaded/stored. */
1890 num = get_opcode_dependent_value (opcode);
1891 switch (type)
1892 {
1893 case AARCH64_OPND_LVt:
1894 assert (num >= 1 && num <= 4);
1895 /* Unless LD1/ST1, the number of registers should be equal to that
1896 of the structure elements. */
1897 if (num != 1 && opnd->reglist.num_regs != num)
1898 {
1899 set_reg_list_error (mismatch_detail, idx, num);
1900 return 0;
1901 }
1902 break;
1903 case AARCH64_OPND_LVt_AL:
1904 case AARCH64_OPND_LEt:
1905 assert (num >= 1 && num <= 4);
1906 /* The number of registers should be equal to that of the structure
1907 elements. */
1908 if (opnd->reglist.num_regs != num)
1909 {
1910 set_reg_list_error (mismatch_detail, idx, num);
1911 return 0;
1912 }
1913 break;
1914 default:
1915 break;
1916 }
1917 break;
1918
1919 case AARCH64_OPND_CLASS_IMMEDIATE:
1920 /* Constraint check on immediate operand. */
1921 imm = opnd->imm.value;
1922 /* E.g. imm_0_31 constrains value to be 0..31. */
1923 if (qualifier_value_in_range_constraint_p (qualifier)
1924 && !value_in_range_p (imm, get_lower_bound (qualifier),
1925 get_upper_bound (qualifier)))
1926 {
1927 set_imm_out_of_range_error (mismatch_detail, idx,
1928 get_lower_bound (qualifier),
1929 get_upper_bound (qualifier));
1930 return 0;
1931 }
1932
1933 switch (type)
1934 {
1935 case AARCH64_OPND_AIMM:
1936 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1937 {
1938 set_other_error (mismatch_detail, idx,
1939 _("invalid shift operator"));
1940 return 0;
1941 }
1942 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1943 {
1944 set_other_error (mismatch_detail, idx,
1945 _("shift amount must be 0 or 12"));
1946 return 0;
1947 }
1948 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1949 {
1950 set_other_error (mismatch_detail, idx,
1951 _("immediate out of range"));
1952 return 0;
1953 }
1954 break;
1955
1956 case AARCH64_OPND_HALF:
1957 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1958 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1959 {
1960 set_other_error (mismatch_detail, idx,
1961 _("invalid shift operator"));
1962 return 0;
1963 }
1964 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1965 if (!value_aligned_p (opnd->shifter.amount, 16))
1966 {
1967 set_other_error (mismatch_detail, idx,
1968 _("shift amount must be a multiple of 16"));
1969 return 0;
1970 }
1971 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1972 {
1973 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1974 0, size * 8 - 16);
1975 return 0;
1976 }
1977 if (opnd->imm.value < 0)
1978 {
1979 set_other_error (mismatch_detail, idx,
1980 _("negative immediate value not allowed"));
1981 return 0;
1982 }
1983 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1984 {
1985 set_other_error (mismatch_detail, idx,
1986 _("immediate out of range"));
1987 return 0;
1988 }
1989 break;
1990
1991 case AARCH64_OPND_IMM_MOV:
1992 {
1993 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1994 imm = opnd->imm.value;
1995 assert (idx == 1);
1996 switch (opcode->op)
1997 {
1998 case OP_MOV_IMM_WIDEN:
1999 imm = ~imm;
2000 /* Fall through. */
2001 case OP_MOV_IMM_WIDE:
2002 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2003 {
2004 set_other_error (mismatch_detail, idx,
2005 _("immediate out of range"));
2006 return 0;
2007 }
2008 break;
2009 case OP_MOV_IMM_LOG:
2010 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2011 {
2012 set_other_error (mismatch_detail, idx,
2013 _("immediate out of range"));
2014 return 0;
2015 }
2016 break;
2017 default:
2018 assert (0);
2019 return 0;
2020 }
2021 }
2022 break;
2023
2024 case AARCH64_OPND_NZCV:
2025 case AARCH64_OPND_CCMP_IMM:
2026 case AARCH64_OPND_EXCEPTION:
2027 case AARCH64_OPND_UIMM4:
2028 case AARCH64_OPND_UIMM7:
2029 case AARCH64_OPND_UIMM3_OP1:
2030 case AARCH64_OPND_UIMM3_OP2:
2031 case AARCH64_OPND_SVE_UIMM3:
2032 case AARCH64_OPND_SVE_UIMM7:
2033 case AARCH64_OPND_SVE_UIMM8:
2034 case AARCH64_OPND_SVE_UIMM8_53:
2035 size = get_operand_fields_width (get_operand_from_code (type));
2036 assert (size < 32);
2037 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2038 {
2039 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2040 (1 << size) - 1);
2041 return 0;
2042 }
2043 break;
2044
2045 case AARCH64_OPND_SIMM5:
2046 case AARCH64_OPND_SVE_SIMM5:
2047 case AARCH64_OPND_SVE_SIMM5B:
2048 case AARCH64_OPND_SVE_SIMM6:
2049 case AARCH64_OPND_SVE_SIMM8:
2050 size = get_operand_fields_width (get_operand_from_code (type));
2051 assert (size < 32);
2052 if (!value_fit_signed_field_p (opnd->imm.value, size))
2053 {
2054 set_imm_out_of_range_error (mismatch_detail, idx,
2055 -(1 << (size - 1)),
2056 (1 << (size - 1)) - 1);
2057 return 0;
2058 }
2059 break;
2060
2061 case AARCH64_OPND_WIDTH:
2062 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2063 && opnds[0].type == AARCH64_OPND_Rd);
2064 size = get_upper_bound (qualifier);
2065 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2066 /* lsb+width <= reg.size */
2067 {
2068 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2069 size - opnds[idx-1].imm.value);
2070 return 0;
2071 }
2072 break;
2073
2074 case AARCH64_OPND_LIMM:
2075 case AARCH64_OPND_SVE_LIMM:
2076 {
2077 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2078 uint64_t uimm = opnd->imm.value;
2079 if (opcode->op == OP_BIC)
2080 uimm = ~uimm;
2081 if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
2082 {
2083 set_other_error (mismatch_detail, idx,
2084 _("immediate out of range"));
2085 return 0;
2086 }
2087 }
2088 break;
2089
2090 case AARCH64_OPND_IMM0:
2091 case AARCH64_OPND_FPIMM0:
2092 if (opnd->imm.value != 0)
2093 {
2094 set_other_error (mismatch_detail, idx,
2095 _("immediate zero expected"));
2096 return 0;
2097 }
2098 break;
2099
2100 case AARCH64_OPND_SHLL_IMM:
2101 assert (idx == 2);
2102 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2103 if (opnd->imm.value != size)
2104 {
2105 set_other_error (mismatch_detail, idx,
2106 _("invalid shift amount"));
2107 return 0;
2108 }
2109 break;
2110
2111 case AARCH64_OPND_IMM_VLSL:
2112 size = aarch64_get_qualifier_esize (qualifier);
2113 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2114 {
2115 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2116 size * 8 - 1);
2117 return 0;
2118 }
2119 break;
2120
2121 case AARCH64_OPND_IMM_VLSR:
2122 size = aarch64_get_qualifier_esize (qualifier);
2123 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2124 {
2125 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2126 return 0;
2127 }
2128 break;
2129
2130 case AARCH64_OPND_SIMD_IMM:
2131 case AARCH64_OPND_SIMD_IMM_SFT:
2132 /* Qualifier check. */
2133 switch (qualifier)
2134 {
2135 case AARCH64_OPND_QLF_LSL:
2136 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2137 {
2138 set_other_error (mismatch_detail, idx,
2139 _("invalid shift operator"));
2140 return 0;
2141 }
2142 break;
2143 case AARCH64_OPND_QLF_MSL:
2144 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2145 {
2146 set_other_error (mismatch_detail, idx,
2147 _("invalid shift operator"));
2148 return 0;
2149 }
2150 break;
2151 case AARCH64_OPND_QLF_NIL:
2152 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2153 {
2154 set_other_error (mismatch_detail, idx,
2155 _("shift is not permitted"));
2156 return 0;
2157 }
2158 break;
2159 default:
2160 assert (0);
2161 return 0;
2162 }
2163 /* Is the immediate valid? */
2164 assert (idx == 1);
2165 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2166 {
2167 /* uimm8 or simm8 */
2168 if (!value_in_range_p (opnd->imm.value, -128, 255))
2169 {
2170 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2171 return 0;
2172 }
2173 }
2174 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2175 {
2176 /* uimm64 is not
2177 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2178 ffffffffgggggggghhhhhhhh'. */
2179 set_other_error (mismatch_detail, idx,
2180 _("invalid value for immediate"));
2181 return 0;
2182 }
2183 /* Is the shift amount valid? */
2184 switch (opnd->shifter.kind)
2185 {
2186 case AARCH64_MOD_LSL:
2187 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2188 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2189 {
2190 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2191 (size - 1) * 8);
2192 return 0;
2193 }
2194 if (!value_aligned_p (opnd->shifter.amount, 8))
2195 {
2196 set_unaligned_error (mismatch_detail, idx, 8);
2197 return 0;
2198 }
2199 break;
2200 case AARCH64_MOD_MSL:
2201 /* Only 8 and 16 are valid shift amount. */
2202 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2203 {
2204 set_other_error (mismatch_detail, idx,
2205 _("shift amount must be 0 or 16"));
2206 return 0;
2207 }
2208 break;
2209 default:
2210 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2211 {
2212 set_other_error (mismatch_detail, idx,
2213 _("invalid shift operator"));
2214 return 0;
2215 }
2216 break;
2217 }
2218 break;
2219
2220 case AARCH64_OPND_FPIMM:
2221 case AARCH64_OPND_SIMD_FPIMM:
2222 case AARCH64_OPND_SVE_FPIMM8:
2223 if (opnd->imm.is_fp == 0)
2224 {
2225 set_other_error (mismatch_detail, idx,
2226 _("floating-point immediate expected"));
2227 return 0;
2228 }
2229 /* The value is expected to be an 8-bit floating-point constant with
2230 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2231 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2232 instruction). */
2233 if (!value_in_range_p (opnd->imm.value, 0, 255))
2234 {
2235 set_other_error (mismatch_detail, idx,
2236 _("immediate out of range"));
2237 return 0;
2238 }
2239 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2240 {
2241 set_other_error (mismatch_detail, idx,
2242 _("invalid shift operator"));
2243 return 0;
2244 }
2245 break;
2246
2247 case AARCH64_OPND_SVE_AIMM:
2248 min_value = 0;
2249 sve_aimm:
2250 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2251 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2252 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2253 uvalue = opnd->imm.value;
2254 shift = opnd->shifter.amount;
2255 if (size == 1)
2256 {
2257 if (shift != 0)
2258 {
2259 set_other_error (mismatch_detail, idx,
2260 _("no shift amount allowed for"
2261 " 8-bit constants"));
2262 return 0;
2263 }
2264 }
2265 else
2266 {
2267 if (shift != 0 && shift != 8)
2268 {
2269 set_other_error (mismatch_detail, idx,
2270 _("shift amount must be 0 or 8"));
2271 return 0;
2272 }
2273 if (shift == 0 && (uvalue & 0xff) == 0)
2274 {
2275 shift = 8;
2276 uvalue = (int64_t) uvalue / 256;
2277 }
2278 }
2279 mask >>= shift;
2280 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2281 {
2282 set_other_error (mismatch_detail, idx,
2283 _("immediate too big for element size"));
2284 return 0;
2285 }
2286 uvalue = (uvalue - min_value) & mask;
2287 if (uvalue > 0xff)
2288 {
2289 set_other_error (mismatch_detail, idx,
2290 _("invalid arithmetic immediate"));
2291 return 0;
2292 }
2293 break;
2294
2295 case AARCH64_OPND_SVE_ASIMM:
2296 min_value = -128;
2297 goto sve_aimm;
2298
2299 case AARCH64_OPND_SVE_I1_HALF_ONE:
2300 assert (opnd->imm.is_fp);
2301 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2302 {
2303 set_other_error (mismatch_detail, idx,
2304 _("floating-point value must be 0.5 or 1.0"));
2305 return 0;
2306 }
2307 break;
2308
2309 case AARCH64_OPND_SVE_I1_HALF_TWO:
2310 assert (opnd->imm.is_fp);
2311 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2312 {
2313 set_other_error (mismatch_detail, idx,
2314 _("floating-point value must be 0.5 or 2.0"));
2315 return 0;
2316 }
2317 break;
2318
2319 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2320 assert (opnd->imm.is_fp);
2321 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2322 {
2323 set_other_error (mismatch_detail, idx,
2324 _("floating-point value must be 0.0 or 1.0"));
2325 return 0;
2326 }
2327 break;
2328
2329 case AARCH64_OPND_SVE_INV_LIMM:
2330 {
2331 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2332 uint64_t uimm = ~opnd->imm.value;
2333 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2334 {
2335 set_other_error (mismatch_detail, idx,
2336 _("immediate out of range"));
2337 return 0;
2338 }
2339 }
2340 break;
2341
2342 case AARCH64_OPND_SVE_LIMM_MOV:
2343 {
2344 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2345 uint64_t uimm = opnd->imm.value;
2346 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2347 {
2348 set_other_error (mismatch_detail, idx,
2349 _("immediate out of range"));
2350 return 0;
2351 }
2352 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2353 {
2354 set_other_error (mismatch_detail, idx,
2355 _("invalid replicated MOV immediate"));
2356 return 0;
2357 }
2358 }
2359 break;
2360
2361 case AARCH64_OPND_SVE_PATTERN_SCALED:
2362 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2363 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2364 {
2365 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2366 return 0;
2367 }
2368 break;
2369
2370 case AARCH64_OPND_SVE_SHLIMM_PRED:
2371 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2372 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2373 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2374 {
2375 set_imm_out_of_range_error (mismatch_detail, idx,
2376 0, 8 * size - 1);
2377 return 0;
2378 }
2379 break;
2380
2381 case AARCH64_OPND_SVE_SHRIMM_PRED:
2382 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2383 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2384 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2385 {
2386 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2387 return 0;
2388 }
2389 break;
2390
2391 default:
2392 break;
2393 }
2394 break;
2395
2396 case AARCH64_OPND_CLASS_CP_REG:
2397 /* Cn or Cm: 4-bit opcode field named for historical reasons.
2398 valid range: C0 - C15. */
2399 if (opnd->reg.regno > 15)
2400 {
2401 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2402 return 0;
2403 }
2404 break;
2405
2406 case AARCH64_OPND_CLASS_SYSTEM:
2407 switch (type)
2408 {
2409 case AARCH64_OPND_PSTATEFIELD:
2410 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2411 /* MSR UAO, #uimm4
2412 MSR PAN, #uimm4
2413 The immediate must be #0 or #1. */
2414 if ((opnd->pstatefield == 0x03 /* UAO. */
2415 || opnd->pstatefield == 0x04) /* PAN. */
2416 && opnds[1].imm.value > 1)
2417 {
2418 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2419 return 0;
2420 }
2421 /* MSR SPSel, #uimm4
2422 Uses uimm4 as a control value to select the stack pointer: if
2423 bit 0 is set it selects the current exception level's stack
2424 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2425 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2426 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2427 {
2428 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2429 return 0;
2430 }
2431 break;
2432 default:
2433 break;
2434 }
2435 break;
2436
2437 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2438 /* Get the upper bound for the element index. */
2439 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2440 /* Index out-of-range. */
2441 if (!value_in_range_p (opnd->reglane.index, 0, num))
2442 {
2443 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2444 return 0;
2445 }
2446 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2447 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2448 number is encoded in "size:M:Rm":
2449 size <Vm>
2450 00 RESERVED
2451 01 0:Rm
2452 10 M:Rm
2453 11 RESERVED */
2454 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2455 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2456 {
2457 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2458 return 0;
2459 }
2460 break;
2461
2462 case AARCH64_OPND_CLASS_MODIFIED_REG:
2463 assert (idx == 1 || idx == 2);
2464 switch (type)
2465 {
2466 case AARCH64_OPND_Rm_EXT:
2467 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
2468 && opnd->shifter.kind != AARCH64_MOD_LSL)
2469 {
2470 set_other_error (mismatch_detail, idx,
2471 _("extend operator expected"));
2472 return 0;
2473 }
2474 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2475 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2476 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2477 case. */
2478 if (!aarch64_stack_pointer_p (opnds + 0)
2479 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2480 {
2481 if (!opnd->shifter.operator_present)
2482 {
2483 set_other_error (mismatch_detail, idx,
2484 _("missing extend operator"));
2485 return 0;
2486 }
2487 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2488 {
2489 set_other_error (mismatch_detail, idx,
2490 _("'LSL' operator not allowed"));
2491 return 0;
2492 }
2493 }
2494 assert (opnd->shifter.operator_present /* Default to LSL. */
2495 || opnd->shifter.kind == AARCH64_MOD_LSL);
2496 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2497 {
2498 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2499 return 0;
2500 }
2501 /* In the 64-bit form, the final register operand is written as Wm
2502 for all but the (possibly omitted) UXTX/LSL and SXTX
2503 operators.
2504 N.B. GAS allows X register to be used with any operator as a
2505 programming convenience. */
2506 if (qualifier == AARCH64_OPND_QLF_X
2507 && opnd->shifter.kind != AARCH64_MOD_LSL
2508 && opnd->shifter.kind != AARCH64_MOD_UXTX
2509 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2510 {
2511 set_other_error (mismatch_detail, idx, _("W register expected"));
2512 return 0;
2513 }
2514 break;
2515
2516 case AARCH64_OPND_Rm_SFT:
2517 /* ROR is not available to the shifted register operand in
2518 arithmetic instructions. */
2519 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2520 {
2521 set_other_error (mismatch_detail, idx,
2522 _("shift operator expected"));
2523 return 0;
2524 }
2525 if (opnd->shifter.kind == AARCH64_MOD_ROR
2526 && opcode->iclass != log_shift)
2527 {
2528 set_other_error (mismatch_detail, idx,
2529 _("'ROR' operator not allowed"));
2530 return 0;
2531 }
2532 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2533 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2534 {
2535 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2536 return 0;
2537 }
2538 break;
2539
2540 default:
2541 break;
2542 }
2543 break;
2544
2545 default:
2546 break;
2547 }
2548
2549 return 1;
2550 }
2551
2552 /* Main entrypoint for the operand constraint checking.
2553
2554 Return 1 if operands of *INST meet the constraint applied by the operand
2555 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2556 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2557 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2558 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2559 error kind when it is notified that an instruction does not pass the check).
2560
2561 Un-determined operand qualifiers may get established during the process. */
2562
int
aarch64_match_operands_constraint (aarch64_inst *inst,
				   aarch64_operand_error *mismatch_detail)
{
  int i;

  DEBUG_TRACE ("enter");

  /* Check for cases where a source register needs to be the same as the
     destination register.  Do this before matching qualifiers since if
     an instruction has both invalid tying and invalid qualifiers,
     the error about qualifiers would suggest several alternative
     instructions that also have invalid tying.  */
  i = inst->opcode->tied_operand;
  if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
    {
      if (mismatch_detail)
	{
	  mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
	  mismatch_detail->index = i;
	  mismatch_detail->error = NULL;
	}
      return 0;
    }

  /* Match operands' qualifier.
     *INST has already had qualifier establish for some, if not all, of
     its operands; we need to find out whether these established
     qualifiers match one of the qualifier sequence in
     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
     with the corresponding qualifier in such a sequence.
     Only basic operand constraint checking is done here; the more thorough
     constraint checking will carried out by operand_general_constraint_met_p,
     which has be to called after this in order to get all of the operands'
     qualifiers established.  */
  if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
    {
      DEBUG_TRACE ("FAIL on operand qualifier matching");
      if (mismatch_detail)
	{
	  /* Return an error type to indicate that it is the qualifier
	     matching failure; we don't care about which operand as there
	     are enough information in the opcode table to reproduce it.  */
	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
	  mismatch_detail->index = -1;
	  mismatch_detail->error = NULL;
	}
      return 0;
    }

  /* Match operands' constraint.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      enum aarch64_opnd type = inst->opcode->operands[i];
      /* The operand list is terminated by the first NIL entry.  */
      if (type == AARCH64_OPND_NIL)
	break;
      /* Operands marked 'skip' are not complete yet; don't check them.  */
      if (inst->operands[i].skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      if (operand_general_constraint_met_p (inst->operands, i, type,
					    inst->opcode, mismatch_detail) == 0)
	{
	  DEBUG_TRACE ("FAIL on operand %d", i);
	  return 0;
	}
    }

  DEBUG_TRACE ("PASS");

  return 1;
}
2636
2637 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2638 Also updates the TYPE of each INST->OPERANDS with the corresponding
2639 value of OPCODE->OPERANDS.
2640
2641 Note that some operand qualifiers may need to be manually cleared by
2642 the caller before it further calls the aarch64_opcode_encode; by
2643 doing this, it helps the qualifier matching facilities work
2644 properly. */
2645
2646 const aarch64_opcode*
2647 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2648 {
2649 int i;
2650 const aarch64_opcode *old = inst->opcode;
2651
2652 inst->opcode = opcode;
2653
2654 /* Update the operand types. */
2655 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2656 {
2657 inst->operands[i].type = opcode->operands[i];
2658 if (opcode->operands[i] == AARCH64_OPND_NIL)
2659 break;
2660 }
2661
2662 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2663
2664 return old;
2665 }
2666
2667 int
2668 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2669 {
2670 int i;
2671 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2672 if (operands[i] == operand)
2673 return i;
2674 else if (operands[i] == AARCH64_OPND_NIL)
2675 break;
2676 return -1;
2677 }
2678 \f
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* Integer register names, indexed as int_reg[has_zr][is_64][regno]:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2709
2710 /* Return the integer register name.
2711 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2712
2713 static inline const char *
2714 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2715 {
2716 const int has_zr = sp_reg_p ? 0 : 1;
2717 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2718 return int_reg[has_zr][is_64][regno];
2719 }
2720
2721 /* Like get_int_reg_name, but IS_64 is always 1. */
2722
2723 static inline const char *
2724 get_64bit_int_reg_name (int regno, int sp_reg_p)
2725 {
2726 const int has_zr = sp_reg_p ? 0 : 1;
2727 return int_reg[has_zr][1][regno];
2728 }
2729
2730 /* Get the name of the integer offset register in OPND, using the shift type
2731 to decide whether it's a word or doubleword. */
2732
2733 static inline const char *
2734 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2735 {
2736 switch (opnd->shifter.kind)
2737 {
2738 case AARCH64_MOD_UXTW:
2739 case AARCH64_MOD_SXTW:
2740 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2741
2742 case AARCH64_MOD_LSL:
2743 case AARCH64_MOD_SXTX:
2744 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2745
2746 default:
2747 abort ();
2748 }
2749 }
2750
2751 /* Get the name of the SVE vector offset register in OPND, using the operand
2752 qualifier to decide whether the suffix should be .S or .D. */
2753
2754 static inline const char *
2755 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2756 {
2757 assert (qualifier == AARCH64_OPND_QLF_S_S
2758 || qualifier == AARCH64_OPND_QLF_S_D);
2759 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2760 }
2761
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union lets the bit pattern produced by expand_fp_imm be
   reinterpreted as the corresponding IEEE value.  */

typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision (see
   expand_fp_imm below), hence the same layout as single_conv_t.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2781
2782 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2783 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2784 (depending on the type of the instruction). IMM8 will be expanded to a
2785 single-precision floating-point value (SIZE == 4) or a double-precision
2786 floating-point value (SIZE == 8). A half-precision floating-point value
2787 (SIZE == 2) is expanded to a single-precision floating-point value. The
2788 expanded value is returned. */
2789
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Initialize IMM so that an unsupported SIZE cannot return an
     indeterminate value when assert() is compiled out (NDEBUG).  */
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      /* Assemble the high word first, then shift into the top half.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)  */
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>  */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7> */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>) */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2825
2826 /* Produce the string representation of the register list operand *OPND
2827 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2828 the register name that comes before the register number, such as "v". */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix)
{
  const int num_regs = opnd->reglist.num_regs;
  const int first_reg = opnd->reglist.first_regno;
  /* Register numbers wrap around modulo 32, e.g. {v31.4s, v0.4s}.  */
  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[8];			/* Temporary buffer.  */

  /* An element index is mandatory for LEt lists and only for them.  */
  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (num_regs > 2 && last_reg > first_reg)
    snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
	      prefix, last_reg, qlf_name, tb);
  else
    {
      /* Otherwise list every register explicitly, wrapping at 31.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + 1) & 0x1f;
      const int reg2 = (first_reg + 2) & 0x1f;
      const int reg3 = (first_reg + 3) & 0x1f;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
		    prefix, reg1, qlf_name, tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
	  break;
	}
    }
}
2883
2884 /* Print the register+immediate address in OPND to BUF, which has SIZE
2885 characters. BASE is the name of the base register. */
2886
2887 static void
2888 print_immediate_offset_address (char *buf, size_t size,
2889 const aarch64_opnd_info *opnd,
2890 const char *base)
2891 {
2892 if (opnd->addr.writeback)
2893 {
2894 if (opnd->addr.preind)
2895 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
2896 else
2897 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
2898 }
2899 else
2900 {
2901 if (opnd->shifter.operator_present)
2902 {
2903 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2904 snprintf (buf, size, "[%s, #%d, mul vl]",
2905 base, opnd->addr.offset.imm);
2906 }
2907 else if (opnd->addr.offset.imm)
2908 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
2909 else
2910 snprintf (buf, size, "[%s]", base);
2911 }
2912 }
2913
2914 /* Produce the string representation of the register offset address operand
2915 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2916 the names of the base and offset registers. */
2917 static void
2918 print_register_offset_address (char *buf, size_t size,
2919 const aarch64_opnd_info *opnd,
2920 const char *base, const char *offset)
2921 {
2922 char tb[16]; /* Temporary buffer. */
2923 bfd_boolean print_extend_p = TRUE;
2924 bfd_boolean print_amount_p = TRUE;
2925 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2926
2927 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2928 || !opnd->shifter.amount_present))
2929 {
2930 /* Not print the shift/extend amount when the amount is zero and
2931 when it is not the special case of 8-bit load/store instruction. */
2932 print_amount_p = FALSE;
2933 /* Likewise, no need to print the shift operator LSL in such a
2934 situation. */
2935 if (opnd->shifter.kind == AARCH64_MOD_LSL)
2936 print_extend_p = FALSE;
2937 }
2938
2939 /* Prepare for the extend/shift. */
2940 if (print_extend_p)
2941 {
2942 if (print_amount_p)
2943 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
2944 opnd->shifter.amount);
2945 else
2946 snprintf (tb, sizeof (tb), ", %s", shift_name);
2947 }
2948 else
2949 tb[0] = '\0';
2950
2951 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
2952 }
2953
2954 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2955 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2956 PC, PCREL_P and ADDRESS are used to pass in and return information about
2957 the PC-relative address calculation, where the PC value is passed in
2958 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2959 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2960 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2961
2962 The function serves both the disassembler and the assembler diagnostics
2963 issuer, which is the reason why it lives in this file. */
2964
2965 void
2966 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2967 const aarch64_opcode *opcode,
2968 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2969 bfd_vma *address)
2970 {
2971 unsigned int i, num_conds;
2972 const char *name = NULL;
2973 const aarch64_opnd_info *opnd = opnds + idx;
2974 enum aarch64_modifier_kind kind;
2975 uint64_t addr, enum_value;
2976
2977 buf[0] = '\0';
2978 if (pcrel_p)
2979 *pcrel_p = 0;
2980
2981 switch (opnd->type)
2982 {
2983 case AARCH64_OPND_Rd:
2984 case AARCH64_OPND_Rn:
2985 case AARCH64_OPND_Rm:
2986 case AARCH64_OPND_Rt:
2987 case AARCH64_OPND_Rt2:
2988 case AARCH64_OPND_Rs:
2989 case AARCH64_OPND_Ra:
2990 case AARCH64_OPND_Rt_SYS:
2991 case AARCH64_OPND_PAIRREG:
2992 case AARCH64_OPND_SVE_Rm:
2993 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2994 the <ic_op>, therefore we we use opnd->present to override the
2995 generic optional-ness information. */
2996 if (opnd->type == AARCH64_OPND_Rt_SYS)
2997 {
2998 if (!opnd->present)
2999 break;
3000 }
3001 /* Omit the operand, e.g. RET. */
3002 else if (optional_operand_p (opcode, idx)
3003 && (opnd->reg.regno
3004 == get_optional_operand_default_value (opcode)))
3005 break;
3006 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3007 || opnd->qualifier == AARCH64_OPND_QLF_X);
3008 snprintf (buf, size, "%s",
3009 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3010 break;
3011
3012 case AARCH64_OPND_Rd_SP:
3013 case AARCH64_OPND_Rn_SP:
3014 case AARCH64_OPND_SVE_Rn_SP:
3015 case AARCH64_OPND_Rm_SP:
3016 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3017 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3018 || opnd->qualifier == AARCH64_OPND_QLF_X
3019 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3020 snprintf (buf, size, "%s",
3021 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3022 break;
3023
3024 case AARCH64_OPND_Rm_EXT:
3025 kind = opnd->shifter.kind;
3026 assert (idx == 1 || idx == 2);
3027 if ((aarch64_stack_pointer_p (opnds)
3028 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3029 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3030 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3031 && kind == AARCH64_MOD_UXTW)
3032 || (opnd->qualifier == AARCH64_OPND_QLF_X
3033 && kind == AARCH64_MOD_UXTX)))
3034 {
3035 /* 'LSL' is the preferred form in this case. */
3036 kind = AARCH64_MOD_LSL;
3037 if (opnd->shifter.amount == 0)
3038 {
3039 /* Shifter omitted. */
3040 snprintf (buf, size, "%s",
3041 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3042 break;
3043 }
3044 }
3045 if (opnd->shifter.amount)
3046 snprintf (buf, size, "%s, %s #%" PRIi64,
3047 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3048 aarch64_operand_modifiers[kind].name,
3049 opnd->shifter.amount);
3050 else
3051 snprintf (buf, size, "%s, %s",
3052 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3053 aarch64_operand_modifiers[kind].name);
3054 break;
3055
3056 case AARCH64_OPND_Rm_SFT:
3057 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3058 || opnd->qualifier == AARCH64_OPND_QLF_X);
3059 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3060 snprintf (buf, size, "%s",
3061 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3062 else
3063 snprintf (buf, size, "%s, %s #%" PRIi64,
3064 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3065 aarch64_operand_modifiers[opnd->shifter.kind].name,
3066 opnd->shifter.amount);
3067 break;
3068
3069 case AARCH64_OPND_Fd:
3070 case AARCH64_OPND_Fn:
3071 case AARCH64_OPND_Fm:
3072 case AARCH64_OPND_Fa:
3073 case AARCH64_OPND_Ft:
3074 case AARCH64_OPND_Ft2:
3075 case AARCH64_OPND_Sd:
3076 case AARCH64_OPND_Sn:
3077 case AARCH64_OPND_Sm:
3078 case AARCH64_OPND_SVE_VZn:
3079 case AARCH64_OPND_SVE_Vd:
3080 case AARCH64_OPND_SVE_Vm:
3081 case AARCH64_OPND_SVE_Vn:
3082 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3083 opnd->reg.regno);
3084 break;
3085
3086 case AARCH64_OPND_Vd:
3087 case AARCH64_OPND_Vn:
3088 case AARCH64_OPND_Vm:
3089 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3090 aarch64_get_qualifier_name (opnd->qualifier));
3091 break;
3092
3093 case AARCH64_OPND_Ed:
3094 case AARCH64_OPND_En:
3095 case AARCH64_OPND_Em:
3096 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3097 aarch64_get_qualifier_name (opnd->qualifier),
3098 opnd->reglane.index);
3099 break;
3100
3101 case AARCH64_OPND_VdD1:
3102 case AARCH64_OPND_VnD1:
3103 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3104 break;
3105
3106 case AARCH64_OPND_LVn:
3107 case AARCH64_OPND_LVt:
3108 case AARCH64_OPND_LVt_AL:
3109 case AARCH64_OPND_LEt:
3110 print_register_list (buf, size, opnd, "v");
3111 break;
3112
3113 case AARCH64_OPND_SVE_Pd:
3114 case AARCH64_OPND_SVE_Pg3:
3115 case AARCH64_OPND_SVE_Pg4_5:
3116 case AARCH64_OPND_SVE_Pg4_10:
3117 case AARCH64_OPND_SVE_Pg4_16:
3118 case AARCH64_OPND_SVE_Pm:
3119 case AARCH64_OPND_SVE_Pn:
3120 case AARCH64_OPND_SVE_Pt:
3121 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3122 snprintf (buf, size, "p%d", opnd->reg.regno);
3123 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3124 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3125 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3126 aarch64_get_qualifier_name (opnd->qualifier));
3127 else
3128 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3129 aarch64_get_qualifier_name (opnd->qualifier));
3130 break;
3131
3132 case AARCH64_OPND_SVE_Za_5:
3133 case AARCH64_OPND_SVE_Za_16:
3134 case AARCH64_OPND_SVE_Zd:
3135 case AARCH64_OPND_SVE_Zm_5:
3136 case AARCH64_OPND_SVE_Zm_16:
3137 case AARCH64_OPND_SVE_Zn:
3138 case AARCH64_OPND_SVE_Zt:
3139 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3140 snprintf (buf, size, "z%d", opnd->reg.regno);
3141 else
3142 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3143 aarch64_get_qualifier_name (opnd->qualifier));
3144 break;
3145
3146 case AARCH64_OPND_SVE_ZnxN:
3147 case AARCH64_OPND_SVE_ZtxN:
3148 print_register_list (buf, size, opnd, "z");
3149 break;
3150
3151 case AARCH64_OPND_SVE_Zn_INDEX:
3152 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3153 aarch64_get_qualifier_name (opnd->qualifier),
3154 opnd->reglane.index);
3155 break;
3156
3157 case AARCH64_OPND_Cn:
3158 case AARCH64_OPND_Cm:
3159 snprintf (buf, size, "C%d", opnd->reg.regno);
3160 break;
3161
3162 case AARCH64_OPND_IDX:
3163 case AARCH64_OPND_IMM:
3164 case AARCH64_OPND_WIDTH:
3165 case AARCH64_OPND_UIMM3_OP1:
3166 case AARCH64_OPND_UIMM3_OP2:
3167 case AARCH64_OPND_BIT_NUM:
3168 case AARCH64_OPND_IMM_VLSL:
3169 case AARCH64_OPND_IMM_VLSR:
3170 case AARCH64_OPND_SHLL_IMM:
3171 case AARCH64_OPND_IMM0:
3172 case AARCH64_OPND_IMMR:
3173 case AARCH64_OPND_IMMS:
3174 case AARCH64_OPND_FBITS:
3175 case AARCH64_OPND_SIMM5:
3176 case AARCH64_OPND_SVE_SHLIMM_PRED:
3177 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3178 case AARCH64_OPND_SVE_SHRIMM_PRED:
3179 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3180 case AARCH64_OPND_SVE_SIMM5:
3181 case AARCH64_OPND_SVE_SIMM5B:
3182 case AARCH64_OPND_SVE_SIMM6:
3183 case AARCH64_OPND_SVE_SIMM8:
3184 case AARCH64_OPND_SVE_UIMM3:
3185 case AARCH64_OPND_SVE_UIMM7:
3186 case AARCH64_OPND_SVE_UIMM8:
3187 case AARCH64_OPND_SVE_UIMM8_53:
3188 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3189 break;
3190
3191 case AARCH64_OPND_SVE_I1_HALF_ONE:
3192 case AARCH64_OPND_SVE_I1_HALF_TWO:
3193 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3194 {
3195 single_conv_t c;
3196 c.i = opnd->imm.value;
3197 snprintf (buf, size, "#%.1f", c.f);
3198 break;
3199 }
3200
3201 case AARCH64_OPND_SVE_PATTERN:
3202 if (optional_operand_p (opcode, idx)
3203 && opnd->imm.value == get_optional_operand_default_value (opcode))
3204 break;
3205 enum_value = opnd->imm.value;
3206 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3207 if (aarch64_sve_pattern_array[enum_value])
3208 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3209 else
3210 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3211 break;
3212
3213 case AARCH64_OPND_SVE_PATTERN_SCALED:
3214 if (optional_operand_p (opcode, idx)
3215 && !opnd->shifter.operator_present
3216 && opnd->imm.value == get_optional_operand_default_value (opcode))
3217 break;
3218 enum_value = opnd->imm.value;
3219 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3220 if (aarch64_sve_pattern_array[opnd->imm.value])
3221 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3222 else
3223 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3224 if (opnd->shifter.operator_present)
3225 {
3226 size_t len = strlen (buf);
3227 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3228 aarch64_operand_modifiers[opnd->shifter.kind].name,
3229 opnd->shifter.amount);
3230 }
3231 break;
3232
3233 case AARCH64_OPND_SVE_PRFOP:
3234 enum_value = opnd->imm.value;
3235 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3236 if (aarch64_sve_prfop_array[enum_value])
3237 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3238 else
3239 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3240 break;
3241
3242 case AARCH64_OPND_IMM_MOV:
3243 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3244 {
3245 case 4: /* e.g. MOV Wd, #<imm32>. */
3246 {
3247 int imm32 = opnd->imm.value;
3248 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3249 }
3250 break;
3251 case 8: /* e.g. MOV Xd, #<imm64>. */
3252 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3253 opnd->imm.value, opnd->imm.value);
3254 break;
3255 default: assert (0);
3256 }
3257 break;
3258
3259 case AARCH64_OPND_FPIMM0:
3260 snprintf (buf, size, "#0.0");
3261 break;
3262
3263 case AARCH64_OPND_LIMM:
3264 case AARCH64_OPND_AIMM:
3265 case AARCH64_OPND_HALF:
3266 case AARCH64_OPND_SVE_INV_LIMM:
3267 case AARCH64_OPND_SVE_LIMM:
3268 case AARCH64_OPND_SVE_LIMM_MOV:
3269 if (opnd->shifter.amount)
3270 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3271 opnd->shifter.amount);
3272 else
3273 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3274 break;
3275
3276 case AARCH64_OPND_SIMD_IMM:
3277 case AARCH64_OPND_SIMD_IMM_SFT:
3278 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3279 || opnd->shifter.kind == AARCH64_MOD_NONE)
3280 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3281 else
3282 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3283 aarch64_operand_modifiers[opnd->shifter.kind].name,
3284 opnd->shifter.amount);
3285 break;
3286
3287 case AARCH64_OPND_SVE_AIMM:
3288 case AARCH64_OPND_SVE_ASIMM:
3289 if (opnd->shifter.amount)
3290 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3291 opnd->shifter.amount);
3292 else
3293 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3294 break;
3295
3296 case AARCH64_OPND_FPIMM:
3297 case AARCH64_OPND_SIMD_FPIMM:
3298 case AARCH64_OPND_SVE_FPIMM8:
3299 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3300 {
3301 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3302 {
3303 half_conv_t c;
3304 c.i = expand_fp_imm (2, opnd->imm.value);
3305 snprintf (buf, size, "#%.18e", c.f);
3306 }
3307 break;
3308 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3309 {
3310 single_conv_t c;
3311 c.i = expand_fp_imm (4, opnd->imm.value);
3312 snprintf (buf, size, "#%.18e", c.f);
3313 }
3314 break;
3315 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3316 {
3317 double_conv_t c;
3318 c.i = expand_fp_imm (8, opnd->imm.value);
3319 snprintf (buf, size, "#%.18e", c.d);
3320 }
3321 break;
3322 default: assert (0);
3323 }
3324 break;
3325
3326 case AARCH64_OPND_CCMP_IMM:
3327 case AARCH64_OPND_NZCV:
3328 case AARCH64_OPND_EXCEPTION:
3329 case AARCH64_OPND_UIMM4:
3330 case AARCH64_OPND_UIMM7:
3331 if (optional_operand_p (opcode, idx) == TRUE
3332 && (opnd->imm.value ==
3333 (int64_t) get_optional_operand_default_value (opcode)))
3334 /* Omit the operand, e.g. DCPS1. */
3335 break;
3336 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3337 break;
3338
3339 case AARCH64_OPND_COND:
3340 case AARCH64_OPND_COND1:
3341 snprintf (buf, size, "%s", opnd->cond->names[0]);
3342 num_conds = ARRAY_SIZE (opnd->cond->names);
3343 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3344 {
3345 size_t len = strlen (buf);
3346 if (i == 1)
3347 snprintf (buf + len, size - len, " // %s = %s",
3348 opnd->cond->names[0], opnd->cond->names[i]);
3349 else
3350 snprintf (buf + len, size - len, ", %s",
3351 opnd->cond->names[i]);
3352 }
3353 break;
3354
3355 case AARCH64_OPND_ADDR_ADRP:
3356 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3357 + opnd->imm.value;
3358 if (pcrel_p)
3359 *pcrel_p = 1;
3360 if (address)
3361 *address = addr;
3362 /* This is not necessary during the disassembling, as print_address_func
3363 in the disassemble_info will take care of the printing. But some
3364 other callers may be still interested in getting the string in *STR,
3365 so here we do snprintf regardless. */
3366 snprintf (buf, size, "#0x%" PRIx64, addr);
3367 break;
3368
3369 case AARCH64_OPND_ADDR_PCREL14:
3370 case AARCH64_OPND_ADDR_PCREL19:
3371 case AARCH64_OPND_ADDR_PCREL21:
3372 case AARCH64_OPND_ADDR_PCREL26:
3373 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3374 if (pcrel_p)
3375 *pcrel_p = 1;
3376 if (address)
3377 *address = addr;
3378 /* This is not necessary during the disassembling, as print_address_func
3379 in the disassemble_info will take care of the printing. But some
3380 other callers may be still interested in getting the string in *STR,
3381 so here we do snprintf regardless. */
3382 snprintf (buf, size, "#0x%" PRIx64, addr);
3383 break;
3384
3385 case AARCH64_OPND_ADDR_SIMPLE:
3386 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3387 case AARCH64_OPND_SIMD_ADDR_POST:
3388 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3389 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3390 {
3391 if (opnd->addr.offset.is_reg)
3392 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3393 else
3394 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3395 }
3396 else
3397 snprintf (buf, size, "[%s]", name);
3398 break;
3399
3400 case AARCH64_OPND_ADDR_REGOFF:
3401 case AARCH64_OPND_SVE_ADDR_RR:
3402 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3403 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3404 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3405 case AARCH64_OPND_SVE_ADDR_RX:
3406 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3407 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3408 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3409 print_register_offset_address
3410 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3411 get_offset_int_reg_name (opnd));
3412 break;
3413
3414 case AARCH64_OPND_SVE_ADDR_RZ:
3415 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3416 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3417 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3418 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3419 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3420 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3421 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3422 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3423 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3424 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3425 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3426 print_register_offset_address
3427 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3428 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3429 break;
3430
3431 case AARCH64_OPND_ADDR_SIMM7:
3432 case AARCH64_OPND_ADDR_SIMM9:
3433 case AARCH64_OPND_ADDR_SIMM9_2:
3434 case AARCH64_OPND_ADDR_SIMM10:
3435 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3436 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3437 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3438 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3439 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3440 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3441 case AARCH64_OPND_SVE_ADDR_RI_U6:
3442 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3443 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3444 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3445 print_immediate_offset_address
3446 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3447 break;
3448
3449 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3450 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3451 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3452 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3453 print_immediate_offset_address
3454 (buf, size, opnd,
3455 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3456 break;
3457
3458 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3459 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3460 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3461 print_register_offset_address
3462 (buf, size, opnd,
3463 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3464 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3465 break;
3466
3467 case AARCH64_OPND_ADDR_UIMM12:
3468 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3469 if (opnd->addr.offset.imm)
3470 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3471 else
3472 snprintf (buf, size, "[%s]", name);
3473 break;
3474
3475 case AARCH64_OPND_SYSREG:
3476 for (i = 0; aarch64_sys_regs[i].name; ++i)
3477 if (aarch64_sys_regs[i].value == opnd->sysreg
3478 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
3479 break;
3480 if (aarch64_sys_regs[i].name)
3481 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
3482 else
3483 {
3484 /* Implementation defined system register. */
3485 unsigned int value = opnd->sysreg;
3486 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3487 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3488 value & 0x7);
3489 }
3490 break;
3491
3492 case AARCH64_OPND_PSTATEFIELD:
3493 for (i = 0; aarch64_pstatefields[i].name; ++i)
3494 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3495 break;
3496 assert (aarch64_pstatefields[i].name);
3497 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3498 break;
3499
3500 case AARCH64_OPND_SYSREG_AT:
3501 case AARCH64_OPND_SYSREG_DC:
3502 case AARCH64_OPND_SYSREG_IC:
3503 case AARCH64_OPND_SYSREG_TLBI:
3504 snprintf (buf, size, "%s", opnd->sysins_op->name);
3505 break;
3506
3507 case AARCH64_OPND_BARRIER:
3508 snprintf (buf, size, "%s", opnd->barrier->name);
3509 break;
3510
3511 case AARCH64_OPND_BARRIER_ISB:
3512 /* Operand can be omitted, e.g. in DCPS1. */
3513 if (! optional_operand_p (opcode, idx)
3514 || (opnd->barrier->value
3515 != get_optional_operand_default_value (opcode)))
3516 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3517 break;
3518
3519 case AARCH64_OPND_PRFOP:
3520 if (opnd->prfop->name != NULL)
3521 snprintf (buf, size, "%s", opnd->prfop->name);
3522 else
3523 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3524 break;
3525
3526 case AARCH64_OPND_BARRIER_PSB:
3527 snprintf (buf, size, "%s", opnd->hint_option->name);
3528 break;
3529
3530 default:
3531 assert (0);
3532 }
3533 }
3534 \f
/* Encode a system register's (op0, op1, CRn, CRm, op2) fields into the
   single value stored in the register tables below: op0 ends up in bits
   [15:14], op1 in bits [13:11], CRn in bits [10:7], CRm in bits [6:3]
   and op2 in bits [2:0] — the same layout that the AARCH64_OPND_SYSREG
   printer decodes for implementation-defined registers.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers
   (op0 == 3, CRn == 4).  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions (op0 == 1).  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
3541
/* Readable names for the CRn/CRm register-number arguments of the
   CPENC family of macros.  */
#define C0 0
#define C1 1
#define C2 2
#define C3 3
#define C4 4
#define C5 5
#define C6 6
#define C7 7
#define C8 8
#define C9 9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15
3558
/* Flags for the third field of the aarch64_sys_reg entries below.
   Undefine first in case a system header (e.g. fcntl.h's file-locking
   F_* constants) already defines a name with the same spelling.  */
#ifdef F_DEPRECATED
#undef F_DEPRECATED
#endif
#define F_DEPRECATED 0x1 /* Deprecated system register.  */

#ifdef F_ARCHEXT
#undef F_ARCHEXT
#endif
#define F_ARCHEXT 0x2 /* Architecture dependent system register.  */

#ifdef F_HASXT
#undef F_HASXT
#endif
#define F_HASXT 0x4 /* System instruction register <Xt>
     operand.  */
3574
3575
/* TODO: there are two more issues that need to be resolved:
   1. handle read-only and write-only system registers;
   2. handle CPU-implementation-defined system registers.  */
3579 const aarch64_sys_reg aarch64_sys_regs [] =
3580 {
3581 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3582 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3583 { "elr_el1", CPEN_(0,C0,1), 0 },
3584 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3585 { "sp_el0", CPEN_(0,C1,0), 0 },
3586 { "spsel", CPEN_(0,C2,0), 0 },
3587 { "daif", CPEN_(3,C2,1), 0 },
3588 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
3589 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3590 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3591 { "nzcv", CPEN_(3,C2,0), 0 },
3592 { "fpcr", CPEN_(3,C4,0), 0 },
3593 { "fpsr", CPEN_(3,C4,1), 0 },
3594 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3595 { "dlr_el0", CPEN_(3,C5,1), 0 },
3596 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3597 { "elr_el2", CPEN_(4,C0,1), 0 },
3598 { "sp_el1", CPEN_(4,C1,0), 0 },
3599 { "spsr_irq", CPEN_(4,C3,0), 0 },
3600 { "spsr_abt", CPEN_(4,C3,1), 0 },
3601 { "spsr_und", CPEN_(4,C3,2), 0 },
3602 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3603 { "spsr_el3", CPEN_(6,C0,0), 0 },
3604 { "elr_el3", CPEN_(6,C0,1), 0 },
3605 { "sp_el2", CPEN_(6,C1,0), 0 },
3606 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3607 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3608 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
3609 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
3610 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
3611 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
3612 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
3613 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
3614 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
3615 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
3616 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
3617 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
3618 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
3619 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
3620 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
3621 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
3622 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
3623 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
3624 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
3625 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
3626 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
3627 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
3628 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
3629 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
3630 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
3631 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
3632 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
3633 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
3634 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
3635 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
3636 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
3637 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
3638 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
3639 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
3640 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
3641 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
3642 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
3643 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
3644 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
3645 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
3646 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3647 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3648 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3649 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3650 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3651 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3652 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3653 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3654 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3655 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3656 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3657 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3658 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3659 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3660 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3661 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3662 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3663 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3664 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3665 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3666 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3667 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3668 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3669 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3670 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3671 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3672 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3673 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3674 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3675 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3676 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3677 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3678 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3679 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3680 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3681 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3682 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3683 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3684 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3685 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3686 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3687 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3688 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3689 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3690 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3691 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3692 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3693 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3694 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3695 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3696 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3697 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3698 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3699 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3700 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
3701 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3702 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
3703 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3704 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
3705 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3706 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3707 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3708 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3709 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3710 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3711 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3712 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3713 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3714 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3715 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3716 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3717 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3718 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3719 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3720 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3721 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3722 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3723 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3724 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3725 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3726 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3727 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3728 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
3729 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
3730 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
3731 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3732 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3733 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3734 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
3735 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3736 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3737 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3738 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3739 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3740 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3741 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
3742 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3743 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3744 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3745 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3746 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
3747 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
3748 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
3749 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3750 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3751 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3752 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3753 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3754 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3755 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3756 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3757 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3758 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3759 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3760 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3761 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3762 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3763 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3764 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3765 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3766 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3767 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3768 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3769 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3770 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3771 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3772 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3773 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3774 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3775 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3776 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3777 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3778 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3779 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3780 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3781 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3782 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3783 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3784 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3785 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3786 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3787 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3788 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3789 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3790 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3791 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3792 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3793 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3794 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3795 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3796 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3797 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3798 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3799 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3800 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3801 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3802 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3803 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3804 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3805 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3806 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3807 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3808 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3809 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3810 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3811 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3812 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3813 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3814 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3815 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3816 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3817 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3818 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3819 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3820 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3821 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3822 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3823 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3824 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3825 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3826 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3827 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3828 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3829 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3830 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3831 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3832 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3833 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3834 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3835 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3836 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3837 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3838 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3839 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3840 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3841 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3842 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3843 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3844 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3845 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3846 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3847 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3848 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3849 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3850 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3851 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3852 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3853 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3854 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3855 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3856 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3857 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3858 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3859 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3860 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3861 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3862 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3863 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3864 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3865 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3866 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3867 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3868 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3869 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3870 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3871 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3872 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3873 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3874 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3875 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3876 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3877 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3878 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3879 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3880 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3881 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3882 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3883 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3884 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3885 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3886 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3887 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3888 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3889 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3890 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3891 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3892 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3893 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3894 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3895 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3896 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3897 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3898 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3899 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3900 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3901 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3902 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3903 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3904 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3905 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3906 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3907 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3908 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3909 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3910 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3911 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3912 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3913 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3914 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3915 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3916 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3917 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3918 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3919 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3920 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3921 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
3922 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
3923 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
3924 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
3925 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
3926 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
3927 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
3928 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
3929 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
3930 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
3931 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
3932 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
3933 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
3934 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3935 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3936 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3937 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3938 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3939 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3940 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3941 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3942 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3943 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3944 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3945 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3946 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3947 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3948 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3949 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3950 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
3951 { 0, CPENC(0,0,0,0,0), 0 },
3952 };
3953
3954 bfd_boolean
3955 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3956 {
3957 return (reg->flags & F_DEPRECATED) != 0;
3958 }
3959
/* Return TRUE if the architecture described by the feature set FEATURES
   includes the system register REG.  Registers whose table entry does not
   carry F_ARCHEXT belong to the base architecture and are always
   supported; each remaining check matches a register's encoding (built
   with the CPENC/CPEN_ macros defined earlier in this file) against the
   architecture extension that introduced it.  */

bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  /* Base-architecture register: always supported.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
     ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  /* ARMv8.3 Pointer authentication keys.  */
  if ((reg->value == CPENC (3, 0, C2, C1, 0)
       || reg->value == CPENC (3, 0, C2, C1, 1)
       || reg->value == CPENC (3, 0, C2, C1, 2)
       || reg->value == CPENC (3, 0, C2, C1, 3)
       || reg->value == CPENC (3, 0, C2, C2, 0)
       || reg->value == CPENC (3, 0, C2, C2, 1)
       || reg->value == CPENC (3, 0, C2, C2, 2)
       || reg->value == CPENC (3, 0, C2, C2, 3)
       || reg->value == CPENC (3, 0, C2, C3, 0)
       || reg->value == CPENC (3, 0, C2, C3, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
    return FALSE;

  return TRUE;
}
4080
/* PSTATE field names and their encodings for MSR (immediate).  Entries
   flagged F_ARCHEXT are only available when the corresponding
   architecture extension is enabled; see
   aarch64_pstatefield_supported_p.  The list is terminated by a null
   name.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel", 0x05, 0 },
  { "daifset", 0x1e, 0 },
  { "daifclr", 0x1f, 0 },
  { "pan", 0x04, F_ARCHEXT },	/* Requires AARCH64_FEATURE_PAN.  */
  { "uao", 0x03, F_ARCHEXT },	/* Requires AARCH64_FEATURE_V8_2.  */
  { 0, CPENC(0,0,0,0,0), 0 },
};
4090
4091 bfd_boolean
4092 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4093 const aarch64_sys_reg *reg)
4094 {
4095 if (!(reg->flags & F_ARCHEXT))
4096 return TRUE;
4097
4098 /* PAN. Values are from aarch64_pstatefields. */
4099 if (reg->value == 0x04
4100 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4101 return FALSE;
4102
4103 /* UAO. Values are from aarch64_pstatefields. */
4104 if (reg->value == 0x03
4105 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4106 return FALSE;
4107
4108 return TRUE;
4109 }
4110
/* IC (instruction cache maintenance) operations.  F_HASXT marks
   operations that take an Xt register operand (see
   aarch64_sys_ins_reg_has_xt).  Terminated by a null name.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
4118
/* DC (data cache maintenance) operations.  F_HASXT marks operations
   that take an Xt register operand; F_ARCHEXT entries are gated by
   aarch64_sys_ins_reg_supported_p.  Terminated by a null name.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",	CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",	CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",	CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",	CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",	CPENS (3, C7, C11, 1), F_HASXT },
    /* Requires AARCH64_FEATURE_V8_2 (see aarch64_sys_ins_reg_supported_p).  */
    { "cvap",	CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "civac",	CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",	CPENS (0, C7, C14, 2), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
4132
/* AT (address translation) operations.  All take an Xt register operand
   (F_HASXT); F_ARCHEXT entries are gated by
   aarch64_sys_ins_reg_supported_p.  Terminated by a null name.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    /* The next two require AARCH64_FEATURE_V8_2
       (see aarch64_sys_ins_reg_supported_p).  */
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
4151
/* TLBI (TLB invalidation) operations.  F_HASXT marks operations that
   take an Xt register operand (VA or IPA argument); operations without
   it invalidate unconditionally.  Terminated by a null name.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
4188
4189 bfd_boolean
4190 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4191 {
4192 return (sys_ins_reg->flags & F_HASXT) != 0;
4193 }
4194
4195 extern bfd_boolean
4196 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4197 const aarch64_sys_ins_reg *reg)
4198 {
4199 if (!(reg->flags & F_ARCHEXT))
4200 return TRUE;
4201
4202 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4203 if (reg->value == CPENS (3, C7, C12, 1)
4204 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4205 return FALSE;
4206
4207 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4208 if ((reg->value == CPENS (0, C7, C9, 0)
4209 || reg->value == CPENS (0, C7, C9, 1))
4210 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4211 return FALSE;
4212
4213 return TRUE;
4214 }
4215
4216 #undef C0
4217 #undef C1
4218 #undef C2
4219 #undef C3
4220 #undef C4
4221 #undef C5
4222 #undef C6
4223 #undef C7
4224 #undef C8
4225 #undef C9
4226 #undef C10
4227 #undef C11
4228 #undef C12
4229 #undef C13
4230 #undef C14
4231 #undef C15
4232
/* Extract bit number BT of instruction word INSN.  */
#define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of INSN as an unsigned value.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4235
4236 static bfd_boolean
4237 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4238 const aarch64_insn insn)
4239 {
4240 int t = BITS (insn, 4, 0);
4241 int n = BITS (insn, 9, 5);
4242 int t2 = BITS (insn, 14, 10);
4243
4244 if (BIT (insn, 23))
4245 {
4246 /* Write back enabled. */
4247 if ((t == n || t2 == n) && n != 31)
4248 return FALSE;
4249 }
4250
4251 if (BIT (insn, 22))
4252 {
4253 /* Load */
4254 if (t == t2)
4255 return FALSE;
4256 }
4257
4258 return TRUE;
4259 }
4260
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bfd_boolean
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  /* Mask of the bits above the low ESIZE bytes (esize * 4 bits, applied
     twice).  The shift is split in two so that it stays well-defined
     when ESIZE == 8: a single shift by 64 would be undefined behavior.  */
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  /* The value must fit in ESIZE bytes, either zero-extended (upper bits
     all clear) or sign-extended (upper bits all set).  */
  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return FALSE;
  /* Narrow SVALUE step by step to the smallest element size whose
     replication still reproduces UVALUE.  If the value repeats even at
     byte granularity, DUP with a byte element can encode it, so DUPM is
     not needed.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return FALSE;
	}
    }
  /* NOTE(review): the divide by 256 appears to undo DUP's "immediate
     shifted left by 8" form before the signed 8-bit range check —
     confirm against the SVE DUP (immediate) encoding.  */
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  /* DUP cannot represent the value iff it is outside the signed 8-bit
     immediate range.  */
  return svalue < -128 || svalue >= 128;
}
4287
4288 /* Include the opcode description table as well as the operand description
4289 table. */
4290 #define VERIFIER(x) verify_##x
4291 #include "aarch64-tbl.h"
This page took 0.144474 seconds and 3 git commands to generate.