[AArch64][SVE 23/32] Add SVE pattern and prfop operands
[deliverable/binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Non-zero to enable verbose debug dumping (see the DEBUG_TRACE uses
   below); only present in DEBUG_AARCH64 builds.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning.
   Indexed by the encoded 5-bit pattern value, e.g. for the <pattern>
   operand of SVE element-count instructions.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning.
   Indexed by the encoded 4-bit prfop value of SVE PRF[BHWD].  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Classification of the qualifier sequence of an AdvSIMD data-processing
   instruction; used to decide which operand carries the size:Q fields.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};

/* Index of the operand that determines the size:Q encoding, indexed by
   enum data_pattern.  Must be kept in sync with the enum above.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
static enum data_pattern
get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
{
  /* Classify the first three qualifiers of the sequence; the order of
     the checks below matters because the patterns overlap.  */
  if (vector_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* e.g. v.4s, v.4s, v.4s
	 or v.4h, v.4h, v.h[3].  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]))
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2])))
	return DP_VECTOR_3SAME;
      /* e.g. v.8h, v.8b, v.8b.
	 or v.4s, v.4h, v.h[2].
	 or v.8h, v.16b.  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
	return DP_VECTOR_LONG;
      /* e.g. v.8h, v.8h, v.8b.  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1])))
	return DP_VECTOR_WIDE;
    }
  else if (fp_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
	return DP_VECTOR_ACROSS_LANES;
    }

  return DP_UNKNOWN;
}
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
201 \f
/* Description of every instruction field, as { lsb, width } pairs.
   Indexed by enum aarch64_field_kind (FLD_*); must be kept in sync
   with that enumeration.  */
const aarch64_field fields[] =
{
    { 0,  0 },	/* NIL.  */
    { 0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    { 0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 5, 19 },	/* imm19: e.g. in CBZ.  */
    { 5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29, 2 },	/* immlo: e.g. in ADRP.  */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
    { 0, 5 },	/* Rt: in load/store instructions.  */
    { 0, 5 },	/* Rd: in many integer instructions.  */
    { 5, 5 },	/* Rn: in many integer instructions.  */
    { 10, 5 },	/* Rt2: in load/store pair instructions.  */
    { 10, 5 },	/* Ra: in fp instructions.  */
    { 5, 3 },	/* op2: in the system instructions.  */
    { 8, 4 },	/* CRm: in the system instructions.  */
    { 12, 4 },	/* CRn: in the system instructions.  */
    { 16, 3 },	/* op1: in the system instructions.  */
    { 19, 2 },	/* op0: in the system instructions.  */
    { 10, 3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12, 4 },	/* cond: condition flags as a source operand.  */
    { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12, 1 },	/* S: in load/store reg offset instructions.  */
    { 21, 2 },	/* hw: in move wide constant instructions.  */
    { 22, 2 },	/* opc: in load/store reg offset instructions.  */
    { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22, 2 },	/* type: floating point type field in fp data inst.  */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10, 6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    { 5, 14 },	/* imm14: in test bit and branch instructions.  */
    { 5, 16 },	/* imm16: in exception instructions.  */
    { 0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22, 1 },	/* N: in logical (immediate) instructions.  */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31, 1 },	/* sf: in integer data processing instructions.  */
    { 30, 1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31, 1 },	/* b5: in the test bit and branch instructions.  */
    { 19, 5 },	/* b40: in the test bit and branch instructions.  */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    { 0, 5 },	/* SVE_Zd: SVE vector register, bits [4,0].  */
    { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    { 5, 5 },	/* SVE_pattern: vector pattern enumeration.  */
    { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
};
286
287 enum aarch64_operand_class
288 aarch64_get_operand_class (enum aarch64_opnd type)
289 {
290 return aarch64_operands[type].op_class;
291 }
292
293 const char *
294 aarch64_get_operand_name (enum aarch64_opnd type)
295 {
296 return aarch64_operands[type].name;
297 }
298
299 /* Get operand description string.
300 This is usually for the diagnosis purpose. */
301 const char *
302 aarch64_get_operand_desc (enum aarch64_opnd type)
303 {
304 return aarch64_operands[type].desc;
305 }
306
/* Table of all conditional affixes, indexed by the 4-bit condition code.
   Each entry lists the accepted spellings (first one is canonical) and
   the encoded value.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq"}, 0x0},
  {{"ne"}, 0x1},
  {{"cs", "hs"}, 0x2},
  {{"cc", "lo", "ul"}, 0x3},
  {{"mi"}, 0x4},
  {{"pl"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi"}, 0x8},
  {{"ls"}, 0x9},
  {{"ge"}, 0xa},
  {{"lt"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
327
328 const aarch64_cond *
329 get_cond_from_value (aarch64_insn value)
330 {
331 assert (value < 16);
332 return &aarch64_conds[(unsigned int) value];
333 }
334
335 const aarch64_cond *
336 get_inverted_cond (const aarch64_cond *cond)
337 {
338 return &aarch64_conds[cond->value ^ 0x1];
339 }
340
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   Terminated by a NULL name entry.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {NULL, 0},
};
364
365 enum aarch64_modifier_kind
366 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
367 {
368 return desc - aarch64_operand_modifiers;
369 }
370
371 aarch64_insn
372 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
373 {
374 return aarch64_operand_modifiers[kind].value;
375 }
376
377 enum aarch64_modifier_kind
378 aarch64_get_operand_modifier_from_value (aarch64_insn value,
379 bfd_boolean extend_p)
380 {
381 if (extend_p == TRUE)
382 return AARCH64_MOD_UXTB + value;
383 else
384 return AARCH64_MOD_LSL - value;
385 }
386
387 bfd_boolean
388 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
389 {
390 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
391 ? TRUE : FALSE;
392 }
393
394 static inline bfd_boolean
395 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
396 {
397 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
398 ? TRUE : FALSE;
399 }
400
/* Memory-barrier (DMB/DSB/ISB) option names, indexed by the 4-bit CRm
   encoding.  Reserved encodings get a "#0xNN" spelling.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
420
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },    /* PSB CSYNC.  */
  { NULL, 0x0 },
};
433
/* PRFM prefetch-operation names, indexed by the 5-bit prfop encoding.
   Reserved encodings have a NULL name and are printed as immediates.

   op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
474 \f
475 /* Utilities on value constraint. */
476
/* Return 1 iff LOW <= VALUE <= HIGH (inclusive range check).  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return low <= value && value <= high;
}
482
/* Return 1 iff VALUE is a multiple of ALIGN (which must be positive).

   The previous implementation tested (value & (align - 1)) == 0, which
   is only correct when ALIGN is a power of two.  SVE introduces operand
   constraints that are multiples of non-power-of-two values (e.g. 3),
   so use a plain remainder instead.  For power-of-two ALIGN the result
   is unchanged.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return (value % align) == 0 ? 1 : 0;
}
488
/* Return 1 iff VALUE is representable as a WIDTH-bit two's-complement
   signed field, i.e. -(2^(WIDTH-1)) <= VALUE < 2^(WIDTH-1).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t limit = (int64_t) 1 << (width - 1);
    return -limit <= value && value < limit;
  }
}
502
/* Return 1 iff VALUE is representable as a WIDTH-bit unsigned field,
   i.e. 0 <= VALUE < 2^WIDTH.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  return value >= 0 && value < ((int64_t) 1 << width);
}
516
517 /* Return 1 if OPERAND is SP or WSP. */
518 int
519 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
520 {
521 return ((aarch64_get_operand_class (operand->type)
522 == AARCH64_OPND_CLASS_INT_REG)
523 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
524 && operand->reg.regno == 31);
525 }
526
/* Return 1 if OPERAND is XZR or WZR: an integer register operand that
   cannot name the stack pointer and uses register number 31.  */
int
aarch64_zero_register_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
536
537 /* Return true if the operand *OPERAND that has the operand code
538 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
539 qualified by the qualifier TARGET. */
540
541 static inline int
542 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
543 aarch64_opnd_qualifier_t target)
544 {
545 switch (operand->qualifier)
546 {
547 case AARCH64_OPND_QLF_W:
548 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
549 return 1;
550 break;
551 case AARCH64_OPND_QLF_X:
552 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
553 return 1;
554 break;
555 case AARCH64_OPND_QLF_WSP:
556 if (target == AARCH64_OPND_QLF_W
557 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
558 return 1;
559 break;
560 case AARCH64_OPND_QLF_SP:
561 if (target == AARCH64_OPND_QLF_X
562 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
563 return 1;
564 break;
565 default:
566 break;
567 }
568
569 return 0;
570 }
571
572 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
573 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
574
575 Return NIL if more than one expected qualifiers are found. */
576
577 aarch64_opnd_qualifier_t
578 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
579 int idx,
580 const aarch64_opnd_qualifier_t known_qlf,
581 int known_idx)
582 {
583 int i, saved_i;
584
585 /* Special case.
586
587 When the known qualifier is NIL, we have to assume that there is only
588 one qualifier sequence in the *QSEQ_LIST and return the corresponding
589 qualifier directly. One scenario is that for instruction
590 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
591 which has only one possible valid qualifier sequence
592 NIL, S_D
593 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
594 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
595
596 Because the qualifier NIL has dual roles in the qualifier sequence:
597 it can mean no qualifier for the operand, or the qualifer sequence is
598 not in use (when all qualifiers in the sequence are NILs), we have to
599 handle this special case here. */
600 if (known_qlf == AARCH64_OPND_NIL)
601 {
602 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
603 return qseq_list[0][idx];
604 }
605
606 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
607 {
608 if (qseq_list[i][known_idx] == known_qlf)
609 {
610 if (saved_i != -1)
611 /* More than one sequences are found to have KNOWN_QLF at
612 KNOWN_IDX. */
613 return AARCH64_OPND_NIL;
614 saved_i = i;
615 }
616 }
617
618 return qseq_list[saved_i][idx];
619 }
620
/* Broad categories of operand qualifiers; selects how the data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size, number of elements and
     standard encoding value; for OQK_VALUE_IN_RANGE they are the lower
     and upper bounds (data2 unused).  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
641
642 /* Indexed by the operand qualifier enumerators. */
643 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
644 {
645 {0, 0, 0, "NIL", OQK_NIL},
646
647 /* Operand variant qualifiers.
648 First 3 fields:
649 element size, number of elements and common value for encoding. */
650
651 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
652 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
653 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
654 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
655
656 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
657 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
658 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
659 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
660 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
661
662 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
663 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
664 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
665 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
666 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
667 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
668 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
669 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
670 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
671 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
672
673 {0, 0, 0, "z", OQK_OPD_VARIANT},
674 {0, 0, 0, "m", OQK_OPD_VARIANT},
675
676 /* Qualifiers constraining the value range.
677 First 3 fields:
678 Lower bound, higher bound, unused. */
679
680 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
681 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
682 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
683 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
684 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
685 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
686
687 /* Qualifiers for miscellaneous purpose.
688 First 3 fields:
689 unused, unused and unused. */
690
691 {0, 0, 0, "lsl", 0},
692 {0, 0, 0, "msl", 0},
693
694 {0, 0, 0, "retrieving", 0},
695 };
696
697 static inline bfd_boolean
698 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
699 {
700 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
701 ? TRUE : FALSE;
702 }
703
704 static inline bfd_boolean
705 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
706 {
707 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
708 ? TRUE : FALSE;
709 }
710
711 const char*
712 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
713 {
714 return aarch64_opnd_qualifiers[qualifier].desc;
715 }
716
717 /* Given an operand qualifier, return the expected data element size
718 of a qualified operand. */
719 unsigned char
720 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
721 {
722 assert (operand_variant_qualifier_p (qualifier) == TRUE);
723 return aarch64_opnd_qualifiers[qualifier].data0;
724 }
725
726 unsigned char
727 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
728 {
729 assert (operand_variant_qualifier_p (qualifier) == TRUE);
730 return aarch64_opnd_qualifiers[qualifier].data1;
731 }
732
733 aarch64_insn
734 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
735 {
736 assert (operand_variant_qualifier_p (qualifier) == TRUE);
737 return aarch64_opnd_qualifiers[qualifier].data2;
738 }
739
740 static int
741 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
742 {
743 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
744 return aarch64_opnd_qualifiers[qualifier].data0;
745 }
746
747 static int
748 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
749 {
750 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
751 return aarch64_opnd_qualifiers[qualifier].data1;
752 }
753
#ifdef DEBUG_AARCH64
/* printf-style debug output helper; prefixes each message with "#### "
   and appends a newline.  Debug builds only.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}

/* Print one qualifier sequence (AARCH64_MAX_OPND_NUM entries) on a
   single debug line.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}

/* Dump the qualifiers currently attached to the operands in OPND next to
   the candidate sequence QUALIFIER being matched against them.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
791
792 /* TODO improve this, we can have an extra field at the runtime to
793 store the number of operands rather than calculating it every time. */
794
795 int
796 aarch64_num_of_operands (const aarch64_opcode *opcode)
797 {
798 int i = 0;
799 const enum aarch64_opnd *opnds = opcode->operands;
800 while (opnds[i++] != AARCH64_OPND_NIL)
801 ;
802 --i;
803 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
804 return i;
805 }
806
807 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
808 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
809
810 N.B. on the entry, it is very likely that only some operands in *INST
811 have had their qualifiers been established.
812
813 If STOP_AT is not -1, the function will only try to match
814 the qualifier sequence for operands before and including the operand
815 of index STOP_AT; and on success *RET will only be filled with the first
816 (STOP_AT+1) qualifiers.
817
818 A couple examples of the matching algorithm:
819
820 X,W,NIL should match
821 X,W,NIL
822
823 NIL,NIL should match
824 X ,NIL
825
826 Apart from serving the main encoding routine, this can also be called
827 during or after the operand decoding. */
828
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  /* Clamp STOP_AT so the loops below always stay within the operand
     list; -1 means "match all operands".  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  /* An all-NIL sequence at position 0 means "no qualifiers at
	     all", which counts as a successful (trivial) match; at any
	     later position it simply terminates the list.  */
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  On the break above QUALIFIERS_LIST is
	 still pointing at the matching sequence.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
931
932 /* Operand qualifier matching and resolving.
933
934 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
935 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
936
937 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
938 succeeds. */
939
static int
match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
{
  int i, nops;
  aarch64_opnd_qualifier_seq_t qualifiers;

  /* Find the qualifier sequence in the opcode's list that best matches
     the (possibly partially established) qualifiers in *INST.  */
  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
			       qualifiers))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  if (inst->opcode->flags & F_STRICT)
    {
      /* Require an exact qualifier match, even for NIL qualifiers.  */
      nops = aarch64_num_of_operands (inst->opcode);
      for (i = 0; i < nops; ++i)
	if (inst->operands[i].qualifier != qualifiers[i])
	  return FALSE;
    }

  /* Update the qualifiers.  */
  if (update_p == TRUE)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
978
979 /* Return TRUE if VALUE is a wide constant that can be moved into a general
980 register by MOVZ.
981
982 IS32 indicates whether value is a 32-bit immediate or not.
983 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
984 amount will be returned in *SHIFT_AMOUNT. */
985
bfd_boolean
aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
{
  int amount;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);

  if (is32)
    {
      /* Allow all zeros or all ones in top 32-bits, so that
	 32-bit constant expressions like ~0x80000000 are
	 permitted.  */
      uint64_t ext = value;
      if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
	/* Immediate out of range.  */
	return FALSE;
      /* Keep only the low 32 bits for the shift tests below.  */
      value &= (int64_t) 0xffffffff;
    }

  /* first, try movz then movn */
  /* VALUE is a MOVZ candidate iff it consists of a single 16-bit chunk
     at one of the shift positions 0/16/32/48 (32/48 only for 64-bit).  */
  amount = -1;
  if ((value & ((int64_t) 0xffff << 0)) == value)
    amount = 0;
  else if ((value & ((int64_t) 0xffff << 16)) == value)
    amount = 16;
  else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
    amount = 32;
  else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
    amount = 48;

  if (amount == -1)
    {
      DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
      return FALSE;
    }

  if (shift_amount != NULL)
    *shift_amount = amount;

  DEBUG_TRACE ("exit TRUE with amount %d", amount);

  return TRUE;
}
1029
1030 /* Build the accepted values for immediate logical SIMD instructions.
1031
1032 The standard encodings of the immediate value are:
1033 N imms immr SIMD size R S
1034 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1035 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1036 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1037 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1038 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1039 0 11110s 00000r 2 UInt(r) UInt(s)
1040 where all-ones value of S is reserved.
1041
1042 Let's call E the SIMD size.
1043
1044 The immediate value is: S+1 bits '1' rotated to the right by R.
1045
1046 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1047 (remember S != E - 1). */
1048
/* Number of valid logical-immediate encodings: 64*63 + 32*31 + ... + 2*1
   (see the derivation in the comment above).  */
#define TOTAL_IMM_NB  5334

/* One (replicated immediate value, standard encoding) pair.  */
typedef struct
{
  uint64_t imm;		/* Immediate replicated to the full 64 bits.  */
  aarch64_insn encoding;	/* Standard N:immr:imms encoding.  */
} simd_imm_encoding;

/* Table of all valid logical immediates; built lazily by
   build_immediate_table and sorted by IMM so it can be binary-searched.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1058
1059 static int
1060 simd_imm_encoding_cmp(const void *i1, const void *i2)
1061 {
1062 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1063 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1064
1065 if (imm1->imm < imm2->imm)
1066 return -1;
1067 if (imm1->imm > imm2->imm)
1068 return +1;
1069 return 0;
1070 }
1071
1072 /* immediate bitfield standard encoding
1073 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1074 1 ssssss rrrrrr 64 rrrrrr ssssss
1075 0 0sssss 0rrrrr 32 rrrrr sssss
1076 0 10ssss 00rrrr 16 rrrr ssss
1077 0 110sss 000rrr 8 rrr sss
1078 0 1110ss 0000rr 4 rr ss
1079 0 11110s 00000r 2 r s */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  /* Assemble imm13 as N:immr:imms, i.e. is64:r:s (see table above).  */
  int encoding = s;
  encoding |= r << 6;
  encoding |= is64 << 12;
  return encoding;
}
1085
/* Fill in and sort SIMD_IMMEDIATES: enumerate, for each element size E,
   every rotation R of every run of S+1 consecutive one bits, replicate it
   to 64 bits and record it with its standard encoding.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* Replicate the constant depending on SIMD size.  The cases
	       deliberately fall through: e.g. a 2-bit element is doubled
	       to 4 bits, then 8, then 16, 32 and 64.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm <<  2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm <<  4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm <<  8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  /* Sort by immediate value so aarch64_logical_immediate_p can bsearch.  */
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1146
1147 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1148 be accepted by logical (immediate) instructions
1149 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1150
1151 ESIZE is the number of bytes in the decoded immediate value.
1152 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1153 VALUE will be returned in *ENCODING. */
1154
1155 bfd_boolean
1156 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1157 {
1158 simd_imm_encoding imm_enc;
1159 const simd_imm_encoding *imm_encoding;
1160 static bfd_boolean initialized = FALSE;
1161 uint64_t upper;
1162 int i;
1163
1164 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1165 value, is32);
1166
1167 if (initialized == FALSE)
1168 {
1169 build_immediate_table ();
1170 initialized = TRUE;
1171 }
1172
1173 /* Allow all zeros or all ones in top bits, so that
1174 constant expressions like ~1 are permitted. */
1175 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1176 if ((value & ~upper) != value && (value | upper) != value)
1177 return FALSE;
1178
1179 /* Replicate to a full 64-bit value. */
1180 value &= ~upper;
1181 for (i = esize * 8; i < 64; i *= 2)
1182 value |= (value << i);
1183
1184 imm_enc.imm = value;
1185 imm_encoding = (const simd_imm_encoding *)
1186 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1187 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1188 if (imm_encoding == NULL)
1189 {
1190 DEBUG_TRACE ("exit with FALSE");
1191 return FALSE;
1192 }
1193 if (encoding != NULL)
1194 *encoding = imm_encoding->encoding;
1195 DEBUG_TRACE ("exit with TRUE");
1196 return TRUE;
1197 }
1198
1199 /* If 64-bit immediate IMM is in the format of
1200 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1201 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1202 of value "abcdefgh". Otherwise return -1. */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int i;

  for (i = 0; i < 8; i++)
    {
      uint32_t byte = (imm >> (8 * i)) & 0xff;

      switch (byte)
	{
	case 0xff:
	  result |= 1 << i;
	  break;
	case 0x00:
	  break;
	default:
	  /* A byte that is neither all-zeros nor all-ones cannot come
	     from expanding an 8-bit immediate.  */
	  return -1;
	}
    }
  return result;
}
1220
1221 /* Utility inline functions for operand_general_constraint_met_p. */
1222
1223 static inline void
1224 set_error (aarch64_operand_error *mismatch_detail,
1225 enum aarch64_operand_error_kind kind, int idx,
1226 const char* error)
1227 {
1228 if (mismatch_detail == NULL)
1229 return;
1230 mismatch_detail->kind = kind;
1231 mismatch_detail->index = idx;
1232 mismatch_detail->error = error;
1233 }
1234
1235 static inline void
1236 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1237 const char* error)
1238 {
1239 if (mismatch_detail == NULL)
1240 return;
1241 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1242 }
1243
1244 static inline void
1245 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1246 int idx, int lower_bound, int upper_bound,
1247 const char* error)
1248 {
1249 if (mismatch_detail == NULL)
1250 return;
1251 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1252 mismatch_detail->data[0] = lower_bound;
1253 mismatch_detail->data[1] = upper_bound;
1254 }
1255
1256 static inline void
1257 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1258 int idx, int lower_bound, int upper_bound)
1259 {
1260 if (mismatch_detail == NULL)
1261 return;
1262 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1263 _("immediate value"));
1264 }
1265
1266 static inline void
1267 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1268 int idx, int lower_bound, int upper_bound)
1269 {
1270 if (mismatch_detail == NULL)
1271 return;
1272 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1273 _("immediate offset"));
1274 }
1275
1276 static inline void
1277 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1278 int idx, int lower_bound, int upper_bound)
1279 {
1280 if (mismatch_detail == NULL)
1281 return;
1282 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1283 _("register number"));
1284 }
1285
1286 static inline void
1287 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1288 int idx, int lower_bound, int upper_bound)
1289 {
1290 if (mismatch_detail == NULL)
1291 return;
1292 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1293 _("register element index"));
1294 }
1295
1296 static inline void
1297 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1298 int idx, int lower_bound, int upper_bound)
1299 {
1300 if (mismatch_detail == NULL)
1301 return;
1302 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1303 _("shift amount"));
1304 }
1305
1306 static inline void
1307 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1308 int alignment)
1309 {
1310 if (mismatch_detail == NULL)
1311 return;
1312 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1313 mismatch_detail->data[0] = alignment;
1314 }
1315
1316 static inline void
1317 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1318 int expected_num)
1319 {
1320 if (mismatch_detail == NULL)
1321 return;
1322 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1323 mismatch_detail->data[0] = expected_num;
1324 }
1325
1326 static inline void
1327 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1328 const char* error)
1329 {
1330 if (mismatch_detail == NULL)
1331 return;
1332 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1333 }
1334
1335 /* General constraint checking based on operand code.
1336
1337 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1338 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1339
1340 This function has to be called after the qualifiers for all operands
1341 have been resolved.
1342
1343 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1344 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1345 of error message during the disassembling where error message is not
1346 wanted. We avoid the dynamic construction of strings of error messages
1347 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1348 use a combination of error code, static string and some integer data to
1349 represent an error. */
1350
1351 static int
1352 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1353 enum aarch64_opnd type,
1354 const aarch64_opcode *opcode,
1355 aarch64_operand_error *mismatch_detail)
1356 {
1357 unsigned num;
1358 unsigned char size;
1359 int64_t imm;
1360 const aarch64_opnd_info *opnd = opnds + idx;
1361 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1362
1363 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1364
1365 switch (aarch64_operands[type].op_class)
1366 {
1367 case AARCH64_OPND_CLASS_INT_REG:
1368 /* Check pair reg constraints for cas* instructions. */
1369 if (type == AARCH64_OPND_PAIRREG)
1370 {
1371 assert (idx == 1 || idx == 3);
1372 if (opnds[idx - 1].reg.regno % 2 != 0)
1373 {
1374 set_syntax_error (mismatch_detail, idx - 1,
1375 _("reg pair must start from even reg"));
1376 return 0;
1377 }
1378 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1379 {
1380 set_syntax_error (mismatch_detail, idx,
1381 _("reg pair must be contiguous"));
1382 return 0;
1383 }
1384 break;
1385 }
1386
1387 /* <Xt> may be optional in some IC and TLBI instructions. */
1388 if (type == AARCH64_OPND_Rt_SYS)
1389 {
1390 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1391 == AARCH64_OPND_CLASS_SYSTEM));
1392 if (opnds[1].present
1393 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1394 {
1395 set_other_error (mismatch_detail, idx, _("extraneous register"));
1396 return 0;
1397 }
1398 if (!opnds[1].present
1399 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1400 {
1401 set_other_error (mismatch_detail, idx, _("missing register"));
1402 return 0;
1403 }
1404 }
1405 switch (qualifier)
1406 {
1407 case AARCH64_OPND_QLF_WSP:
1408 case AARCH64_OPND_QLF_SP:
1409 if (!aarch64_stack_pointer_p (opnd))
1410 {
1411 set_other_error (mismatch_detail, idx,
1412 _("stack pointer register expected"));
1413 return 0;
1414 }
1415 break;
1416 default:
1417 break;
1418 }
1419 break;
1420
1421 case AARCH64_OPND_CLASS_SVE_REG:
1422 switch (type)
1423 {
1424 case AARCH64_OPND_SVE_Zn_INDEX:
1425 size = aarch64_get_qualifier_esize (opnd->qualifier);
1426 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1427 {
1428 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1429 0, 64 / size - 1);
1430 return 0;
1431 }
1432 break;
1433
1434 case AARCH64_OPND_SVE_ZnxN:
1435 case AARCH64_OPND_SVE_ZtxN:
1436 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1437 {
1438 set_other_error (mismatch_detail, idx,
1439 _("invalid register list"));
1440 return 0;
1441 }
1442 break;
1443
1444 default:
1445 break;
1446 }
1447 break;
1448
1449 case AARCH64_OPND_CLASS_PRED_REG:
1450 if (opnd->reg.regno >= 8
1451 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1452 {
1453 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1454 return 0;
1455 }
1456 break;
1457
1458 case AARCH64_OPND_CLASS_COND:
1459 if (type == AARCH64_OPND_COND1
1460 && (opnds[idx].cond->value & 0xe) == 0xe)
1461 {
1462 /* Not allow AL or NV. */
1463 set_syntax_error (mismatch_detail, idx, NULL);
1464 }
1465 break;
1466
1467 case AARCH64_OPND_CLASS_ADDRESS:
1468 /* Check writeback. */
1469 switch (opcode->iclass)
1470 {
1471 case ldst_pos:
1472 case ldst_unscaled:
1473 case ldstnapair_offs:
1474 case ldstpair_off:
1475 case ldst_unpriv:
1476 if (opnd->addr.writeback == 1)
1477 {
1478 set_syntax_error (mismatch_detail, idx,
1479 _("unexpected address writeback"));
1480 return 0;
1481 }
1482 break;
1483 case ldst_imm9:
1484 case ldstpair_indexed:
1485 case asisdlsep:
1486 case asisdlsop:
1487 if (opnd->addr.writeback == 0)
1488 {
1489 set_syntax_error (mismatch_detail, idx,
1490 _("address writeback expected"));
1491 return 0;
1492 }
1493 break;
1494 default:
1495 assert (opnd->addr.writeback == 0);
1496 break;
1497 }
1498 switch (type)
1499 {
1500 case AARCH64_OPND_ADDR_SIMM7:
1501 /* Scaled signed 7 bits immediate offset. */
1502 /* Get the size of the data element that is accessed, which may be
1503 different from that of the source register size,
1504 e.g. in strb/ldrb. */
1505 size = aarch64_get_qualifier_esize (opnd->qualifier);
1506 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1507 {
1508 set_offset_out_of_range_error (mismatch_detail, idx,
1509 -64 * size, 63 * size);
1510 return 0;
1511 }
1512 if (!value_aligned_p (opnd->addr.offset.imm, size))
1513 {
1514 set_unaligned_error (mismatch_detail, idx, size);
1515 return 0;
1516 }
1517 break;
1518 case AARCH64_OPND_ADDR_SIMM9:
1519 /* Unscaled signed 9 bits immediate offset. */
1520 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1521 {
1522 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1523 return 0;
1524 }
1525 break;
1526
1527 case AARCH64_OPND_ADDR_SIMM9_2:
1528 /* Unscaled signed 9 bits immediate offset, which has to be negative
1529 or unaligned. */
1530 size = aarch64_get_qualifier_esize (qualifier);
1531 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1532 && !value_aligned_p (opnd->addr.offset.imm, size))
1533 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1534 return 1;
1535 set_other_error (mismatch_detail, idx,
1536 _("negative or unaligned offset expected"));
1537 return 0;
1538
1539 case AARCH64_OPND_SIMD_ADDR_POST:
1540 /* AdvSIMD load/store multiple structures, post-index. */
1541 assert (idx == 1);
1542 if (opnd->addr.offset.is_reg)
1543 {
1544 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1545 return 1;
1546 else
1547 {
1548 set_other_error (mismatch_detail, idx,
1549 _("invalid register offset"));
1550 return 0;
1551 }
1552 }
1553 else
1554 {
1555 const aarch64_opnd_info *prev = &opnds[idx-1];
1556 unsigned num_bytes; /* total number of bytes transferred. */
1557 /* The opcode dependent area stores the number of elements in
1558 each structure to be loaded/stored. */
1559 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1560 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1561 /* Special handling of loading single structure to all lane. */
1562 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1563 * aarch64_get_qualifier_esize (prev->qualifier);
1564 else
1565 num_bytes = prev->reglist.num_regs
1566 * aarch64_get_qualifier_esize (prev->qualifier)
1567 * aarch64_get_qualifier_nelem (prev->qualifier);
1568 if ((int) num_bytes != opnd->addr.offset.imm)
1569 {
1570 set_other_error (mismatch_detail, idx,
1571 _("invalid post-increment amount"));
1572 return 0;
1573 }
1574 }
1575 break;
1576
1577 case AARCH64_OPND_ADDR_REGOFF:
1578 /* Get the size of the data element that is accessed, which may be
1579 different from that of the source register size,
1580 e.g. in strb/ldrb. */
1581 size = aarch64_get_qualifier_esize (opnd->qualifier);
1582 /* It is either no shift or shift by the binary logarithm of SIZE. */
1583 if (opnd->shifter.amount != 0
1584 && opnd->shifter.amount != (int)get_logsz (size))
1585 {
1586 set_other_error (mismatch_detail, idx,
1587 _("invalid shift amount"));
1588 return 0;
1589 }
1590 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1591 operators. */
1592 switch (opnd->shifter.kind)
1593 {
1594 case AARCH64_MOD_UXTW:
1595 case AARCH64_MOD_LSL:
1596 case AARCH64_MOD_SXTW:
1597 case AARCH64_MOD_SXTX: break;
1598 default:
1599 set_other_error (mismatch_detail, idx,
1600 _("invalid extend/shift operator"));
1601 return 0;
1602 }
1603 break;
1604
1605 case AARCH64_OPND_ADDR_UIMM12:
1606 imm = opnd->addr.offset.imm;
1607 /* Get the size of the data element that is accessed, which may be
1608 different from that of the source register size,
1609 e.g. in strb/ldrb. */
1610 size = aarch64_get_qualifier_esize (qualifier);
1611 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1612 {
1613 set_offset_out_of_range_error (mismatch_detail, idx,
1614 0, 4095 * size);
1615 return 0;
1616 }
1617 if (!value_aligned_p (opnd->addr.offset.imm, size))
1618 {
1619 set_unaligned_error (mismatch_detail, idx, size);
1620 return 0;
1621 }
1622 break;
1623
1624 case AARCH64_OPND_ADDR_PCREL14:
1625 case AARCH64_OPND_ADDR_PCREL19:
1626 case AARCH64_OPND_ADDR_PCREL21:
1627 case AARCH64_OPND_ADDR_PCREL26:
1628 imm = opnd->imm.value;
1629 if (operand_need_shift_by_two (get_operand_from_code (type)))
1630 {
1631 /* The offset value in a PC-relative branch instruction is alway
1632 4-byte aligned and is encoded without the lowest 2 bits. */
1633 if (!value_aligned_p (imm, 4))
1634 {
1635 set_unaligned_error (mismatch_detail, idx, 4);
1636 return 0;
1637 }
1638 /* Right shift by 2 so that we can carry out the following check
1639 canonically. */
1640 imm >>= 2;
1641 }
1642 size = get_operand_fields_width (get_operand_from_code (type));
1643 if (!value_fit_signed_field_p (imm, size))
1644 {
1645 set_other_error (mismatch_detail, idx,
1646 _("immediate out of range"));
1647 return 0;
1648 }
1649 break;
1650
1651 default:
1652 break;
1653 }
1654 break;
1655
1656 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1657 if (type == AARCH64_OPND_LEt)
1658 {
1659 /* Get the upper bound for the element index. */
1660 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1661 if (!value_in_range_p (opnd->reglist.index, 0, num))
1662 {
1663 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1664 return 0;
1665 }
1666 }
1667 /* The opcode dependent area stores the number of elements in
1668 each structure to be loaded/stored. */
1669 num = get_opcode_dependent_value (opcode);
1670 switch (type)
1671 {
1672 case AARCH64_OPND_LVt:
1673 assert (num >= 1 && num <= 4);
1674 /* Unless LD1/ST1, the number of registers should be equal to that
1675 of the structure elements. */
1676 if (num != 1 && opnd->reglist.num_regs != num)
1677 {
1678 set_reg_list_error (mismatch_detail, idx, num);
1679 return 0;
1680 }
1681 break;
1682 case AARCH64_OPND_LVt_AL:
1683 case AARCH64_OPND_LEt:
1684 assert (num >= 1 && num <= 4);
1685 /* The number of registers should be equal to that of the structure
1686 elements. */
1687 if (opnd->reglist.num_regs != num)
1688 {
1689 set_reg_list_error (mismatch_detail, idx, num);
1690 return 0;
1691 }
1692 break;
1693 default:
1694 break;
1695 }
1696 break;
1697
1698 case AARCH64_OPND_CLASS_IMMEDIATE:
1699 /* Constraint check on immediate operand. */
1700 imm = opnd->imm.value;
1701 /* E.g. imm_0_31 constrains value to be 0..31. */
1702 if (qualifier_value_in_range_constraint_p (qualifier)
1703 && !value_in_range_p (imm, get_lower_bound (qualifier),
1704 get_upper_bound (qualifier)))
1705 {
1706 set_imm_out_of_range_error (mismatch_detail, idx,
1707 get_lower_bound (qualifier),
1708 get_upper_bound (qualifier));
1709 return 0;
1710 }
1711
1712 switch (type)
1713 {
1714 case AARCH64_OPND_AIMM:
1715 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1716 {
1717 set_other_error (mismatch_detail, idx,
1718 _("invalid shift operator"));
1719 return 0;
1720 }
1721 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1722 {
1723 set_other_error (mismatch_detail, idx,
1724 _("shift amount expected to be 0 or 12"));
1725 return 0;
1726 }
1727 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1728 {
1729 set_other_error (mismatch_detail, idx,
1730 _("immediate out of range"));
1731 return 0;
1732 }
1733 break;
1734
1735 case AARCH64_OPND_HALF:
1736 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1737 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1738 {
1739 set_other_error (mismatch_detail, idx,
1740 _("invalid shift operator"));
1741 return 0;
1742 }
1743 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1744 if (!value_aligned_p (opnd->shifter.amount, 16))
1745 {
1746 set_other_error (mismatch_detail, idx,
1747 _("shift amount should be a multiple of 16"));
1748 return 0;
1749 }
1750 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1751 {
1752 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1753 0, size * 8 - 16);
1754 return 0;
1755 }
1756 if (opnd->imm.value < 0)
1757 {
1758 set_other_error (mismatch_detail, idx,
1759 _("negative immediate value not allowed"));
1760 return 0;
1761 }
1762 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1763 {
1764 set_other_error (mismatch_detail, idx,
1765 _("immediate out of range"));
1766 return 0;
1767 }
1768 break;
1769
1770 case AARCH64_OPND_IMM_MOV:
1771 {
1772 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1773 imm = opnd->imm.value;
1774 assert (idx == 1);
1775 switch (opcode->op)
1776 {
1777 case OP_MOV_IMM_WIDEN:
1778 imm = ~imm;
1779 /* Fall through... */
1780 case OP_MOV_IMM_WIDE:
1781 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
1782 {
1783 set_other_error (mismatch_detail, idx,
1784 _("immediate out of range"));
1785 return 0;
1786 }
1787 break;
1788 case OP_MOV_IMM_LOG:
1789 if (!aarch64_logical_immediate_p (imm, esize, NULL))
1790 {
1791 set_other_error (mismatch_detail, idx,
1792 _("immediate out of range"));
1793 return 0;
1794 }
1795 break;
1796 default:
1797 assert (0);
1798 return 0;
1799 }
1800 }
1801 break;
1802
1803 case AARCH64_OPND_NZCV:
1804 case AARCH64_OPND_CCMP_IMM:
1805 case AARCH64_OPND_EXCEPTION:
1806 case AARCH64_OPND_UIMM4:
1807 case AARCH64_OPND_UIMM7:
1808 case AARCH64_OPND_UIMM3_OP1:
1809 case AARCH64_OPND_UIMM3_OP2:
1810 size = get_operand_fields_width (get_operand_from_code (type));
1811 assert (size < 32);
1812 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1813 {
1814 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1815 (1 << size) - 1);
1816 return 0;
1817 }
1818 break;
1819
1820 case AARCH64_OPND_WIDTH:
1821 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
1822 && opnds[0].type == AARCH64_OPND_Rd);
1823 size = get_upper_bound (qualifier);
1824 if (opnd->imm.value + opnds[idx-1].imm.value > size)
1825 /* lsb+width <= reg.size */
1826 {
1827 set_imm_out_of_range_error (mismatch_detail, idx, 1,
1828 size - opnds[idx-1].imm.value);
1829 return 0;
1830 }
1831 break;
1832
1833 case AARCH64_OPND_LIMM:
1834 {
1835 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1836 uint64_t uimm = opnd->imm.value;
1837 if (opcode->op == OP_BIC)
1838 uimm = ~uimm;
1839 if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
1840 {
1841 set_other_error (mismatch_detail, idx,
1842 _("immediate out of range"));
1843 return 0;
1844 }
1845 }
1846 break;
1847
1848 case AARCH64_OPND_IMM0:
1849 case AARCH64_OPND_FPIMM0:
1850 if (opnd->imm.value != 0)
1851 {
1852 set_other_error (mismatch_detail, idx,
1853 _("immediate zero expected"));
1854 return 0;
1855 }
1856 break;
1857
1858 case AARCH64_OPND_SHLL_IMM:
1859 assert (idx == 2);
1860 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
1861 if (opnd->imm.value != size)
1862 {
1863 set_other_error (mismatch_detail, idx,
1864 _("invalid shift amount"));
1865 return 0;
1866 }
1867 break;
1868
1869 case AARCH64_OPND_IMM_VLSL:
1870 size = aarch64_get_qualifier_esize (qualifier);
1871 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
1872 {
1873 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1874 size * 8 - 1);
1875 return 0;
1876 }
1877 break;
1878
1879 case AARCH64_OPND_IMM_VLSR:
1880 size = aarch64_get_qualifier_esize (qualifier);
1881 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
1882 {
1883 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
1884 return 0;
1885 }
1886 break;
1887
1888 case AARCH64_OPND_SIMD_IMM:
1889 case AARCH64_OPND_SIMD_IMM_SFT:
1890 /* Qualifier check. */
1891 switch (qualifier)
1892 {
1893 case AARCH64_OPND_QLF_LSL:
1894 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1895 {
1896 set_other_error (mismatch_detail, idx,
1897 _("invalid shift operator"));
1898 return 0;
1899 }
1900 break;
1901 case AARCH64_OPND_QLF_MSL:
1902 if (opnd->shifter.kind != AARCH64_MOD_MSL)
1903 {
1904 set_other_error (mismatch_detail, idx,
1905 _("invalid shift operator"));
1906 return 0;
1907 }
1908 break;
1909 case AARCH64_OPND_QLF_NIL:
1910 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1911 {
1912 set_other_error (mismatch_detail, idx,
1913 _("shift is not permitted"));
1914 return 0;
1915 }
1916 break;
1917 default:
1918 assert (0);
1919 return 0;
1920 }
1921 /* Is the immediate valid? */
1922 assert (idx == 1);
1923 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
1924 {
1925 /* uimm8 or simm8 */
1926 if (!value_in_range_p (opnd->imm.value, -128, 255))
1927 {
1928 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
1929 return 0;
1930 }
1931 }
1932 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
1933 {
1934 /* uimm64 is not
1935 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1936 ffffffffgggggggghhhhhhhh'. */
1937 set_other_error (mismatch_detail, idx,
1938 _("invalid value for immediate"));
1939 return 0;
1940 }
1941 /* Is the shift amount valid? */
1942 switch (opnd->shifter.kind)
1943 {
1944 case AARCH64_MOD_LSL:
1945 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1946 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
1947 {
1948 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
1949 (size - 1) * 8);
1950 return 0;
1951 }
1952 if (!value_aligned_p (opnd->shifter.amount, 8))
1953 {
1954 set_unaligned_error (mismatch_detail, idx, 8);
1955 return 0;
1956 }
1957 break;
1958 case AARCH64_MOD_MSL:
1959 /* Only 8 and 16 are valid shift amount. */
1960 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
1961 {
1962 set_other_error (mismatch_detail, idx,
1963 _("shift amount expected to be 0 or 16"));
1964 return 0;
1965 }
1966 break;
1967 default:
1968 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1969 {
1970 set_other_error (mismatch_detail, idx,
1971 _("invalid shift operator"));
1972 return 0;
1973 }
1974 break;
1975 }
1976 break;
1977
1978 case AARCH64_OPND_FPIMM:
1979 case AARCH64_OPND_SIMD_FPIMM:
1980 if (opnd->imm.is_fp == 0)
1981 {
1982 set_other_error (mismatch_detail, idx,
1983 _("floating-point immediate expected"));
1984 return 0;
1985 }
1986 /* The value is expected to be an 8-bit floating-point constant with
1987 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1988 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1989 instruction). */
1990 if (!value_in_range_p (opnd->imm.value, 0, 255))
1991 {
1992 set_other_error (mismatch_detail, idx,
1993 _("immediate out of range"));
1994 return 0;
1995 }
1996 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1997 {
1998 set_other_error (mismatch_detail, idx,
1999 _("invalid shift operator"));
2000 return 0;
2001 }
2002 break;
2003
2004 default:
2005 break;
2006 }
2007 break;
2008
2009 case AARCH64_OPND_CLASS_CP_REG:
2010 /* Cn or Cm: 4-bit opcode field named for historical reasons.
2011 valid range: C0 - C15. */
2012 if (opnd->reg.regno > 15)
2013 {
2014 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2015 return 0;
2016 }
2017 break;
2018
2019 case AARCH64_OPND_CLASS_SYSTEM:
2020 switch (type)
2021 {
2022 case AARCH64_OPND_PSTATEFIELD:
2023 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2024 /* MSR UAO, #uimm4
2025 MSR PAN, #uimm4
2026 The immediate must be #0 or #1. */
2027 if ((opnd->pstatefield == 0x03 /* UAO. */
2028 || opnd->pstatefield == 0x04) /* PAN. */
2029 && opnds[1].imm.value > 1)
2030 {
2031 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2032 return 0;
2033 }
2034 /* MSR SPSel, #uimm4
2035 Uses uimm4 as a control value to select the stack pointer: if
2036 bit 0 is set it selects the current exception level's stack
2037 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2038 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2039 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2040 {
2041 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2042 return 0;
2043 }
2044 break;
2045 default:
2046 break;
2047 }
2048 break;
2049
2050 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2051 /* Get the upper bound for the element index. */
2052 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2053 /* Index out-of-range. */
2054 if (!value_in_range_p (opnd->reglane.index, 0, num))
2055 {
2056 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2057 return 0;
2058 }
2059 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2060 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2061 number is encoded in "size:M:Rm":
2062 size <Vm>
2063 00 RESERVED
2064 01 0:Rm
2065 10 M:Rm
2066 11 RESERVED */
2067 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2068 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2069 {
2070 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2071 return 0;
2072 }
2073 break;
2074
2075 case AARCH64_OPND_CLASS_MODIFIED_REG:
2076 assert (idx == 1 || idx == 2);
2077 switch (type)
2078 {
2079 case AARCH64_OPND_Rm_EXT:
2080 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
2081 && opnd->shifter.kind != AARCH64_MOD_LSL)
2082 {
2083 set_other_error (mismatch_detail, idx,
2084 _("extend operator expected"));
2085 return 0;
2086 }
2087 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2088 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2089 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2090 case. */
2091 if (!aarch64_stack_pointer_p (opnds + 0)
2092 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2093 {
2094 if (!opnd->shifter.operator_present)
2095 {
2096 set_other_error (mismatch_detail, idx,
2097 _("missing extend operator"));
2098 return 0;
2099 }
2100 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2101 {
2102 set_other_error (mismatch_detail, idx,
2103 _("'LSL' operator not allowed"));
2104 return 0;
2105 }
2106 }
2107 assert (opnd->shifter.operator_present /* Default to LSL. */
2108 || opnd->shifter.kind == AARCH64_MOD_LSL);
2109 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2110 {
2111 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2112 return 0;
2113 }
2114 /* In the 64-bit form, the final register operand is written as Wm
2115 for all but the (possibly omitted) UXTX/LSL and SXTX
2116 operators.
2117 N.B. GAS allows X register to be used with any operator as a
2118 programming convenience. */
2119 if (qualifier == AARCH64_OPND_QLF_X
2120 && opnd->shifter.kind != AARCH64_MOD_LSL
2121 && opnd->shifter.kind != AARCH64_MOD_UXTX
2122 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2123 {
2124 set_other_error (mismatch_detail, idx, _("W register expected"));
2125 return 0;
2126 }
2127 break;
2128
2129 case AARCH64_OPND_Rm_SFT:
2130 /* ROR is not available to the shifted register operand in
2131 arithmetic instructions. */
2132 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2133 {
2134 set_other_error (mismatch_detail, idx,
2135 _("shift operator expected"));
2136 return 0;
2137 }
2138 if (opnd->shifter.kind == AARCH64_MOD_ROR
2139 && opcode->iclass != log_shift)
2140 {
2141 set_other_error (mismatch_detail, idx,
2142 _("'ROR' operator not allowed"));
2143 return 0;
2144 }
2145 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2146 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2147 {
2148 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2149 return 0;
2150 }
2151 break;
2152
2153 default:
2154 break;
2155 }
2156 break;
2157
2158 default:
2159 break;
2160 }
2161
2162 return 1;
2163 }
2164
2165 /* Main entrypoint for the operand constraint checking.
2166
2167 Return 1 if operands of *INST meet the constraint applied by the operand
2168 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2169 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2170 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2171 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2172 error kind when it is notified that an instruction does not pass the check).
2173
2174 Un-determined operand qualifiers may get established during the process. */
2175
2176 int
2177 aarch64_match_operands_constraint (aarch64_inst *inst,
2178 aarch64_operand_error *mismatch_detail)
2179 {
2180 int i;
2181
2182 DEBUG_TRACE ("enter");
2183
2184 /* Check for cases where a source register needs to be the same as the
2185 destination register. Do this before matching qualifiers since if
2186 an instruction has both invalid tying and invalid qualifiers,
2187 the error about qualifiers would suggest several alternative
2188 instructions that also have invalid tying. */
2189 i = inst->opcode->tied_operand;
2190 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2191 {
2192 if (mismatch_detail)
2193 {
2194 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2195 mismatch_detail->index = i;
2196 mismatch_detail->error = NULL;
2197 }
2198 return 0;
2199 }
2200
2201 /* Match operands' qualifier.
2202 *INST has already had qualifier establish for some, if not all, of
2203 its operands; we need to find out whether these established
2204 qualifiers match one of the qualifier sequence in
2205 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2206 with the corresponding qualifier in such a sequence.
2207 Only basic operand constraint checking is done here; the more thorough
2208 constraint checking will carried out by operand_general_constraint_met_p,
2209 which has be to called after this in order to get all of the operands'
2210 qualifiers established. */
2211 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2212 {
2213 DEBUG_TRACE ("FAIL on operand qualifier matching");
2214 if (mismatch_detail)
2215 {
2216 /* Return an error type to indicate that it is the qualifier
2217 matching failure; we don't care about which operand as there
2218 are enough information in the opcode table to reproduce it. */
2219 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2220 mismatch_detail->index = -1;
2221 mismatch_detail->error = NULL;
2222 }
2223 return 0;
2224 }
2225
2226 /* Match operands' constraint. */
2227 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2228 {
2229 enum aarch64_opnd type = inst->opcode->operands[i];
2230 if (type == AARCH64_OPND_NIL)
2231 break;
2232 if (inst->operands[i].skip)
2233 {
2234 DEBUG_TRACE ("skip the incomplete operand %d", i);
2235 continue;
2236 }
2237 if (operand_general_constraint_met_p (inst->operands, i, type,
2238 inst->opcode, mismatch_detail) == 0)
2239 {
2240 DEBUG_TRACE ("FAIL on operand %d", i);
2241 return 0;
2242 }
2243 }
2244
2245 DEBUG_TRACE ("PASS");
2246
2247 return 1;
2248 }
2249
2250 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2251 Also updates the TYPE of each INST->OPERANDS with the corresponding
2252 value of OPCODE->OPERANDS.
2253
2254 Note that some operand qualifiers may need to be manually cleared by
2255 the caller before it further calls the aarch64_opcode_encode; by
2256 doing this, it helps the qualifier matching facilities work
2257 properly. */
2258
2259 const aarch64_opcode*
2260 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2261 {
2262 int i;
2263 const aarch64_opcode *old = inst->opcode;
2264
2265 inst->opcode = opcode;
2266
2267 /* Update the operand types. */
2268 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2269 {
2270 inst->operands[i].type = opcode->operands[i];
2271 if (opcode->operands[i] == AARCH64_OPND_NIL)
2272 break;
2273 }
2274
2275 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2276
2277 return old;
2278 }
2279
2280 int
2281 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2282 {
2283 int i;
2284 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2285 if (operands[i] == operand)
2286 return i;
2287 else if (operands[i] == AARCH64_OPND_NIL)
2288 break;
2289 return -1;
2290 }
2291 \f
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* Integer register name table; only entry 31 differs between the banks:
   the first index selects SP-form (0) vs zero-register-form (1) names,
   the second index selects 32-bit (0) vs 64-bit (1) names.
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};
#undef BANK
2311
2312 /* Return the integer register name.
2313 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2314
2315 static inline const char *
2316 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2317 {
2318 const int has_zr = sp_reg_p ? 0 : 1;
2319 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2320 return int_reg[has_zr][is_64][regno];
2321 }
2322
2323 /* Like get_int_reg_name, but IS_64 is always 1. */
2324
2325 static inline const char *
2326 get_64bit_int_reg_name (int regno, int sp_reg_p)
2327 {
2328 const int has_zr = sp_reg_p ? 0 : 1;
2329 return int_reg[has_zr][1][regno];
2330 }
2331
2332 /* Get the name of the integer offset register in OPND, using the shift type
2333 to decide whether it's a word or doubleword. */
2334
2335 static inline const char *
2336 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2337 {
2338 switch (opnd->shifter.kind)
2339 {
2340 case AARCH64_MOD_UXTW:
2341 case AARCH64_MOD_SXTW:
2342 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2343
2344 case AARCH64_MOD_LSL:
2345 case AARCH64_MOD_SXTX:
2346 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2347
2348 default:
2349 abort ();
2350 }
2351 }
2352
/* Types for expanding an encoded 8-bit value to a floating-point value.
   The unions allow the bit pattern produced by expand_fp_imm to be
   reinterpreted as a float/double for printing.  */

typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to a single-precision bit
   pattern (see expand_fp_imm with SIZE == 2), hence uint32_t/float
   here rather than 16-bit types.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2372
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Initialize to 0 so that an unsupported SIZE cannot leak an
     indeterminate value when assertions are compiled out (NDEBUG).  */
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      /* Build the top 32 bits of the double-precision pattern, then
	 shift into place; the low 32 bits are all zero.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)  */
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>  */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7> */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>) */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2416
2417 /* Produce the string representation of the register list operand *OPND
2418 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2419 the register name that comes before the register number, such as "v". */
2420 static void
2421 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2422 const char *prefix)
2423 {
2424 const int num_regs = opnd->reglist.num_regs;
2425 const int first_reg = opnd->reglist.first_regno;
2426 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2427 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2428 char tb[8]; /* Temporary buffer. */
2429
2430 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2431 assert (num_regs >= 1 && num_regs <= 4);
2432
2433 /* Prepare the index if any. */
2434 if (opnd->reglist.has_index)
2435 snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
2436 else
2437 tb[0] = '\0';
2438
2439 /* The hyphenated form is preferred for disassembly if there are
2440 more than two registers in the list, and the register numbers
2441 are monotonically increasing in increments of one. */
2442 if (num_regs > 2 && last_reg > first_reg)
2443 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2444 prefix, last_reg, qlf_name, tb);
2445 else
2446 {
2447 const int reg0 = first_reg;
2448 const int reg1 = (first_reg + 1) & 0x1f;
2449 const int reg2 = (first_reg + 2) & 0x1f;
2450 const int reg3 = (first_reg + 3) & 0x1f;
2451
2452 switch (num_regs)
2453 {
2454 case 1:
2455 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2456 break;
2457 case 2:
2458 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2459 prefix, reg1, qlf_name, tb);
2460 break;
2461 case 3:
2462 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2463 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2464 prefix, reg2, qlf_name, tb);
2465 break;
2466 case 4:
2467 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2468 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2469 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2470 break;
2471 }
2472 }
2473 }
2474
2475 /* Print the register+immediate address in OPND to BUF, which has SIZE
2476 characters. BASE is the name of the base register. */
2477
2478 static void
2479 print_immediate_offset_address (char *buf, size_t size,
2480 const aarch64_opnd_info *opnd,
2481 const char *base)
2482 {
2483 if (opnd->addr.writeback)
2484 {
2485 if (opnd->addr.preind)
2486 snprintf (buf, size, "[%s,#%d]!", base, opnd->addr.offset.imm);
2487 else
2488 snprintf (buf, size, "[%s],#%d", base, opnd->addr.offset.imm);
2489 }
2490 else
2491 {
2492 if (opnd->addr.offset.imm)
2493 snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm);
2494 else
2495 snprintf (buf, size, "[%s]", base);
2496 }
2497 }
2498
2499 /* Produce the string representation of the register offset address operand
2500 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2501 the names of the base and offset registers. */
2502 static void
2503 print_register_offset_address (char *buf, size_t size,
2504 const aarch64_opnd_info *opnd,
2505 const char *base, const char *offset)
2506 {
2507 char tb[16]; /* Temporary buffer. */
2508 bfd_boolean print_extend_p = TRUE;
2509 bfd_boolean print_amount_p = TRUE;
2510 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2511
2512 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2513 || !opnd->shifter.amount_present))
2514 {
2515 /* Not print the shift/extend amount when the amount is zero and
2516 when it is not the special case of 8-bit load/store instruction. */
2517 print_amount_p = FALSE;
2518 /* Likewise, no need to print the shift operator LSL in such a
2519 situation. */
2520 if (opnd->shifter.kind == AARCH64_MOD_LSL)
2521 print_extend_p = FALSE;
2522 }
2523
2524 /* Prepare for the extend/shift. */
2525 if (print_extend_p)
2526 {
2527 if (print_amount_p)
2528 snprintf (tb, sizeof (tb), ",%s #%d", shift_name, opnd->shifter.amount);
2529 else
2530 snprintf (tb, sizeof (tb), ",%s", shift_name);
2531 }
2532 else
2533 tb[0] = '\0';
2534
2535 snprintf (buf, size, "[%s,%s%s]", base, offset, tb);
2536 }
2537
/* Generate the string representation of the operand OPNDS[IDX] for OPCODE
   in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
   PC, PCREL_P and ADDRESS are used to pass in and return information about
   the PC-relative address calculation, where the PC value is passed in
   PC.  If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
   will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
   calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.

   The function serves both the disassembler and the assembler diagnostics
   issuer, which is the reason why it lives in this file.  */

void
aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
		       const aarch64_opcode *opcode,
		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
		       bfd_vma *address)
{
  int i;
  const char *name = NULL;
  const aarch64_opnd_info *opnd = opnds + idx;
  enum aarch64_modifier_kind kind;
  uint64_t addr, enum_value;

  buf[0] = '\0';
  if (pcrel_p)
    *pcrel_p = 0;

  switch (opnd->type)
    {
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_PAIRREG:
      /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
	 the <ic_op>, therefore we use opnd->present to override the
	 generic optional-ness information.  */
      if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
	break;
      /* Omit the operand, e.g. RET.  */
      if (optional_operand_p (opcode, idx)
	  && opnd->reg.regno == get_optional_operand_default_value (opcode))
	break;
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      break;

    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
	      || opnd->qualifier == AARCH64_OPND_QLF_X
	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
      break;

    case AARCH64_OPND_Rm_EXT:
      kind = opnd->shifter.kind;
      assert (idx == 1 || idx == 2);
      if ((aarch64_stack_pointer_p (opnds)
	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
	       && kind == AARCH64_MOD_UXTW)
	      || (opnd->qualifier == AARCH64_OPND_QLF_X
		  && kind == AARCH64_MOD_UXTX)))
	{
	  /* 'LSL' is the preferred form in this case.  */
	  kind = AARCH64_MOD_LSL;
	  if (opnd->shifter.amount == 0)
	    {
	      /* Shifter omitted.  */
	      snprintf (buf, size, "%s",
			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
	      break;
	    }
	}
      if (opnd->shifter.amount)
	snprintf (buf, size, "%s, %s #%d",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "%s, %s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name);
      break;

    case AARCH64_OPND_Rm_SFT:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      /* Suppress a redundant "LSL #0".  */
      if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
	snprintf (buf, size, "%s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      else
	snprintf (buf, size, "%s, %s #%d",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
      /* FP/SIMD scalar register: qualifier name supplies the "b"/"h"/"s"/
	 "d"/"q" prefix.  */
      snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reg.regno);
      break;

    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
      snprintf (buf, size, "v%d.%s", opnd->reg.regno,
		aarch64_get_qualifier_name (opnd->qualifier));
      break;

    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
      /* Vector element, e.g. "v0.s[1]".  */
      snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reglane.index);
      break;

    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
      break;

    case AARCH64_OPND_LVn:
    case AARCH64_OPND_LVt:
    case AARCH64_OPND_LVt_AL:
    case AARCH64_OPND_LEt:
      print_register_list (buf, size, opnd, "v");
      break;

    case AARCH64_OPND_SVE_Pd:
    case AARCH64_OPND_SVE_Pg3:
    case AARCH64_OPND_SVE_Pg4_5:
    case AARCH64_OPND_SVE_Pg4_10:
    case AARCH64_OPND_SVE_Pg4_16:
    case AARCH64_OPND_SVE_Pm:
    case AARCH64_OPND_SVE_Pn:
    case AARCH64_OPND_SVE_Pt:
      /* SVE predicate register, optionally with a /z or /m suffix or an
	 element size.  */
      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
	snprintf (buf, size, "p%d", opnd->reg.regno);
      else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
	       || opnd->qualifier == AARCH64_OPND_QLF_P_M)
	snprintf (buf, size, "p%d/%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));
      else
	snprintf (buf, size, "p%d.%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));
      break;

    case AARCH64_OPND_SVE_Za_5:
    case AARCH64_OPND_SVE_Za_16:
    case AARCH64_OPND_SVE_Zd:
    case AARCH64_OPND_SVE_Zm_5:
    case AARCH64_OPND_SVE_Zm_16:
    case AARCH64_OPND_SVE_Zn:
    case AARCH64_OPND_SVE_Zt:
      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
	snprintf (buf, size, "z%d", opnd->reg.regno);
      else
	snprintf (buf, size, "z%d.%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));
      break;

    case AARCH64_OPND_SVE_ZnxN:
    case AARCH64_OPND_SVE_ZtxN:
      print_register_list (buf, size, opnd, "z");
      break;

    case AARCH64_OPND_SVE_Zn_INDEX:
      snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reglane.index);
      break;

    case AARCH64_OPND_Cn:
    case AARCH64_OPND_Cm:
      snprintf (buf, size, "C%d", opnd->reg.regno);
      break;

    case AARCH64_OPND_IDX:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM0:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_FBITS:
      /* Plain decimal immediate.  */
      snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    case AARCH64_OPND_SVE_PATTERN:
      if (optional_operand_p (opcode, idx)
	  && opnd->imm.value == get_optional_operand_default_value (opcode))
	break;
      enum_value = opnd->imm.value;
      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
      /* Prefer the symbolic name; reserved values fall back to a raw
	 immediate.  */
      if (aarch64_sve_pattern_array[enum_value])
	snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
      else
	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    case AARCH64_OPND_SVE_PRFOP:
      enum_value = opnd->imm.value;
      assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
      if (aarch64_sve_prfop_array[enum_value])
	snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
      else
	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    case AARCH64_OPND_IMM_MOV:
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 4:	/* e.g. MOV Wd, #<imm32>.  */
	    {
	      int imm32 = opnd->imm.value;
	      snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
	    }
	  break;
	case 8:	/* e.g. MOV Xd, #<imm64>.  */
	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
		    opnd->imm.value, opnd->imm.value);
	  break;
	default: assert (0);
	}
      break;

    case AARCH64_OPND_FPIMM0:
      snprintf (buf, size, "#0.0");
      break;

    case AARCH64_OPND_LIMM:
    case AARCH64_OPND_AIMM:
    case AARCH64_OPND_HALF:
      if (opnd->shifter.amount)
	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
      if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
	  || opnd->shifter.kind == AARCH64_MOD_NONE)
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      else
	snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    case AARCH64_OPND_FPIMM:
    case AARCH64_OPND_SIMD_FPIMM:
      /* The element size of the *first* operand decides the precision of
	 the expanded immediate.  */
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
	    {
	      half_conv_t c;
	      c.i = expand_fp_imm (2, opnd->imm.value);
	      snprintf (buf, size, "#%.18e", c.f);
	    }
	  break;
	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
	    {
	      single_conv_t c;
	      c.i = expand_fp_imm (4, opnd->imm.value);
	      snprintf (buf, size, "#%.18e", c.f);
	    }
	  break;
	case 8:	/* e.g. FMOV <Dd>, #<imm>.  */
	    {
	      double_conv_t c;
	      c.i = expand_fp_imm (8, opnd->imm.value);
	      snprintf (buf, size, "#%.18e", c.d);
	    }
	  break;
	default: assert (0);
	}
      break;

    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM7:
      if (optional_operand_p (opcode, idx) == TRUE
	  && (opnd->imm.value ==
	      (int64_t) get_optional_operand_default_value (opcode)))
	/* Omit the operand, e.g. DCPS1.  */
	break;
      snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
      break;

    case AARCH64_OPND_COND:
    case AARCH64_OPND_COND1:
      snprintf (buf, size, "%s", opnd->cond->names[0]);
      break;

    case AARCH64_OPND_ADDR_ADRP:
      /* ADRP targets are 4KiB-page-aligned.  */
      addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
	+ opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *BUF,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    case AARCH64_OPND_ADDR_PCREL14:
    case AARCH64_OPND_ADDR_PCREL19:
    case AARCH64_OPND_ADDR_PCREL21:
    case AARCH64_OPND_ADDR_PCREL26:
      addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *BUF,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    case AARCH64_OPND_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_POST:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
	{
	  if (opnd->addr.offset.is_reg)
	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
	  else
	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
	}
      else
	snprintf (buf, size, "[%s]", name);
      break;

    case AARCH64_OPND_ADDR_REGOFF:
      print_register_offset_address
	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
	 get_offset_int_reg_name (opnd));
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
      print_immediate_offset_address
	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
      break;

    case AARCH64_OPND_ADDR_UIMM12:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
      else
	snprintf (buf, size, "[%s]", name);
      break;

    case AARCH64_OPND_SYSREG:
      /* Prefer a non-deprecated name for the encoding; fall back to the
	 implementation-defined "s<op0>_<op1>_c<n>_c<m>_<op2>" form.  */
      for (i = 0; aarch64_sys_regs[i].name; ++i)
	if (aarch64_sys_regs[i].value == opnd->sysreg
	    && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
	  break;
      if (aarch64_sys_regs[i].name)
	snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
      else
	{
	  /* Implementation defined system register.  */
	  unsigned int value = opnd->sysreg;
	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
		    value & 0x7);
	}
      break;

    case AARCH64_OPND_PSTATEFIELD:
      for (i = 0; aarch64_pstatefields[i].name; ++i)
	if (aarch64_pstatefields[i].value == opnd->pstatefield)
	  break;
      assert (aarch64_pstatefields[i].name);
      snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
      break;

    case AARCH64_OPND_SYSREG_AT:
    case AARCH64_OPND_SYSREG_DC:
    case AARCH64_OPND_SYSREG_IC:
    case AARCH64_OPND_SYSREG_TLBI:
      snprintf (buf, size, "%s", opnd->sysins_op->name);
      break;

    case AARCH64_OPND_BARRIER:
      snprintf (buf, size, "%s", opnd->barrier->name);
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* Operand can be omitted, e.g. in DCPS1.  */
      if (! optional_operand_p (opcode, idx)
	  || (opnd->barrier->value
	      != get_optional_operand_default_value (opcode)))
	snprintf (buf, size, "#0x%x", opnd->barrier->value);
      break;

    case AARCH64_OPND_PRFOP:
      if (opnd->prfop->name != NULL)
	snprintf (buf, size, "%s", opnd->prfop->name);
      else
	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
      break;

    case AARCH64_OPND_BARRIER_PSB:
      snprintf (buf, size, "%s", opnd->hint_option->name);
      break;

    default:
      assert (0);
    }
}
2986 \f
/* Pack the op0:op1:CRn:CRm:op2 fields of a system-register encoding into a
   single comparable value (the final >> 5 drops the unused low bits).  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
  /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
  /* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Mnemonic aliases for the CRn/CRm field values used in the tables below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

#ifdef F_DEPRECATED
#undef F_DEPRECATED
#endif
#define F_DEPRECATED	0x1	/* Deprecated system register.  */

#ifdef F_ARCHEXT
#undef F_ARCHEXT
#endif
#define F_ARCHEXT	0x2	/* Architecture dependent system register.  */

#ifdef F_HASXT
#undef F_HASXT
#endif
#define F_HASXT		0x4	/* System instruction register <Xt>
				   operand.  */
3026
3027
3028 /* TODO there are two more issues need to be resolved
3029 1. handle read-only and write-only system registers
3030 2. handle cpu-implementation-defined system registers. */
3031 const aarch64_sys_reg aarch64_sys_regs [] =
3032 {
3033 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3034 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3035 { "elr_el1", CPEN_(0,C0,1), 0 },
3036 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3037 { "sp_el0", CPEN_(0,C1,0), 0 },
3038 { "spsel", CPEN_(0,C2,0), 0 },
3039 { "daif", CPEN_(3,C2,1), 0 },
3040 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
3041 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3042 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3043 { "nzcv", CPEN_(3,C2,0), 0 },
3044 { "fpcr", CPEN_(3,C4,0), 0 },
3045 { "fpsr", CPEN_(3,C4,1), 0 },
3046 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3047 { "dlr_el0", CPEN_(3,C5,1), 0 },
3048 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3049 { "elr_el2", CPEN_(4,C0,1), 0 },
3050 { "sp_el1", CPEN_(4,C1,0), 0 },
3051 { "spsr_irq", CPEN_(4,C3,0), 0 },
3052 { "spsr_abt", CPEN_(4,C3,1), 0 },
3053 { "spsr_und", CPEN_(4,C3,2), 0 },
3054 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3055 { "spsr_el3", CPEN_(6,C0,0), 0 },
3056 { "elr_el3", CPEN_(6,C0,1), 0 },
3057 { "sp_el2", CPEN_(6,C1,0), 0 },
3058 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3059 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3060 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
3061 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
3062 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
3063 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
3064 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
3065 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
3066 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
3067 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
3068 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
3069 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
3070 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
3071 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
3072 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
3073 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
3074 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
3075 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
3076 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
3077 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
3078 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
3079 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
3080 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
3081 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
3082 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
3083 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
3084 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
3085 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
3086 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
3087 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
3088 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
3089 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
3090 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
3091 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
3092 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
3093 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
3094 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
3095 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
3096 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
3097 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
3098 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3099 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3100 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3101 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3102 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3103 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3104 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3105 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3106 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3107 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3108 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3109 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3110 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3111 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3112 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3113 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3114 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3115 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3116 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3117 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3118 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3119 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3120 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3121 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3122 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3123 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3124 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3125 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3126 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3127 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3128 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3129 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3130 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3131 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3132 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3133 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3134 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3135 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3136 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3137 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3138 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3139 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3140 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3141 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3142 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
3143 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3144 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
3145 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3146 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
3147 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3148 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3149 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3150 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3151 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3152 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3153 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3154 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3155 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3156 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3157 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3158 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3159 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3160 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3161 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3162 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3163 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3164 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3165 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3166 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3167 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3168 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3169 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3170 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
3171 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
3172 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
3173 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3174 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3175 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3176 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
3177 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3178 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3179 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3180 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3181 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3182 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3183 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
3184 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3185 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3186 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3187 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3188 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
3189 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
3190 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
3191 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3192 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3193 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3194 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3195 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3196 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3197 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3198 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3199 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3200 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3201 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3202 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3203 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3204 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3205 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3206 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3207 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3208 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3209 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3210 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3211 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3212 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3213 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3214 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3215 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3216 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3217 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3218 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3219 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3220 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3221 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3222 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3223 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3224 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3225 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3226 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3227 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3228 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3229 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3230 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3231 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3232 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3233 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3234 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3235 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3236 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3237 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3238 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3239 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3240 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3241 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3242 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3243 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3244 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3245 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3246 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3247 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3248 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3249 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3250 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3251 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3252 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3253 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3254 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3255 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3256 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3257 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3258 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3259 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3260 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3261 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3262 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3263 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3264 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3265 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3266 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3267 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3268 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3269 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3270 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3271 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3272 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3273 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3274 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3275 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3276 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3277 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3278 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3279 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3280 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3281 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3282 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3283 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3284 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3285 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3286 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3287 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3288 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3289 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3290 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3291 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3292 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3293 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3294 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3295 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3296 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3297 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3298 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3299 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3300 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3301 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3302 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3303 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3304 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3305 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3306 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3307 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3308 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3309 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3310 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3311 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3312 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3313 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3314 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3315 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3316 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3317 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3318 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3319 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3320 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3321 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3322 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3323 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3324 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3325 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3326 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3327 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3328 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3329 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3330 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3331 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3332 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3333 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3334 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3335 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3336 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3337 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3338 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3339 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3340 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3341 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3342 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3343 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3344 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3345 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3346 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3347 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3348 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3349 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3350 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3351 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3352 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3353 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3354 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3355 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3356 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3357 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3358 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3359 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3360 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3361 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3362 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3363 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
3364 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
3365 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
3366 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
3367 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
3368 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
3369 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
3370 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
3371 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
3372 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
3373 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
3374 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
3375 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
3376 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3377 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3378 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3379 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3380 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3381 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3382 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3383 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3384 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3385 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3386 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3387 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3388 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3389 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3390 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3391 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3392 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
3393 { 0, CPENC(0,0,0,0,0), 0 },
3394 };
3395
3396 bfd_boolean
3397 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3398 {
3399 return (reg->flags & F_DEPRECATED) != 0;
3400 }
3401
3402 bfd_boolean
3403 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
3404 const aarch64_sys_reg *reg)
3405 {
3406 if (!(reg->flags & F_ARCHEXT))
3407 return TRUE;
3408
3409 /* PAN. Values are from aarch64_sys_regs. */
3410 if (reg->value == CPEN_(0,C2,3)
3411 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3412 return FALSE;
3413
3414 /* Virtualization host extensions: system registers. */
3415 if ((reg->value == CPENC (3, 4, C2, C0, 1)
3416 || reg->value == CPENC (3, 4, C13, C0, 1)
3417 || reg->value == CPENC (3, 4, C14, C3, 0)
3418 || reg->value == CPENC (3, 4, C14, C3, 1)
3419 || reg->value == CPENC (3, 4, C14, C3, 2))
3420 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3421 return FALSE;
3422
3423 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3424 if ((reg->value == CPEN_ (5, C0, 0)
3425 || reg->value == CPEN_ (5, C0, 1)
3426 || reg->value == CPENC (3, 5, C1, C0, 0)
3427 || reg->value == CPENC (3, 5, C1, C0, 2)
3428 || reg->value == CPENC (3, 5, C2, C0, 0)
3429 || reg->value == CPENC (3, 5, C2, C0, 1)
3430 || reg->value == CPENC (3, 5, C2, C0, 2)
3431 || reg->value == CPENC (3, 5, C5, C1, 0)
3432 || reg->value == CPENC (3, 5, C5, C1, 1)
3433 || reg->value == CPENC (3, 5, C5, C2, 0)
3434 || reg->value == CPENC (3, 5, C6, C0, 0)
3435 || reg->value == CPENC (3, 5, C10, C2, 0)
3436 || reg->value == CPENC (3, 5, C10, C3, 0)
3437 || reg->value == CPENC (3, 5, C12, C0, 0)
3438 || reg->value == CPENC (3, 5, C13, C0, 1)
3439 || reg->value == CPENC (3, 5, C14, C1, 0))
3440 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3441 return FALSE;
3442
3443 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3444 if ((reg->value == CPENC (3, 5, C14, C2, 0)
3445 || reg->value == CPENC (3, 5, C14, C2, 1)
3446 || reg->value == CPENC (3, 5, C14, C2, 2)
3447 || reg->value == CPENC (3, 5, C14, C3, 0)
3448 || reg->value == CPENC (3, 5, C14, C3, 1)
3449 || reg->value == CPENC (3, 5, C14, C3, 2))
3450 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3451 return FALSE;
3452
3453 /* ARMv8.2 features. */
3454
3455 /* ID_AA64MMFR2_EL1. */
3456 if (reg->value == CPENC (3, 0, C0, C7, 2)
3457 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3458 return FALSE;
3459
3460 /* PSTATE.UAO. */
3461 if (reg->value == CPEN_ (0, C2, 4)
3462 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3463 return FALSE;
3464
3465 /* RAS extension. */
3466
3467 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
3468 ERXMISC0_EL1 AND ERXMISC1_EL1. */
3469 if ((reg->value == CPENC (3, 0, C5, C3, 0)
3470 || reg->value == CPENC (3, 0, C5, C3, 1)
3471 || reg->value == CPENC (3, 0, C5, C3, 2)
3472 || reg->value == CPENC (3, 0, C5, C3, 3)
3473 || reg->value == CPENC (3, 0, C5, C4, 0)
3474 || reg->value == CPENC (3, 0, C5, C4, 1)
3475 || reg->value == CPENC (3, 0, C5, C4, 2)
3476 || reg->value == CPENC (3, 0, C5, C4, 3)
3477 || reg->value == CPENC (3, 0, C5, C5, 0)
3478 || reg->value == CPENC (3, 0, C5, C5, 1))
3479 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3480 return FALSE;
3481
3482 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3483 if ((reg->value == CPENC (3, 4, C5, C2, 3)
3484 || reg->value == CPENC (3, 0, C12, C1, 1)
3485 || reg->value == CPENC (3, 4, C12, C1, 1))
3486 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3487 return FALSE;
3488
3489 /* Statistical Profiling extension. */
3490 if ((reg->value == CPENC (3, 0, C9, C10, 0)
3491 || reg->value == CPENC (3, 0, C9, C10, 1)
3492 || reg->value == CPENC (3, 0, C9, C10, 3)
3493 || reg->value == CPENC (3, 0, C9, C10, 7)
3494 || reg->value == CPENC (3, 0, C9, C9, 0)
3495 || reg->value == CPENC (3, 0, C9, C9, 2)
3496 || reg->value == CPENC (3, 0, C9, C9, 3)
3497 || reg->value == CPENC (3, 0, C9, C9, 4)
3498 || reg->value == CPENC (3, 0, C9, C9, 5)
3499 || reg->value == CPENC (3, 0, C9, C9, 6)
3500 || reg->value == CPENC (3, 0, C9, C9, 7)
3501 || reg->value == CPENC (3, 4, C9, C9, 0)
3502 || reg->value == CPENC (3, 5, C9, C9, 0))
3503 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
3504 return FALSE;
3505
3506 return TRUE;
3507 }
3508
3509 const aarch64_sys_reg aarch64_pstatefields [] =
3510 {
3511 { "spsel", 0x05, 0 },
3512 { "daifset", 0x1e, 0 },
3513 { "daifclr", 0x1f, 0 },
3514 { "pan", 0x04, F_ARCHEXT },
3515 { "uao", 0x03, F_ARCHEXT },
3516 { 0, CPENC(0,0,0,0,0), 0 },
3517 };
3518
3519 bfd_boolean
3520 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
3521 const aarch64_sys_reg *reg)
3522 {
3523 if (!(reg->flags & F_ARCHEXT))
3524 return TRUE;
3525
3526 /* PAN. Values are from aarch64_pstatefields. */
3527 if (reg->value == 0x04
3528 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3529 return FALSE;
3530
3531 /* UAO. Values are from aarch64_pstatefields. */
3532 if (reg->value == 0x03
3533 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3534 return FALSE;
3535
3536 return TRUE;
3537 }
3538
3539 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
3540 {
3541 { "ialluis", CPENS(0,C7,C1,0), 0 },
3542 { "iallu", CPENS(0,C7,C5,0), 0 },
3543 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
3544 { 0, CPENS(0,0,0,0), 0 }
3545 };
3546
3547 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
3548 {
3549 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
3550 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
3551 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
3552 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
3553 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
3554 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
3555 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
3556 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
3557 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
3558 { 0, CPENS(0,0,0,0), 0 }
3559 };
3560
3561 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
3562 {
3563 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
3564 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
3565 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
3566 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
3567 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
3568 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
3569 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
3570 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
3571 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
3572 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
3573 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
3574 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
3575 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
3576 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
3577 { 0, CPENS(0,0,0,0), 0 }
3578 };
3579
3580 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
3581 {
3582 { "vmalle1", CPENS(0,C8,C7,0), 0 },
3583 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
3584 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
3585 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
3586 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
3587 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
3588 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
3589 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
3590 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
3591 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
3592 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
3593 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
3594 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
3595 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
3596 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
3597 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
3598 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
3599 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
3600 { "alle2", CPENS(4,C8,C7,0), 0 },
3601 { "alle2is", CPENS(4,C8,C3,0), 0 },
3602 { "alle1", CPENS(4,C8,C7,4), 0 },
3603 { "alle1is", CPENS(4,C8,C3,4), 0 },
3604 { "alle3", CPENS(6,C8,C7,0), 0 },
3605 { "alle3is", CPENS(6,C8,C3,0), 0 },
3606 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
3607 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
3608 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
3609 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
3610 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
3611 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
3612 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
3613 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
3614 { 0, CPENS(0,0,0,0), 0 }
3615 };
3616
3617 bfd_boolean
3618 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
3619 {
3620 return (sys_ins_reg->flags & F_HASXT) != 0;
3621 }
3622
3623 extern bfd_boolean
3624 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
3625 const aarch64_sys_ins_reg *reg)
3626 {
3627 if (!(reg->flags & F_ARCHEXT))
3628 return TRUE;
3629
3630 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
3631 if (reg->value == CPENS (3, C7, C12, 1)
3632 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3633 return FALSE;
3634
3635 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
3636 if ((reg->value == CPENS (0, C7, C9, 0)
3637 || reg->value == CPENS (0, C7, C9, 1))
3638 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3639 return FALSE;
3640
3641 return TRUE;
3642 }
3643
3644 #undef C0
3645 #undef C1
3646 #undef C2
3647 #undef C3
3648 #undef C4
3649 #undef C5
3650 #undef C6
3651 #undef C7
3652 #undef C8
3653 #undef C9
3654 #undef C10
3655 #undef C11
3656 #undef C12
3657 #undef C13
3658 #undef C14
3659 #undef C15
3660
/* Extract bit BT of INSN (result is 0 or 1).  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bitfield [HI:LO] of INSN.  The mask is built
   with a 64-bit constant so that a field as wide as 32 bits
   (HI - LO + 1 == 32) does not left-shift an int by its full width,
   which would be undefined behavior.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) \
			  & ((1ULL << (((HI) - (LO)) + 1)) - 1))
3663
3664 static bfd_boolean
3665 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
3666 const aarch64_insn insn)
3667 {
3668 int t = BITS (insn, 4, 0);
3669 int n = BITS (insn, 9, 5);
3670 int t2 = BITS (insn, 14, 10);
3671
3672 if (BIT (insn, 23))
3673 {
3674 /* Write back enabled. */
3675 if ((t == n || t2 == n) && n != 31)
3676 return FALSE;
3677 }
3678
3679 if (BIT (insn, 22))
3680 {
3681 /* Load */
3682 if (t == t2)
3683 return FALSE;
3684 }
3685
3686 return TRUE;
3687 }
3688
3689 /* Include the opcode description table as well as the operand description
3690 table. */
3691 #define VERIFIER(x) verify_##x
3692 #include "aarch64-tbl.h"