[AArch64][SVE 22/32] Add qualifiers for merging and zeroing predication
[deliverable/binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30
31 #include "aarch64-opc.h"
32
33 #ifdef DEBUG_AARCH64
34 int debug_dump = FALSE;
35 #endif /* DEBUG_AARCH64 */
36
37 /* Helper functions to determine which operand to be used to encode/decode
38 the size:Q fields for AdvSIMD instructions. */
39
40 static inline bfd_boolean
41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
42 {
43 return ((qualifier >= AARCH64_OPND_QLF_V_8B
44 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
45 : FALSE);
46 }
47
48 static inline bfd_boolean
49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
50 {
51 return ((qualifier >= AARCH64_OPND_QLF_S_B
52 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
53 : FALSE);
54 }
55
/* Classification of the qualifier pattern of an AdvSIMD instruction's
   operand list; used to decide which operand's qualifier encodes the
   size:Q fields.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,		/* e.g. v.4s, v.4s, v.4s.  */
  DP_VECTOR_LONG,		/* e.g. v.8h, v.8b, v.8b.  */
  DP_VECTOR_WIDE,		/* e.g. v.8h, v.8h, v.8b.  */
  DP_VECTOR_ACROSS_LANES,	/* e.g. <V><d>, <Vn>.<T> (SADDLV etc.).  */
};
64
/* For each data_pattern value, the index of the operand whose qualifier
   carries the information needed to encode/decode the size:Q fields.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME.  */
  1,	/* DP_VECTOR_LONG.  */
  2,	/* DP_VECTOR_WIDE.  */
  1,	/* DP_VECTOR_ACROSS_LANES.  */
};
73
/* Given a sequence of qualifiers in QUALIFIERS, determine and return
   the data pattern.
   N.B. QUALIFIERS is a possible sequence of qualifiers each of which
   corresponds to one of a sequence of operands.

   The checks below are order-dependent: 3SAME is tried before LONG and
   WIDE so that an exact element-size match wins.  */

static enum data_pattern
get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
{
  if (vector_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* e.g. v.4s, v.4s, v.4s
	 or v.4h, v.4h, v.h[3].  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]))
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2])))
	return DP_VECTOR_3SAME;
      /* e.g. v.8h, v.8b, v.8b.
	 or v.4s, v.4h, v.h[2].
	 or v.8h, v.16b.
	 Destination element size is twice the source element size.  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
	return DP_VECTOR_LONG;
      /* e.g. v.8h, v.8h, v.8b.
	 First two operands agree; third has half the element size.  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1])))
	return DP_VECTOR_WIDE;
    }
  else if (fp_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
	return DP_VECTOR_ACROSS_LANES;
    }

  return DP_UNKNOWN;
}
121
122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123 the AdvSIMD instructions. */
124 /* N.B. it is possible to do some optimization that doesn't call
125 get_data_pattern each time when we need to select an operand. We can
126 either buffer the caculated the result or statically generate the data,
127 however, it is not obvious that the optimization will bring significant
128 benefit. */
129
130 int
131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
132 {
133 return
134 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
135 }
136 \f
/* Instruction bit-field descriptors, indexed by enum aarch64_field_kind:
   each entry is { least-significant bit position, width in bits }.  */
const aarch64_field fields[] =
{
    { 0, 0 },	/* NIL.  */
    { 0, 4 },	/* cond2: condition in truly conditional-executed inst.  */
    { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 5, 19 },	/* imm19: e.g. in CBZ.  */
    { 5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29, 2 },	/* immlo: e.g. in ADRP.  */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
    { 0, 5 },	/* Rt: in load/store instructions.  */
    { 0, 5 },	/* Rd: in many integer instructions.  */
    { 5, 5 },	/* Rn: in many integer instructions.  */
    { 10, 5 },	/* Rt2: in load/store pair instructions.  */
    { 10, 5 },	/* Ra: in fp instructions.  */
    { 5, 3 },	/* op2: in the system instructions.  */
    { 8, 4 },	/* CRm: in the system instructions.  */
    { 12, 4 },	/* CRn: in the system instructions.  */
    { 16, 3 },	/* op1: in the system instructions.  */
    { 19, 2 },	/* op0: in the system instructions.  */
    { 10, 3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12, 4 },	/* cond: condition flags as a source operand.  */
    { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12, 1 },	/* S: in load/store reg offset instructions.  */
    { 21, 2 },	/* hw: in move wide constant instructions.  */
    { 22, 2 },	/* opc: in load/store reg offset instructions.  */
    { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22, 2 },	/* type: floating point type field in fp data inst.  */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10, 6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    { 5, 14 },	/* imm14: in test bit and branch instructions.  */
    { 5, 16 },	/* imm16: in exception instructions.  */
    { 0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22, 1 },	/* N: in logical (immediate) instructions.  */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31, 1 },	/* sf: in integer data processing instructions.  */
    { 30, 1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31, 1 },	/* b5: in the test bit and branch instructions.  */
    { 19, 5 },	/* b40: in the test bit and branch instructions.  */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    { 0, 5 },	/* SVE_Zd: SVE vector register, bits [4,0].  */
    { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
};
219
/* Return the operand class of operand code TYPE.  */
enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd type)
{
  return aarch64_operands[type].op_class;
}
225
/* Return the (statically allocated) name string of operand code TYPE.  */
const char *
aarch64_get_operand_name (enum aarch64_opnd type)
{
  return aarch64_operands[type].name;
}
231
/* Get the description string of operand code TYPE.
   This is usually used for diagnostic purposes.  */
const char *
aarch64_get_operand_desc (enum aarch64_opnd type)
{
  return aarch64_operands[type].desc;
}
239
/* Table of all conditional affixes, indexed by the 4-bit condition code.
   Each entry lists the accepted spellings (first one is canonical) and
   the encoded value.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq"}, 0x0},
  {{"ne"}, 0x1},
  {{"cs", "hs"}, 0x2},
  {{"cc", "lo", "ul"}, 0x3},
  {{"mi"}, 0x4},
  {{"pl"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi"}, 0x8},
  {{"ls"}, 0x9},
  {{"ge"}, 0xa},
  {{"lt"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
260
261 const aarch64_cond *
262 get_cond_from_value (aarch64_insn value)
263 {
264 assert (value < 16);
265 return &aarch64_conds[(unsigned int) value];
266 }
267
/* Return the condition opposite to COND.  Flipping the least significant
   bit of the encoding inverts a condition (eq<->ne, cs<->cc, ...), as the
   condition codes are laid out in complementary pairs.  */
const aarch64_cond *
get_inverted_cond (const aarch64_cond *cond)
{
  return &aarch64_conds[cond->value ^ 0x1];
}
273
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   The list is terminated by a NULL name.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
  {NULL, 0},
};
297
/* Return the modifier kind of DESC, which must point into
   aarch64_operand_modifiers; the kind is simply the table index.  */
enum aarch64_modifier_kind
aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
{
  return desc - aarch64_operand_modifiers;
}
303
/* Return the common encoding value of modifier KIND.  */
aarch64_insn
aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
{
  return aarch64_operand_modifiers[kind].value;
}
309
310 enum aarch64_modifier_kind
311 aarch64_get_operand_modifier_from_value (aarch64_insn value,
312 bfd_boolean extend_p)
313 {
314 if (extend_p == TRUE)
315 return AARCH64_MOD_UXTB + value;
316 else
317 return AARCH64_MOD_LSL - value;
318 }
319
320 bfd_boolean
321 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
322 {
323 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
324 ? TRUE : FALSE;
325 }
326
327 static inline bfd_boolean
328 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
329 {
330 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
331 ? TRUE : FALSE;
332 }
333
/* Memory barrier option names, indexed by the 4-bit CRm encoding used by
   DMB/DSB; unnamed encodings print as "#0x..".  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
  { "#0x00", 0x0 },
  { "oshld", 0x1 },
  { "oshst", 0x2 },
  { "osh",   0x3 },
  { "#0x04", 0x4 },
  { "nshld", 0x5 },
  { "nshst", 0x6 },
  { "nsh",   0x7 },
  { "#0x08", 0x8 },
  { "ishld", 0x9 },
  { "ishst", 0xa },
  { "ish",   0xb },
  { "#0x0c", 0xc },
  { "ld",    0xd },
  { "st",    0xe },
  { "sy",    0xf },
};
353
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The
   value column is the hint number of the alias.  The list of operands is
   terminated by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },    /* PSB CSYNC.  */
  { NULL, 0x0 },
};
366
/* Prefetch operation names, indexed by the 5-bit <prfop> encoding; NULL
   entries are unallocated encodings, which assemble/disassemble as plain
   immediates.  The encoding is built from three sub-fields:
   op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
407 \f
/* Utilities on value constraint.  */

/* Return 1 iff VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  if (value > high)
    return 0;
  return 1;
}
415
/* Return 1 iff VALUE is a multiple of ALIGN.  ALIGN is assumed to be a
   power of two (the test masks against ALIGN - 1).  */
static inline int
value_aligned_p (int64_t value, int align)
{
  int64_t mask = align - 1;
  return (value & mask) == 0;
}
421
/* Return 1 iff VALUE is representable as a signed two's-complement
   integer of WIDTH bits (WIDTH must be below 32).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t bound = (int64_t) 1 << (width - 1);
    return (value >= -bound && value < bound) ? 1 : 0;
  }
}
435
/* Return 1 iff VALUE is representable as an unsigned integer of WIDTH
   bits (WIDTH must be below 32); negative values never fit.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t bound = (int64_t) 1 << width;
    return (value >= 0 && value < bound) ? 1 : 0;
  }
}
449
/* Return 1 if OPERAND is SP or WSP, i.e. an integer register operand
   whose code permits the stack pointer and whose register number is 31.  */
int
aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
459
/* Return 1 if OPERAND is XZR or WZR, i.e. an integer register operand
   whose code does not permit the stack pointer and whose register number
   is 31.  */
int
aarch64_zero_register_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
469
/* Return true if the operand *OPERAND that has the operand code
   OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
   qualified by the qualifier TARGET.

   Only the W/WSP and X/SP pairs are interchangeable, and then only when
   the register in question is actually the stack pointer (regno 31 on an
   operand code that allows SP).  */

static inline int
operand_also_qualified_p (const struct aarch64_opnd_info *operand,
			  aarch64_opnd_qualifier_t target)
{
  switch (operand->qualifier)
    {
    case AARCH64_OPND_QLF_W:
      /* W can also be WSP when the register is the stack pointer.  */
      if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_X:
      /* X can also be SP when the register is the stack pointer.  */
      if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_WSP:
      /* WSP can also be W whenever the operand code permits SP.  */
      if (target == AARCH64_OPND_QLF_W
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    case AARCH64_OPND_QLF_SP:
      /* SP can also be X whenever the operand code permits SP.  */
      if (target == AARCH64_OPND_QLF_X
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    default:
      break;
    }

  return 0;
}
504
505 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
506 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
507
508 Return NIL if more than one expected qualifiers are found. */
509
510 aarch64_opnd_qualifier_t
511 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
512 int idx,
513 const aarch64_opnd_qualifier_t known_qlf,
514 int known_idx)
515 {
516 int i, saved_i;
517
518 /* Special case.
519
520 When the known qualifier is NIL, we have to assume that there is only
521 one qualifier sequence in the *QSEQ_LIST and return the corresponding
522 qualifier directly. One scenario is that for instruction
523 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
524 which has only one possible valid qualifier sequence
525 NIL, S_D
526 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
527 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
528
529 Because the qualifier NIL has dual roles in the qualifier sequence:
530 it can mean no qualifier for the operand, or the qualifer sequence is
531 not in use (when all qualifiers in the sequence are NILs), we have to
532 handle this special case here. */
533 if (known_qlf == AARCH64_OPND_NIL)
534 {
535 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
536 return qseq_list[0][idx];
537 }
538
539 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
540 {
541 if (qseq_list[i][known_idx] == known_qlf)
542 {
543 if (saved_i != -1)
544 /* More than one sequences are found to have KNOWN_QLF at
545 KNOWN_IDX. */
546 return AARCH64_OPND_NIL;
547 saved_i = i;
548 }
549 }
550
551 return qseq_list[saved_i][idx];
552 }
553
/* The broad category a qualifier belongs to; determines how the three
   data fields of struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,		/* No qualifier.  */
  OQK_OPD_VARIANT,	/* Register/vector arrangement variant.  */
  OQK_VALUE_IN_RANGE,	/* Immediate value range constraint.  */
  OQK_MISC,		/* Miscellaneous (e.g. shift operators).  */
};
561
/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size, number of elements and
     the common encoding value; for OQK_VALUE_IN_RANGE they are the lower
     bound, the upper bound and unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
574
/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  /* SVE predication qualifiers: zeroing ("/z") and merging ("/m").
     The first three fields are not meaningful for these.  */
  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.
     NOTE(review): kind is written as 0 (== OQK_NIL) rather than OQK_MISC
     for these entries; nothing visible here distinguishes the two —
     confirm before relying on the kind field for them.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
629
630 static inline bfd_boolean
631 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
632 {
633 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
634 ? TRUE : FALSE;
635 }
636
637 static inline bfd_boolean
638 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
639 {
640 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
641 ? TRUE : FALSE;
642 }
643
/* Return the printable name of QUALIFIER (e.g. "4s", "imm_0_7").  */
const char*
aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].desc;
}
649
/* Given an operand qualifier, return the expected data element size
   (in bytes) of a qualified operand.  Only valid for operand variant
   qualifiers.  */
unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}
658
/* Return the number of data elements of an operand qualified by
   QUALIFIER.  Only valid for operand variant qualifiers.  */
unsigned char
aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}
665
/* Return the common encoding value of QUALIFIER.  Only valid for operand
   variant qualifiers.  */
aarch64_insn
aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data2;
}
672
/* Return the inclusive lower bound of a value-range QUALIFIER.  */
static int
get_lower_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}
679
/* Return the inclusive upper bound of a value-range QUALIFIER.  */
static int
get_upper_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}
686
687 #ifdef DEBUG_AARCH64
/* Debug-only printf-style trace helper: prints "#### " followed by the
   formatted message and a newline to stdout.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}
698
/* Debug-only: print one qualifier sequence (AARCH64_MAX_OPND_NUM entries)
   as a comma-separated list of qualifier names.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}
708
/* Debug-only: dump the qualifier sequence currently present on the
   operands OPND alongside the candidate sequence QUALIFIER it is being
   matched against.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
723 #endif /* DEBUG_AARCH64 */
724
725 /* TODO improve this, we can have an extra field at the runtime to
726 store the number of operands rather than calculating it every time. */
727
728 int
729 aarch64_num_of_operands (const aarch64_opcode *opcode)
730 {
731 int i = 0;
732 const enum aarch64_opnd *opnds = opcode->operands;
733 while (opnds[i++] != AARCH64_OPND_NIL)
734 ;
735 --i;
736 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
737 return i;
738 }
739
740 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
741 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
742
743 N.B. on the entry, it is very likely that only some operands in *INST
744 have had their qualifiers been established.
745
746 If STOP_AT is not -1, the function will only try to match
747 the qualifier sequence for operands before and including the operand
748 of index STOP_AT; and on success *RET will only be filled with the first
749 (STOP_AT+1) qualifiers.
750
751 A couple examples of the matching algorithm:
752
753 X,W,NIL should match
754 X,W,NIL
755
756 NIL,NIL should match
757 X ,NIL
758
759 Apart from serving the main encoding routine, this can also be called
760 during or after the operand decoding. */
761
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      /* N.B. *RET is not filled in this case.  */
      return 1;
    }

  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  N.B. QUALIFIERS_LIST advances with I, so that
     after a `break' with FOUND == 1 it still points at the matching
     sequence; the fill-in code below relies on this.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes have far fewer patterns in the list.
	 The first all-NIL qualifier sequence indicates the end of the
	 list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  /* An empty sequence in position 0 means the opcode takes no
	     qualifiers at all; anywhere else it is the terminator.  */
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the first (STOP_AT+1) qualifiers; pad the rest with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
864
865 /* Operand qualifier matching and resolving.
866
867 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
868 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
869
870 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
871 succeeds. */
872
/* Operand qualifier matching and resolving for INST.  Return 1 (and,
   when UPDATE_P == TRUE, update the qualifiers in *INST) if the operand
   qualifiers match one of the sequences in
   INST->OPCODE->qualifiers_list; otherwise return 0.  */
static int
match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
{
  int i, nops;
  aarch64_opnd_qualifier_seq_t qualifiers;

  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
			       qualifiers))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  if (inst->opcode->flags & F_STRICT)
    {
      /* Require an exact qualifier match, even for NIL qualifiers.  */
      nops = aarch64_num_of_operands (inst->opcode);
      for (i = 0; i < nops; ++i)
	if (inst->operands[i].qualifier != qualifiers[i])
	  return FALSE;	/* FALSE == 0, consistent with the int return.  */
    }

  /* Update the qualifiers.  */
  if (update_p == TRUE)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
911
912 /* Return TRUE if VALUE is a wide constant that can be moved into a general
913 register by MOVZ.
914
915 IS32 indicates whether value is a 32-bit immediate or not.
916 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
917 amount will be returned in *SHIFT_AMOUNT. */
918
bfd_boolean
aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
{
  int amount;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);

  if (is32)
    {
      /* Allow all zeros or all ones in top 32-bits, so that
	 32-bit constant expressions like ~0x80000000 are
	 permitted.  */
      uint64_t ext = value;
      if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
	/* Immediate out of range.  */
	return FALSE;
      value &= (int64_t) 0xffffffff;
    }

  /* first, try movz then movn */
  /* The constant must occupy a single aligned 16-bit chunk; for 32-bit
     values only shifts 0 and 16 are candidates.  */
  amount = -1;
  if ((value & ((int64_t) 0xffff << 0)) == value)
    amount = 0;
  else if ((value & ((int64_t) 0xffff << 16)) == value)
    amount = 16;
  else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
    amount = 32;
  else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
    amount = 48;

  if (amount == -1)
    {
      DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
      return FALSE;
    }

  if (shift_amount != NULL)
    *shift_amount = amount;

  DEBUG_TRACE ("exit TRUE with amount %d", amount);

  return TRUE;
}
962
963 /* Build the accepted values for immediate logical SIMD instructions.
964
965 The standard encodings of the immediate value are:
966 N imms immr SIMD size R S
967 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
968 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
969 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
970 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
971 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
972 0 11110s 00000r 2 UInt(r) UInt(s)
973 where all-ones value of S is reserved.
974
975 Let's call E the SIMD size.
976
977 The immediate value is: S+1 bits '1' rotated to the right by R.
978
979 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
980 (remember S != E - 1). */
981
/* Total number of distinct valid logical-immediate encodings; see the
   derivation in the comment above (64*63 + 32*31 + ... + 2*1).  */
#define TOTAL_IMM_NB 5334

/* One (immediate value, standard encoding) pair.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Lookup table of all valid logical immediates, filled once by
   build_immediate_table and kept sorted by IMM for binary search.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
991
992 static int
993 simd_imm_encoding_cmp(const void *i1, const void *i2)
994 {
995 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
996 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
997
998 if (imm1->imm < imm2->imm)
999 return -1;
1000 if (imm1->imm > imm2->imm)
1001 return +1;
1002 return 0;
1003 }
1004
1005 /* immediate bitfield standard encoding
1006 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1007 1 ssssss rrrrrr 64 rrrrrr ssssss
1008 0 0sssss 0rrrrr 32 rrrrr sssss
1009 0 10ssss 00rrrr 16 rrrr ssss
1010 0 110sss 000rrr 8 rrr sss
1011 0 1110ss 0000rr 4 rr ss
1012 0 11110s 00000r 2 r s */
/* Pack the logical-immediate fields into the imm13 layout:
   bit 12 = IS64 (the N bit), bits 11..6 = R (immr), bits 5..0 = S (imms).  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;
  encoding |= r << 6;
  encoding |= is64 << 12;
  return encoding;
}
1018
/* Fill simd_immediates[] with every valid logical-immediate bitmask and
   its standard encoding, then sort the table by immediate value so that
   aarch64_logical_immediate_p can bsearch it.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    /* Each case deliberately falls through to the next, doubling
	       the pattern width until the full 64 bits are filled.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1079
1080 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1081 be accepted by logical (immediate) instructions
1082 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1083
1084 ESIZE is the number of bytes in the decoded immediate value.
1085 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1086 VALUE will be returned in *ENCODING. */
1087
1088 bfd_boolean
1089 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1090 {
1091 simd_imm_encoding imm_enc;
1092 const simd_imm_encoding *imm_encoding;
1093 static bfd_boolean initialized = FALSE;
1094 uint64_t upper;
1095 int i;
1096
1097 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1098 value, is32);
1099
1100 if (initialized == FALSE)
1101 {
1102 build_immediate_table ();
1103 initialized = TRUE;
1104 }
1105
1106 /* Allow all zeros or all ones in top bits, so that
1107 constant expressions like ~1 are permitted. */
1108 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1109 if ((value & ~upper) != value && (value | upper) != value)
1110 return FALSE;
1111
1112 /* Replicate to a full 64-bit value. */
1113 value &= ~upper;
1114 for (i = esize * 8; i < 64; i *= 2)
1115 value |= (value << i);
1116
1117 imm_enc.imm = value;
1118 imm_encoding = (const simd_imm_encoding *)
1119 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1120 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1121 if (imm_encoding == NULL)
1122 {
1123 DEBUG_TRACE ("exit with FALSE");
1124 return FALSE;
1125 }
1126 if (encoding != NULL)
1127 *encoding = imm_encoding->encoding;
1128 DEBUG_TRACE ("exit with TRUE");
1129 return TRUE;
1130 }
1131
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int pos;

  for (pos = 0; pos < 8; pos++)
    {
      uint32_t b = (uint32_t) (imm >> (8 * pos)) & 0xff;

      switch (b)
	{
	case 0xff:
	  /* Byte POS is all-ones: contribute bit POS of the result.  */
	  result |= 1 << pos;
	  break;
	case 0x00:
	  /* All-zeros byte contributes a clear bit.  */
	  break;
	default:
	  /* Any mixed byte means IMM is not an expanded imm8.  */
	  return -1;
	}
    }
  return result;
}
1153
1154 /* Utility inline functions for operand_general_constraint_met_p. */
1155
1156 static inline void
1157 set_error (aarch64_operand_error *mismatch_detail,
1158 enum aarch64_operand_error_kind kind, int idx,
1159 const char* error)
1160 {
1161 if (mismatch_detail == NULL)
1162 return;
1163 mismatch_detail->kind = kind;
1164 mismatch_detail->index = idx;
1165 mismatch_detail->error = error;
1166 }
1167
1168 static inline void
1169 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1170 const char* error)
1171 {
1172 if (mismatch_detail == NULL)
1173 return;
1174 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1175 }
1176
1177 static inline void
1178 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1179 int idx, int lower_bound, int upper_bound,
1180 const char* error)
1181 {
1182 if (mismatch_detail == NULL)
1183 return;
1184 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1185 mismatch_detail->data[0] = lower_bound;
1186 mismatch_detail->data[1] = upper_bound;
1187 }
1188
1189 static inline void
1190 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1191 int idx, int lower_bound, int upper_bound)
1192 {
1193 if (mismatch_detail == NULL)
1194 return;
1195 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1196 _("immediate value"));
1197 }
1198
1199 static inline void
1200 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1201 int idx, int lower_bound, int upper_bound)
1202 {
1203 if (mismatch_detail == NULL)
1204 return;
1205 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1206 _("immediate offset"));
1207 }
1208
1209 static inline void
1210 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1211 int idx, int lower_bound, int upper_bound)
1212 {
1213 if (mismatch_detail == NULL)
1214 return;
1215 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1216 _("register number"));
1217 }
1218
1219 static inline void
1220 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1221 int idx, int lower_bound, int upper_bound)
1222 {
1223 if (mismatch_detail == NULL)
1224 return;
1225 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1226 _("register element index"));
1227 }
1228
1229 static inline void
1230 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1231 int idx, int lower_bound, int upper_bound)
1232 {
1233 if (mismatch_detail == NULL)
1234 return;
1235 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1236 _("shift amount"));
1237 }
1238
1239 static inline void
1240 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1241 int alignment)
1242 {
1243 if (mismatch_detail == NULL)
1244 return;
1245 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1246 mismatch_detail->data[0] = alignment;
1247 }
1248
1249 static inline void
1250 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1251 int expected_num)
1252 {
1253 if (mismatch_detail == NULL)
1254 return;
1255 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1256 mismatch_detail->data[0] = expected_num;
1257 }
1258
1259 static inline void
1260 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1261 const char* error)
1262 {
1263 if (mismatch_detail == NULL)
1264 return;
1265 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1266 }
1267
1268 /* General constraint checking based on operand code.
1269
1270 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1271 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1272
1273 This function has to be called after the qualifiers for all operands
1274 have been resolved.
1275
1276 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1277 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1278 of error message during the disassembling where error message is not
1279 wanted. We avoid the dynamic construction of strings of error messages
1280 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1281 use a combination of error code, static string and some integer data to
1282 represent an error. */
1283
static int
operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
				  enum aarch64_opnd type,
				  const aarch64_opcode *opcode,
				  aarch64_operand_error *mismatch_detail)
{
  unsigned num;
  unsigned char size;
  int64_t imm;
  const aarch64_opnd_info *opnd = opnds + idx;
  aarch64_opnd_qualifier_t qualifier = opnd->qualifier;

  assert (opcode->operands[idx] == opnd->type && opnd->type == type);

  /* Dispatch on the broad operand class first, then on the specific
     operand code within each class.  */
  switch (aarch64_operands[type].op_class)
    {
    case AARCH64_OPND_CLASS_INT_REG:
      /* Check pair reg constraints for cas* instructions.  */
      if (type == AARCH64_OPND_PAIRREG)
	{
	  assert (idx == 1 || idx == 3);
	  if (opnds[idx - 1].reg.regno % 2 != 0)
	    {
	      set_syntax_error (mismatch_detail, idx - 1,
				_("reg pair must start from even reg"));
	      return 0;
	    }
	  if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
	    {
	      set_syntax_error (mismatch_detail, idx,
				_("reg pair must be contiguous"));
	      return 0;
	    }
	  break;
	}

      /* <Xt> may be optional in some IC and TLBI instructions.  */
      if (type == AARCH64_OPND_Rt_SYS)
	{
	  assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
			       == AARCH64_OPND_CLASS_SYSTEM));
	  if (opnds[1].present
	      && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
	    {
	      set_other_error (mismatch_detail, idx, _("extraneous register"));
	      return 0;
	    }
	  if (!opnds[1].present
	      && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
	    {
	      set_other_error (mismatch_detail, idx, _("missing register"));
	      return 0;
	    }
	}
      switch (qualifier)
	{
	case AARCH64_OPND_QLF_WSP:
	case AARCH64_OPND_QLF_SP:
	  if (!aarch64_stack_pointer_p (opnd))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("stack pointer register expected"));
	      return 0;
	    }
	  break;
	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_SVE_REG:
      switch (type)
	{
	case AARCH64_OPND_SVE_Zn_INDEX:
	  /* Element index must fit in the bits left over in the Zn field.  */
	  size = aarch64_get_qualifier_esize (opnd->qualifier);
	  if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
	    {
	      set_elem_idx_out_of_range_error (mismatch_detail, idx,
					       0, 64 / size - 1);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_SVE_ZnxN:
	case AARCH64_OPND_SVE_ZtxN:
	  /* The opcode-dependent value holds the expected list length.  */
	  if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid register list"));
	      return 0;
	    }
	  break;

	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_PRED_REG:
      /* A 3-bit predicate field can only encode P0-P7.  */
      if (opnd->reg.regno >= 8
	  && get_operand_fields_width (get_operand_from_code (type)) == 3)
	{
	  set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
	  return 0;
	}
      break;

    case AARCH64_OPND_CLASS_COND:
      if (type == AARCH64_OPND_COND1
	  && (opnds[idx].cond->value & 0xe) == 0xe)
	{
	  /* Not allow AL or NV.  */
	  /* NOTE(review): unlike the other checks, this records the error
	     but still falls through to "return 1" — confirm callers rely
	     on mismatch_detail here rather than the return value.  */
	  set_syntax_error (mismatch_detail, idx, NULL);
	}
      break;

    case AARCH64_OPND_CLASS_ADDRESS:
      /* Check writeback.  */
      switch (opcode->iclass)
	{
	case ldst_pos:
	case ldst_unscaled:
	case ldstnapair_offs:
	case ldstpair_off:
	case ldst_unpriv:
	  if (opnd->addr.writeback == 1)
	    {
	      set_syntax_error (mismatch_detail, idx,
				_("unexpected address writeback"));
	      return 0;
	    }
	  break;
	case ldst_imm9:
	case ldstpair_indexed:
	case asisdlsep:
	case asisdlsop:
	  if (opnd->addr.writeback == 0)
	    {
	      set_syntax_error (mismatch_detail, idx,
				_("address writeback expected"));
	      return 0;
	    }
	  break;
	default:
	  assert (opnd->addr.writeback == 0);
	  break;
	}
      switch (type)
	{
	case AARCH64_OPND_ADDR_SIMM7:
	  /* Scaled signed 7 bits immediate offset.  */
	  /* Get the size of the data element that is accessed, which may be
	     different from that of the source register size,
	     e.g. in strb/ldrb.  */
	  size = aarch64_get_qualifier_esize (opnd->qualifier);
	  if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx,
					     -64 * size, 63 * size);
	      return 0;
	    }
	  if (!value_aligned_p (opnd->addr.offset.imm, size))
	    {
	      set_unaligned_error (mismatch_detail, idx, size);
	      return 0;
	    }
	  break;
	case AARCH64_OPND_ADDR_SIMM9:
	  /* Unscaled signed 9 bits immediate offset.  */
	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_ADDR_SIMM9_2:
	  /* Unscaled signed 9 bits immediate offset, which has to be negative
	     or unaligned.  */
	  size = aarch64_get_qualifier_esize (qualifier);
	  if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
	       && !value_aligned_p (opnd->addr.offset.imm, size))
	      || value_in_range_p (opnd->addr.offset.imm, -256, -1))
	    return 1;
	  set_other_error (mismatch_detail, idx,
			   _("negative or unaligned offset expected"));
	  return 0;

	case AARCH64_OPND_SIMD_ADDR_POST:
	  /* AdvSIMD load/store multiple structures, post-index.  */
	  assert (idx == 1);
	  if (opnd->addr.offset.is_reg)
	    {
	      /* Register offset: any GPR except XZR (regno 31).  */
	      if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
		return 1;
	      else
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid register offset"));
		  return 0;
		}
	    }
	  else
	    {
	      const aarch64_opnd_info *prev = &opnds[idx-1];
	      unsigned num_bytes;	/* total number of bytes transferred.  */
	      /* The opcode dependent area stores the number of elements in
		 each structure to be loaded/stored.  */
	      int is_ld1r = get_opcode_dependent_value (opcode) == 1;
	      if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
		/* Special handling of loading single structure to all lane.  */
		num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
		  * aarch64_get_qualifier_esize (prev->qualifier);
	      else
		num_bytes = prev->reglist.num_regs
		  * aarch64_get_qualifier_esize (prev->qualifier)
		  * aarch64_get_qualifier_nelem (prev->qualifier);
	      /* Immediate post-index must equal the transfer size.  */
	      if ((int) num_bytes != opnd->addr.offset.imm)
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid post-increment amount"));
		  return 0;
		}
	    }
	  break;

	case AARCH64_OPND_ADDR_REGOFF:
	  /* Get the size of the data element that is accessed, which may be
	     different from that of the source register size,
	     e.g. in strb/ldrb.  */
	  size = aarch64_get_qualifier_esize (opnd->qualifier);
	  /* It is either no shift or shift by the binary logarithm of SIZE.  */
	  if (opnd->shifter.amount != 0
	      && opnd->shifter.amount != (int)get_logsz (size))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift amount"));
	      return 0;
	    }
	  /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
	     operators.  */
	  switch (opnd->shifter.kind)
	    {
	    case AARCH64_MOD_UXTW:
	    case AARCH64_MOD_LSL:
	    case AARCH64_MOD_SXTW:
	    case AARCH64_MOD_SXTX: break;
	    default:
	      set_other_error (mismatch_detail, idx,
			       _("invalid extend/shift operator"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_ADDR_UIMM12:
	  imm = opnd->addr.offset.imm;
	  /* Get the size of the data element that is accessed, which may be
	     different from that of the source register size,
	     e.g. in strb/ldrb.  */
	  size = aarch64_get_qualifier_esize (qualifier);
	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx,
					     0, 4095 * size);
	      return 0;
	    }
	  if (!value_aligned_p (opnd->addr.offset.imm, size))
	    {
	      set_unaligned_error (mismatch_detail, idx, size);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_ADDR_PCREL14:
	case AARCH64_OPND_ADDR_PCREL19:
	case AARCH64_OPND_ADDR_PCREL21:
	case AARCH64_OPND_ADDR_PCREL26:
	  imm = opnd->imm.value;
	  if (operand_need_shift_by_two (get_operand_from_code (type)))
	    {
	      /* The offset value in a PC-relative branch instruction is always
		 4-byte aligned and is encoded without the lowest 2 bits.  */
	      if (!value_aligned_p (imm, 4))
		{
		  set_unaligned_error (mismatch_detail, idx, 4);
		  return 0;
		}
	      /* Right shift by 2 so that we can carry out the following check
		 canonically.  */
	      imm >>= 2;
	    }
	  size = get_operand_fields_width (get_operand_from_code (type));
	  if (!value_fit_signed_field_p (imm, size))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return 0;
	    }
	  break;

	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_SIMD_REGLIST:
      if (type == AARCH64_OPND_LEt)
	{
	  /* Get the upper bound for the element index.  */
	  num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
	  if (!value_in_range_p (opnd->reglist.index, 0, num))
	    {
	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
	      return 0;
	    }
	}
      /* The opcode dependent area stores the number of elements in
	 each structure to be loaded/stored.  */
      num = get_opcode_dependent_value (opcode);
      switch (type)
	{
	case AARCH64_OPND_LVt:
	  assert (num >= 1 && num <= 4);
	  /* Unless LD1/ST1, the number of registers should be equal to that
	     of the structure elements.  */
	  if (num != 1 && opnd->reglist.num_regs != num)
	    {
	      set_reg_list_error (mismatch_detail, idx, num);
	      return 0;
	    }
	  break;
	case AARCH64_OPND_LVt_AL:
	case AARCH64_OPND_LEt:
	  assert (num >= 1 && num <= 4);
	  /* The number of registers should be equal to that of the structure
	     elements.  */
	  if (opnd->reglist.num_regs != num)
	    {
	      set_reg_list_error (mismatch_detail, idx, num);
	      return 0;
	    }
	  break;
	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_IMMEDIATE:
      /* Constraint check on immediate operand.  */
      imm = opnd->imm.value;
      /* E.g. imm_0_31 constrains value to be 0..31.  */
      if (qualifier_value_in_range_constraint_p (qualifier)
	  && !value_in_range_p (imm, get_lower_bound (qualifier),
				get_upper_bound (qualifier)))
	{
	  set_imm_out_of_range_error (mismatch_detail, idx,
				      get_lower_bound (qualifier),
				      get_upper_bound (qualifier));
	  return 0;
	}

      switch (type)
	{
	case AARCH64_OPND_AIMM:
	  /* Arithmetic immediate: 12-bit value, optionally LSL #12.  */
	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift operator"));
	      return 0;
	    }
	  if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("shift amount expected to be 0 or 12"));
	      return 0;
	    }
	  if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_HALF:
	  /* MOVZ/MOVN/MOVK 16-bit chunk; shift must be a multiple of 16
	     that addresses a chunk within the destination register.  */
	  assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift operator"));
	      return 0;
	    }
	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
	  if (!value_aligned_p (opnd->shifter.amount, 16))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("shift amount should be a multiple of 16"));
	      return 0;
	    }
	  if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
	    {
	      set_sft_amount_out_of_range_error (mismatch_detail, idx,
						 0, size * 8 - 16);
	      return 0;
	    }
	  if (opnd->imm.value < 0)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("negative immediate value not allowed"));
	      return 0;
	    }
	  if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_IMM_MOV:
	    {
	      int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
	      imm = opnd->imm.value;
	      assert (idx == 1);
	      switch (opcode->op)
		{
		case OP_MOV_IMM_WIDEN:
		  imm = ~imm;
		  /* Fall through...  */
		case OP_MOV_IMM_WIDE:
		  if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
		    {
		      set_other_error (mismatch_detail, idx,
				       _("immediate out of range"));
		      return 0;
		    }
		  break;
		case OP_MOV_IMM_LOG:
		  if (!aarch64_logical_immediate_p (imm, esize, NULL))
		    {
		      set_other_error (mismatch_detail, idx,
				       _("immediate out of range"));
		      return 0;
		    }
		  break;
		default:
		  assert (0);
		  return 0;
		}
	    }
	  break;

	case AARCH64_OPND_NZCV:
	case AARCH64_OPND_CCMP_IMM:
	case AARCH64_OPND_EXCEPTION:
	case AARCH64_OPND_UIMM4:
	case AARCH64_OPND_UIMM7:
	case AARCH64_OPND_UIMM3_OP1:
	case AARCH64_OPND_UIMM3_OP2:
	  /* Plain unsigned immediates: bound by the field width.  */
	  size = get_operand_fields_width (get_operand_from_code (type));
	  assert (size < 32);
	  if (!value_fit_unsigned_field_p (opnd->imm.value, size))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
					  (1 << size) - 1);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_WIDTH:
	  assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
		  && opnds[0].type == AARCH64_OPND_Rd);
	  size = get_upper_bound (qualifier);
	  if (opnd->imm.value + opnds[idx-1].imm.value > size)
	    /* lsb+width <= reg.size  */
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 1,
					  size - opnds[idx-1].imm.value);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_LIMM:
	    {
	      int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
	      uint64_t uimm = opnd->imm.value;
	      /* BIC takes the complement before validating the bitmask.  */
	      if (opcode->op == OP_BIC)
		uimm = ~uimm;
	      if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
		{
		  set_other_error (mismatch_detail, idx,
				   _("immediate out of range"));
		  return 0;
		}
	    }
	  break;

	case AARCH64_OPND_IMM0:
	case AARCH64_OPND_FPIMM0:
	  if (opnd->imm.value != 0)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate zero expected"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_SHLL_IMM:
	  /* SHLL shift must equal the source element size in bits.  */
	  assert (idx == 2);
	  size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
	  if (opnd->imm.value != size)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift amount"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_IMM_VLSL:
	  size = aarch64_get_qualifier_esize (qualifier);
	  if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
					  size * 8 - 1);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_IMM_VLSR:
	  size = aarch64_get_qualifier_esize (qualifier);
	  if (!value_in_range_p (opnd->imm.value, 1, size * 8))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_SIMD_IMM:
	case AARCH64_OPND_SIMD_IMM_SFT:
	  /* Qualifier check.  */
	  switch (qualifier)
	    {
	    case AARCH64_OPND_QLF_LSL:
	      if (opnd->shifter.kind != AARCH64_MOD_LSL)
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid shift operator"));
		  return 0;
		}
	      break;
	    case AARCH64_OPND_QLF_MSL:
	      if (opnd->shifter.kind != AARCH64_MOD_MSL)
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid shift operator"));
		  return 0;
		}
	      break;
	    case AARCH64_OPND_QLF_NIL:
	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
		{
		  set_other_error (mismatch_detail, idx,
				   _("shift is not permitted"));
		  return 0;
		}
	      break;
	    default:
	      assert (0);
	      return 0;
	    }
	  /* Is the immediate valid?  */
	  assert (idx == 1);
	  if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
	    {
	      /* uimm8 or simm8  */
	      if (!value_in_range_p (opnd->imm.value, -128, 255))
		{
		  set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
		  return 0;
		}
	    }
	  else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
	    {
	      /* uimm64 is not
		 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
		 ffffffffgggggggghhhhhhhh'.  */
	      set_other_error (mismatch_detail, idx,
			       _("invalid value for immediate"));
	      return 0;
	    }
	  /* Is the shift amount valid?  */
	  switch (opnd->shifter.kind)
	    {
	    case AARCH64_MOD_LSL:
	      size = aarch64_get_qualifier_esize (opnds[0].qualifier);
	      if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
		{
		  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
						     (size - 1) * 8);
		  return 0;
		}
	      if (!value_aligned_p (opnd->shifter.amount, 8))
		{
		  set_unaligned_error (mismatch_detail, idx, 8);
		  return 0;
		}
	      break;
	    case AARCH64_MOD_MSL:
	      /* Only 8 and 16 are valid shift amount.  */
	      /* NOTE(review): the message below says "0 or 16" while the
		 check accepts 8 or 16 — confirm whether the message text
		 should read "8 or 16".  */
	      if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
		{
		  set_other_error (mismatch_detail, idx,
				   _("shift amount expected to be 0 or 16"));
		  return 0;
		}
	      break;
	    default:
	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid shift operator"));
		  return 0;
		}
	      break;
	    }
	  break;

	case AARCH64_OPND_FPIMM:
	case AARCH64_OPND_SIMD_FPIMM:
	  if (opnd->imm.is_fp == 0)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("floating-point immediate expected"));
	      return 0;
	    }
	  /* The value is expected to be an 8-bit floating-point constant with
	     sign, 3-bit exponent and normalized 4 bits of precision, encoded
	     in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
	     instruction).  */
	  if (!value_in_range_p (opnd->imm.value, 0, 255))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return 0;
	    }
	  if (opnd->shifter.kind != AARCH64_MOD_NONE)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift operator"));
	      return 0;
	    }
	  break;

	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_CP_REG:
      /* Cn or Cm: 4-bit opcode field named for historical reasons.
	 valid range: C0 - C15.  */
      if (opnd->reg.regno > 15)
	{
	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
	  return 0;
	}
      break;

    case AARCH64_OPND_CLASS_SYSTEM:
      switch (type)
	{
	case AARCH64_OPND_PSTATEFIELD:
	  assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
	  /* MSR UAO, #uimm4
	     MSR PAN, #uimm4
	     The immediate must be #0 or #1.  */
	  if ((opnd->pstatefield == 0x03	/* UAO.  */
	       || opnd->pstatefield == 0x04)	/* PAN.  */
	      && opnds[1].imm.value > 1)
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
	      return 0;
	    }
	  /* MSR SPSel, #uimm4
	     Uses uimm4 as a control value to select the stack pointer: if
	     bit 0 is set it selects the current exception level's stack
	     pointer, if bit 0 is clear it selects shared EL0 stack pointer.
	     Bits 1 to 3 of uimm4 are reserved and should be zero.  */
	  if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
	      return 0;
	    }
	  break;
	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_SIMD_ELEMENT:
      /* Get the upper bound for the element index.  */
      num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
      /* Index out-of-range.  */
      if (!value_in_range_p (opnd->reglane.index, 0, num))
	{
	  set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
	  return 0;
	}
      /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
	 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
	 number is encoded in "size:M:Rm":
	 size	<Vm>
	 00		RESERVED
	 01		0:Rm
	 10		M:Rm
	 11		RESERVED  */
      if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
	  && !value_in_range_p (opnd->reglane.regno, 0, 15))
	{
	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
	  return 0;
	}
      break;

    case AARCH64_OPND_CLASS_MODIFIED_REG:
      assert (idx == 1 || idx == 2);
      switch (type)
	{
	case AARCH64_OPND_Rm_EXT:
	  if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
	      && opnd->shifter.kind != AARCH64_MOD_LSL)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("extend operator expected"));
	      return 0;
	    }
	  /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
	     (i.e. SP), in which case it defaults to LSL. The LSL alias is
	     only valid when "Rd" or "Rn" is '11111', and is preferred in that
	     case.  */
	  if (!aarch64_stack_pointer_p (opnds + 0)
	      && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
	    {
	      if (!opnd->shifter.operator_present)
		{
		  set_other_error (mismatch_detail, idx,
				   _("missing extend operator"));
		  return 0;
		}
	      else if (opnd->shifter.kind == AARCH64_MOD_LSL)
		{
		  set_other_error (mismatch_detail, idx,
				   _("'LSL' operator not allowed"));
		  return 0;
		}
	    }
	  assert (opnd->shifter.operator_present	/* Default to LSL.  */
		  || opnd->shifter.kind == AARCH64_MOD_LSL);
	  if (!value_in_range_p (opnd->shifter.amount, 0, 4))
	    {
	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
	      return 0;
	    }
	  /* In the 64-bit form, the final register operand is written as Wm
	     for all but the (possibly omitted) UXTX/LSL and SXTX
	     operators.
	     N.B. GAS allows X register to be used with any operator as a
	     programming convenience.  */
	  if (qualifier == AARCH64_OPND_QLF_X
	      && opnd->shifter.kind != AARCH64_MOD_LSL
	      && opnd->shifter.kind != AARCH64_MOD_UXTX
	      && opnd->shifter.kind != AARCH64_MOD_SXTX)
	    {
	      set_other_error (mismatch_detail, idx, _("W register expected"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_Rm_SFT:
	  /* ROR is not available to the shifted register operand in
	     arithmetic instructions.  */
	  if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("shift operator expected"));
	      return 0;
	    }
	  if (opnd->shifter.kind == AARCH64_MOD_ROR
	      && opcode->iclass != log_shift)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("'ROR' operator not allowed"));
	      return 0;
	    }
	  num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
	  if (!value_in_range_p (opnd->shifter.amount, 0, num))
	    {
	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
	      return 0;
	    }
	  break;

	default:
	  break;
	}
      break;

    default:
      break;
    }

  return 1;
}
2097
2098 /* Main entrypoint for the operand constraint checking.
2099
2100 Return 1 if operands of *INST meet the constraint applied by the operand
2101 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2102 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2103 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2104 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2105 error kind when it is notified that an instruction does not pass the check).
2106
2107 Un-determined operand qualifiers may get established during the process. */
2108
int
aarch64_match_operands_constraint (aarch64_inst *inst,
				   aarch64_operand_error *mismatch_detail)
{
  int i;

  DEBUG_TRACE ("enter");

  /* Check for cases where a source register needs to be the same as the
     destination register.  Do this before matching qualifiers since if
     an instruction has both invalid tying and invalid qualifiers,
     the error about qualifiers would suggest several alternative
     instructions that also have invalid tying.  */
  i = inst->opcode->tied_operand;
  if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
    {
      if (mismatch_detail)
	{
	  mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
	  mismatch_detail->index = i;
	  mismatch_detail->error = NULL;
	}
      return 0;
    }

  /* Match operands' qualifier.
     *INST has already had qualifiers established for some, if not all, of
     its operands; we need to find out whether these established
     qualifiers match one of the qualifier sequences in
     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
     with the corresponding qualifier in such a sequence.
     Only basic operand constraint checking is done here; the more thorough
     constraint checking will be carried out by
     operand_general_constraint_met_p, which has to be called after this in
     order to get all of the operands' qualifiers established.  */
  if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
    {
      DEBUG_TRACE ("FAIL on operand qualifier matching");
      if (mismatch_detail)
	{
	  /* Return an error type to indicate that it is the qualifier
	     matching failure; we don't care about which operand as there
	     is enough information in the opcode table to reproduce it.  */
	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
	  mismatch_detail->index = -1;
	  mismatch_detail->error = NULL;
	}
      return 0;
    }

  /* Match operands' constraint.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      enum aarch64_opnd type = inst->opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (inst->operands[i].skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      if (operand_general_constraint_met_p (inst->operands, i, type,
					    inst->opcode, mismatch_detail) == 0)
	{
	  DEBUG_TRACE ("FAIL on operand %d", i);
	  return 0;
	}
    }

  DEBUG_TRACE ("PASS");

  return 1;
}
2182
2183 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2184 Also updates the TYPE of each INST->OPERANDS with the corresponding
2185 value of OPCODE->OPERANDS.
2186
2187 Note that some operand qualifiers may need to be manually cleared by
2188 the caller before it further calls the aarch64_opcode_encode; by
2189 doing this, it helps the qualifier matching facilities work
2190 properly. */
2191
2192 const aarch64_opcode*
2193 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2194 {
2195 int i;
2196 const aarch64_opcode *old = inst->opcode;
2197
2198 inst->opcode = opcode;
2199
2200 /* Update the operand types. */
2201 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2202 {
2203 inst->operands[i].type = opcode->operands[i];
2204 if (opcode->operands[i] == AARCH64_OPND_NIL)
2205 break;
2206 }
2207
2208 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2209
2210 return old;
2211 }
2212
2213 int
2214 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2215 {
2216 int i;
2217 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2218 if (operands[i] == operand)
2219 return i;
2220 else if (operands[i] == AARCH64_OPND_NIL)
2221 break;
2222 return -1;
2223 }
2224 \f
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* Integer register names, indexed as int_reg[has_zr][is_64][regno]:
   the first index selects whether register 31 names the stack pointer (0)
   or the zero register (1); the second selects 32-bit (W) or 64-bit (X).
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};
#undef BANK
2244
2245 /* Return the integer register name.
2246 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2247
2248 static inline const char *
2249 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2250 {
2251 const int has_zr = sp_reg_p ? 0 : 1;
2252 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2253 return int_reg[has_zr][is_64][regno];
2254 }
2255
2256 /* Like get_int_reg_name, but IS_64 is always 1. */
2257
2258 static inline const char *
2259 get_64bit_int_reg_name (int regno, int sp_reg_p)
2260 {
2261 const int has_zr = sp_reg_p ? 0 : 1;
2262 return int_reg[has_zr][1][regno];
2263 }
2264
2265 /* Get the name of the integer offset register in OPND, using the shift type
2266 to decide whether it's a word or doubleword. */
2267
2268 static inline const char *
2269 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2270 {
2271 switch (opnd->shifter.kind)
2272 {
2273 case AARCH64_MOD_UXTW:
2274 case AARCH64_MOD_SXTW:
2275 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2276
2277 case AARCH64_MOD_LSL:
2278 case AARCH64_MOD_SXTX:
2279 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2280
2281 default:
2282 abort ();
2283 }
2284 }
2285
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union lets the bit pattern produced by expand_fp_imm be reinterpreted
   as the corresponding IEEE type for printing.  */

/* 64-bit pattern viewed as a double.  */
typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

/* 32-bit pattern viewed as a float.  */
typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision constants are expanded by expand_fp_imm (SIZE == 2) to a
   single-precision pattern, hence the float member here too.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2305
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Initialize to 0 so that an unsupported SIZE cannot return an
     uninitialized value when the assert below is compiled out (NDEBUG).  */
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      /* Build the upper 32 bits of the double pattern, then shift into
	 place; all the shift counts are therefore given relative to bit
	 32 of the final 64-bit value.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	  | ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)	*/
	  | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	  | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	  | (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7> */
	  | ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>) */
	  | (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	  | (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2349
2350 /* Produce the string representation of the register list operand *OPND
2351 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2352 the register name that comes before the register number, such as "v". */
2353 static void
2354 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2355 const char *prefix)
2356 {
2357 const int num_regs = opnd->reglist.num_regs;
2358 const int first_reg = opnd->reglist.first_regno;
2359 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2360 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2361 char tb[8]; /* Temporary buffer. */
2362
2363 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2364 assert (num_regs >= 1 && num_regs <= 4);
2365
2366 /* Prepare the index if any. */
2367 if (opnd->reglist.has_index)
2368 snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
2369 else
2370 tb[0] = '\0';
2371
2372 /* The hyphenated form is preferred for disassembly if there are
2373 more than two registers in the list, and the register numbers
2374 are monotonically increasing in increments of one. */
2375 if (num_regs > 2 && last_reg > first_reg)
2376 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2377 prefix, last_reg, qlf_name, tb);
2378 else
2379 {
2380 const int reg0 = first_reg;
2381 const int reg1 = (first_reg + 1) & 0x1f;
2382 const int reg2 = (first_reg + 2) & 0x1f;
2383 const int reg3 = (first_reg + 3) & 0x1f;
2384
2385 switch (num_regs)
2386 {
2387 case 1:
2388 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2389 break;
2390 case 2:
2391 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2392 prefix, reg1, qlf_name, tb);
2393 break;
2394 case 3:
2395 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2396 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2397 prefix, reg2, qlf_name, tb);
2398 break;
2399 case 4:
2400 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2401 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2402 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2403 break;
2404 }
2405 }
2406 }
2407
2408 /* Print the register+immediate address in OPND to BUF, which has SIZE
2409 characters. BASE is the name of the base register. */
2410
2411 static void
2412 print_immediate_offset_address (char *buf, size_t size,
2413 const aarch64_opnd_info *opnd,
2414 const char *base)
2415 {
2416 if (opnd->addr.writeback)
2417 {
2418 if (opnd->addr.preind)
2419 snprintf (buf, size, "[%s,#%d]!", base, opnd->addr.offset.imm);
2420 else
2421 snprintf (buf, size, "[%s],#%d", base, opnd->addr.offset.imm);
2422 }
2423 else
2424 {
2425 if (opnd->addr.offset.imm)
2426 snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm);
2427 else
2428 snprintf (buf, size, "[%s]", base);
2429 }
2430 }
2431
2432 /* Produce the string representation of the register offset address operand
2433 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2434 the names of the base and offset registers. */
2435 static void
2436 print_register_offset_address (char *buf, size_t size,
2437 const aarch64_opnd_info *opnd,
2438 const char *base, const char *offset)
2439 {
2440 char tb[16]; /* Temporary buffer. */
2441 bfd_boolean print_extend_p = TRUE;
2442 bfd_boolean print_amount_p = TRUE;
2443 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2444
2445 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2446 || !opnd->shifter.amount_present))
2447 {
2448 /* Not print the shift/extend amount when the amount is zero and
2449 when it is not the special case of 8-bit load/store instruction. */
2450 print_amount_p = FALSE;
2451 /* Likewise, no need to print the shift operator LSL in such a
2452 situation. */
2453 if (opnd->shifter.kind == AARCH64_MOD_LSL)
2454 print_extend_p = FALSE;
2455 }
2456
2457 /* Prepare for the extend/shift. */
2458 if (print_extend_p)
2459 {
2460 if (print_amount_p)
2461 snprintf (tb, sizeof (tb), ",%s #%d", shift_name, opnd->shifter.amount);
2462 else
2463 snprintf (tb, sizeof (tb), ",%s", shift_name);
2464 }
2465 else
2466 tb[0] = '\0';
2467
2468 snprintf (buf, size, "[%s,%s%s]", base, offset, tb);
2469 }
2470
2471 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2472 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2473 PC, PCREL_P and ADDRESS are used to pass in and return information about
2474 the PC-relative address calculation, where the PC value is passed in
2475 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2476 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2477 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2478
2479 The function serves both the disassembler and the assembler diagnostics
2480 issuer, which is the reason why it lives in this file. */
2481
void
aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
		       const aarch64_opcode *opcode,
		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
		       bfd_vma *address)
{
  int i;
  const char *name = NULL;
  const aarch64_opnd_info *opnd = opnds + idx;
  enum aarch64_modifier_kind kind;
  uint64_t addr;

  /* Default to an empty string and a non-PC-relative operand.  */
  buf[0] = '\0';
  if (pcrel_p)
    *pcrel_p = 0;

  switch (opnd->type)
    {
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_PAIRREG:
      /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
	 the <ic_op>, therefore we use opnd->present to override the
	 generic optional-ness information.  */
      if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
	break;
      /* Omit the operand, e.g. RET.  */
      if (optional_operand_p (opcode, idx)
	  && opnd->reg.regno == get_optional_operand_default_value (opcode))
	break;
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      break;

    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
      /* Register 31 names the stack pointer for these operands.  */
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
	      || opnd->qualifier == AARCH64_OPND_QLF_X
	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
      break;

    case AARCH64_OPND_Rm_EXT:
      kind = opnd->shifter.kind;
      assert (idx == 1 || idx == 2);
      if ((aarch64_stack_pointer_p (opnds)
	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
	       && kind == AARCH64_MOD_UXTW)
	      || (opnd->qualifier == AARCH64_OPND_QLF_X
		  && kind == AARCH64_MOD_UXTX)))
	{
	  /* 'LSL' is the preferred form in this case.  */
	  kind = AARCH64_MOD_LSL;
	  if (opnd->shifter.amount == 0)
	    {
	      /* Shifter omitted.  */
	      snprintf (buf, size, "%s",
			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
	      break;
	    }
	}
      if (opnd->shifter.amount)
	snprintf (buf, size, "%s, %s #%d",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "%s, %s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name);
      break;

    case AARCH64_OPND_Rm_SFT:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      /* A zero-amount LSL is the canonical "no shift" and is omitted.  */
      if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
	snprintf (buf, size, "%s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      else
	snprintf (buf, size, "%s, %s #%d",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
      /* FP/SIMD scalar register; the qualifier name supplies the b/h/s/d/q
	 prefix.  */
      snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reg.regno);
      break;

    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
      snprintf (buf, size, "v%d.%s", opnd->reg.regno,
		aarch64_get_qualifier_name (opnd->qualifier));
      break;

    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
      /* Vector element, e.g. v1.s[2].  */
      snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reglane.index);
      break;

    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
      break;

    case AARCH64_OPND_LVn:
    case AARCH64_OPND_LVt:
    case AARCH64_OPND_LVt_AL:
    case AARCH64_OPND_LEt:
      print_register_list (buf, size, opnd, "v");
      break;

    case AARCH64_OPND_SVE_Pd:
    case AARCH64_OPND_SVE_Pg3:
    case AARCH64_OPND_SVE_Pg4_5:
    case AARCH64_OPND_SVE_Pg4_10:
    case AARCH64_OPND_SVE_Pg4_16:
    case AARCH64_OPND_SVE_Pm:
    case AARCH64_OPND_SVE_Pn:
    case AARCH64_OPND_SVE_Pt:
      /* SVE predicate register: unqualified (p1), with zeroing/merging
	 predication (p1/z, p1/m), or with an element size (p1.s).  */
      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
	snprintf (buf, size, "p%d", opnd->reg.regno);
      else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
	       || opnd->qualifier == AARCH64_OPND_QLF_P_M)
	snprintf (buf, size, "p%d/%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));
      else
	snprintf (buf, size, "p%d.%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));
      break;

    case AARCH64_OPND_SVE_Za_5:
    case AARCH64_OPND_SVE_Za_16:
    case AARCH64_OPND_SVE_Zd:
    case AARCH64_OPND_SVE_Zm_5:
    case AARCH64_OPND_SVE_Zm_16:
    case AARCH64_OPND_SVE_Zn:
    case AARCH64_OPND_SVE_Zt:
      /* SVE vector register, optionally with an element size suffix.  */
      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
	snprintf (buf, size, "z%d", opnd->reg.regno);
      else
	snprintf (buf, size, "z%d.%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));
      break;

    case AARCH64_OPND_SVE_ZnxN:
    case AARCH64_OPND_SVE_ZtxN:
      print_register_list (buf, size, opnd, "z");
      break;

    case AARCH64_OPND_SVE_Zn_INDEX:
      snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reglane.index);
      break;

    case AARCH64_OPND_Cn:
    case AARCH64_OPND_Cm:
      snprintf (buf, size, "C%d", opnd->reg.regno);
      break;

    case AARCH64_OPND_IDX:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM0:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_FBITS:
      /* Plain decimal immediate.  */
      snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Print both hex and decimal forms; the width comes from the
	 destination register's qualifier.  */
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 4:	/* e.g. MOV Wd, #<imm32>.  */
	  {
	    int imm32 = opnd->imm.value;
	    snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
	  }
	  break;
	case 8:	/* e.g. MOV Xd, #<imm64>.  */
	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
		    opnd->imm.value, opnd->imm.value);
	  break;
	default: assert (0);
	}
      break;

    case AARCH64_OPND_FPIMM0:
      snprintf (buf, size, "#0.0");
      break;

    case AARCH64_OPND_LIMM:
    case AARCH64_OPND_AIMM:
    case AARCH64_OPND_HALF:
      if (opnd->shifter.amount)
	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
      if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
	  || opnd->shifter.kind == AARCH64_MOD_NONE)
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      else
	snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    case AARCH64_OPND_FPIMM:
    case AARCH64_OPND_SIMD_FPIMM:
      /* Expand the 8-bit encoded immediate and print it via the matching
	 conversion union; the width comes from the first operand.  */
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
	  {
	    half_conv_t c;
	    c.i = expand_fp_imm (2, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.f);
	  }
	  break;
	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
	  {
	    single_conv_t c;
	    c.i = expand_fp_imm (4, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.f);
	  }
	  break;
	case 8:	/* e.g. FMOV <Dd>, #<imm>.  */
	  {
	    double_conv_t c;
	    c.i = expand_fp_imm (8, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.d);
	  }
	  break;
	default: assert (0);
	}
      break;

    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM7:
      if (optional_operand_p (opcode, idx) == TRUE
	  && (opnd->imm.value ==
	      (int64_t) get_optional_operand_default_value (opcode)))
	/* Omit the operand, e.g. DCPS1.  */
	break;
      snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
      break;

    case AARCH64_OPND_COND:
    case AARCH64_OPND_COND1:
      snprintf (buf, size, "%s", opnd->cond->names[0]);
      break;

    case AARCH64_OPND_ADDR_ADRP:
      /* ADRP targets are page-aligned: clear the low 12 bits of the PC
	 before adding the immediate.  */
      addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
	+ opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may still be interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    case AARCH64_OPND_ADDR_PCREL14:
    case AARCH64_OPND_ADDR_PCREL19:
    case AARCH64_OPND_ADDR_PCREL21:
    case AARCH64_OPND_ADDR_PCREL26:
      addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may still be interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    case AARCH64_OPND_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_POST:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
	{
	  /* Post-indexed by register or by immediate.  */
	  if (opnd->addr.offset.is_reg)
	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
	  else
	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
	}
      else
	snprintf (buf, size, "[%s]", name);
      break;

    case AARCH64_OPND_ADDR_REGOFF:
      print_register_offset_address
	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
	 get_offset_int_reg_name (opnd));
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
      print_immediate_offset_address
	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
      break;

    case AARCH64_OPND_ADDR_UIMM12:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
      else
	snprintf (buf, size, "[%s]", name);
      break;

    case AARCH64_OPND_SYSREG:
      /* Prefer a named, non-deprecated entry from the system register
	 table; otherwise fall back to the generic s<op0>_... spelling.  */
      for (i = 0; aarch64_sys_regs[i].name; ++i)
	if (aarch64_sys_regs[i].value == opnd->sysreg
	    && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
	  break;
      if (aarch64_sys_regs[i].name)
	snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
      else
	{
	  /* Implementation defined system register.  */
	  unsigned int value = opnd->sysreg;
	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
		    value & 0x7);
	}
      break;

    case AARCH64_OPND_PSTATEFIELD:
      for (i = 0; aarch64_pstatefields[i].name; ++i)
	if (aarch64_pstatefields[i].value == opnd->pstatefield)
	  break;
      assert (aarch64_pstatefields[i].name);
      snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
      break;

    case AARCH64_OPND_SYSREG_AT:
    case AARCH64_OPND_SYSREG_DC:
    case AARCH64_OPND_SYSREG_IC:
    case AARCH64_OPND_SYSREG_TLBI:
      snprintf (buf, size, "%s", opnd->sysins_op->name);
      break;

    case AARCH64_OPND_BARRIER:
      snprintf (buf, size, "%s", opnd->barrier->name);
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* Operand can be omitted, e.g. in DCPS1.  */
      if (! optional_operand_p (opcode, idx)
	  || (opnd->barrier->value
	      != get_optional_operand_default_value (opcode)))
	snprintf (buf, size, "#0x%x", opnd->barrier->value);
      break;

    case AARCH64_OPND_PRFOP:
      /* Named prefetch operation, or its raw value if unnamed.  */
      if (opnd->prfop->name != NULL)
	snprintf (buf, size, "%s", opnd->prfop->name);
      else
	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
      break;

    case AARCH64_OPND_BARRIER_PSB:
      snprintf (buf, size, "%s", opnd->hint_option->name);
      break;

    default:
      assert (0);
    }
}
2898 \f
/* Pack a system-register encoding op0:op1:CRn:CRm:op2 into a 14-bit
   value (the >> 5 drops the unused low bits, leaving op0 at bit 14).  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* CRn/CRm field values used in the tables below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

#ifdef F_DEPRECATED
#undef F_DEPRECATED
#endif
#define F_DEPRECATED 0x1 /* Deprecated system register.  */

#ifdef F_ARCHEXT
#undef F_ARCHEXT
#endif
#define F_ARCHEXT 0x2 /* Architecture dependent system register.  */

#ifdef F_HASXT
#undef F_HASXT
#endif
#define F_HASXT 0x4 /* System instruction register <Xt>
			operand.  */
2938
2939
2940 /* TODO there are two more issues need to be resolved
2941 1. handle read-only and write-only system registers
2942 2. handle cpu-implementation-defined system registers. */
2943 const aarch64_sys_reg aarch64_sys_regs [] =
2944 {
2945 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
2946 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
2947 { "elr_el1", CPEN_(0,C0,1), 0 },
2948 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
2949 { "sp_el0", CPEN_(0,C1,0), 0 },
2950 { "spsel", CPEN_(0,C2,0), 0 },
2951 { "daif", CPEN_(3,C2,1), 0 },
2952 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
2953 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
2954 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
2955 { "nzcv", CPEN_(3,C2,0), 0 },
2956 { "fpcr", CPEN_(3,C4,0), 0 },
2957 { "fpsr", CPEN_(3,C4,1), 0 },
2958 { "dspsr_el0", CPEN_(3,C5,0), 0 },
2959 { "dlr_el0", CPEN_(3,C5,1), 0 },
2960 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
2961 { "elr_el2", CPEN_(4,C0,1), 0 },
2962 { "sp_el1", CPEN_(4,C1,0), 0 },
2963 { "spsr_irq", CPEN_(4,C3,0), 0 },
2964 { "spsr_abt", CPEN_(4,C3,1), 0 },
2965 { "spsr_und", CPEN_(4,C3,2), 0 },
2966 { "spsr_fiq", CPEN_(4,C3,3), 0 },
2967 { "spsr_el3", CPEN_(6,C0,0), 0 },
2968 { "elr_el3", CPEN_(6,C0,1), 0 },
2969 { "sp_el2", CPEN_(6,C1,0), 0 },
2970 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
2971 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
2972 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
2973 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
2974 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
2975 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
2976 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
2977 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
2978 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
2979 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
2980 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
2981 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
2982 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
2983 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
2984 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
2985 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
2986 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
2987 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
2988 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
2989 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
2990 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
2991 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
2992 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
2993 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
2994 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
2995 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
2996 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
2997 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
2998 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
2999 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
3000 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
3001 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
3002 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
3003 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
3004 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
3005 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
3006 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
3007 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
3008 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
3009 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
3010 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3011 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3012 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3013 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3014 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3015 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3016 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3017 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3018 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3019 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3020 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3021 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3022 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3023 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3024 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3025 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3026 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3027 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3028 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3029 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3030 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3031 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3032 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3033 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3034 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3035 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3036 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3037 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3038 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3039 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3040 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3041 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3042 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3043 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3044 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3045 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3046 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3047 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3048 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3049 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3050 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3051 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3052 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3053 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3054 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
3055 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3056 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
3057 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3058 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
3059 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3060 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3061 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3062 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3063 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3064 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3065 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3066 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3067 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3068 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3069 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3070 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3071 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3072 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3073 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3074 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3075 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3076 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3077 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3078 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3079 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3080 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3081 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3082 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
3083 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
3084 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
3085 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3086 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3087 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3088 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
3089 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3090 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3091 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3092 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3093 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3094 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3095 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
3096 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3097 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3098 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3099 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3100 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
3101 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
3102 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
3103 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3104 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3105 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3106 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3107 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3108 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3109 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3110 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3111 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3112 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3113 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3114 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3115 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3116 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3117 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3118 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3119 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3120 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3121 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3122 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3123 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3124 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3125 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3126 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3127 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3128 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3129 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3130 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3131 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3132 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3133 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3134 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3135 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3136 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3137 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3138 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3139 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3140 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3141 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3142 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3143 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3144 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3145 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3146 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3147 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3148 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3149 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3150 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3151 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3152 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3153 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3154 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3155 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3156 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3157 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3158 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3159 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3160 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3161 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3162 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3163 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3164 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3165 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3166 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3167 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3168 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3169 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3170 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3171 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3172 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3173 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3174 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3175 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3176 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3177 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3178 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3179 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3180 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3181 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3182 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3183 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3184 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3185 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3186 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3187 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3188 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3189 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3190 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3191 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3192 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3193 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3194 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3195 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3196 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3197 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3198 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3199 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3200 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3201 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3202 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3203 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3204 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3205 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3206 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3207 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3208 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3209 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3210 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3211 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3212 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3213 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3214 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3215 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3216 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3217 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3218 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3219 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3220 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3221 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3222 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3223 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3224 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3225 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3226 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3227 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3228 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3229 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3230 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3231 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3232 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3233 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3234 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3235 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3236 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3237 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3238 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3239 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3240 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3241 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3242 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3243 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3244 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3245 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3246 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3247 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3248 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3249 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3250 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3251 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3252 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3253 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3254 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3255 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3256 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3257 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3258 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3259 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3260 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3261 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3262 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3263 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3264 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3265 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3266 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3267 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3268 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3269 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3270 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3271 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3272 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3273 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3274 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3275 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
3276 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
3277 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
3278 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
3279 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
3280 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
3281 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
3282 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
3283 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
3284 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
3285 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
3286 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
3287 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
3288 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3289 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3290 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3291 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3292 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3293 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3294 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3295 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3296 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3297 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3298 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3299 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3300 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3301 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3302 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3303 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3304 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
3305 { 0, CPENC(0,0,0,0,0), 0 },
3306 };
3307
3308 bfd_boolean
3309 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3310 {
3311 return (reg->flags & F_DEPRECATED) != 0;
3312 }
3313
/* Return TRUE if the system register described by REG is available with
   the feature set FEATURES.  Registers without the F_ARCHEXT flag belong
   to the base architecture and are always supported; otherwise REG's
   encoding (built with CPENC/CPEN_, matching aarch64_sys_regs) is tested
   against the architecture extension that introduces it.  */
bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  /* No extension flag: part of the base architecture.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
     ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  /* No extension-specific rule rejected it.  */
  return TRUE;
}
3420
/* PSTATE field names and their immediate encodings.  Entries flagged
   with F_ARCHEXT need an architecture extension; their availability is
   decided by aarch64_pstatefield_supported_p.  A null name terminates
   the table.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel", 0x05, 0 },
  { "daifset", 0x1e, 0 },
  { "daifclr", 0x1f, 0 },
  { "pan", 0x04, F_ARCHEXT },
  { "uao", 0x03, F_ARCHEXT },
  { 0, CPENC(0,0,0,0,0), 0 },
};
3430
3431 bfd_boolean
3432 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
3433 const aarch64_sys_reg *reg)
3434 {
3435 if (!(reg->flags & F_ARCHEXT))
3436 return TRUE;
3437
3438 /* PAN. Values are from aarch64_pstatefields. */
3439 if (reg->value == 0x04
3440 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3441 return FALSE;
3442
3443 /* UAO. Values are from aarch64_pstatefields. */
3444 if (reg->value == 0x03
3445 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3446 return FALSE;
3447
3448 return TRUE;
3449 }
3450
/* IC (instruction cache maintenance) operands for the SYS instruction.
   F_HASXT marks operations that take a general-purpose register operand
   (see aarch64_sys_ins_reg_has_xt).  A null name terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu", CPENS(0,C7,C5,0), 0 },
    { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
3458
/* DC (data cache maintenance) operands for the SYS instruction.
   F_HASXT marks operations taking a register operand; "cvap" is also
   F_ARCHEXT and gated by aarch64_sys_ins_reg_supported_p.  A null name
   terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva", CPENS (3, C7, C4, 1), F_HASXT },
    { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
    { "isw", CPENS (0, C7, C6, 2), F_HASXT },
    { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
    { "csw", CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "civac", CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
3472
/* AT (address translation) operands for the SYS instruction.  All take
   a register operand (F_HASXT); "s1e1rp"/"s1e1wp" are also F_ARCHEXT
   and gated by aarch64_sys_ins_reg_supported_p.  A null name terminates
   the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0, CPENS(0,0,0,0), 0 }
};
3491
/* TLBI (TLB invalidation) operands for the SYS instruction.  F_HASXT
   marks operations that take an optional register operand (see
   aarch64_sys_ins_reg_has_xt).  A null name terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1", CPENS(0,C8,C7,0), 0 },
    { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2", CPENS(4,C8,C7,0), 0 },
    { "alle2is", CPENS(4,C8,C3,0), 0 },
    { "alle1", CPENS(4,C8,C7,4), 0 },
    { "alle1is", CPENS(4,C8,C3,4), 0 },
    { "alle3", CPENS(6,C8,C7,0), 0 },
    { "alle3is", CPENS(6,C8,C3,0), 0 },
    { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
3528
3529 bfd_boolean
3530 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
3531 {
3532 return (sys_ins_reg->flags & F_HASXT) != 0;
3533 }
3534
3535 extern bfd_boolean
3536 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
3537 const aarch64_sys_ins_reg *reg)
3538 {
3539 if (!(reg->flags & F_ARCHEXT))
3540 return TRUE;
3541
3542 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
3543 if (reg->value == CPENS (3, C7, C12, 1)
3544 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3545 return FALSE;
3546
3547 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
3548 if ((reg->value == CPENS (0, C7, C9, 0)
3549 || reg->value == CPENS (0, C7, C9, 1))
3550 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3551 return FALSE;
3552
3553 return TRUE;
3554 }
3555
/* The C0..C15 shorthands were only needed for the register tables above;
   retire them before the opcode tables are included.  */
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* Extract bit BT of instruction word INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bitfield HI:LO of instruction word INSN.  The
   mask is built from an unsigned literal so that wide fields (up to 31
   bits) do not trigger signed-overflow undefined behaviour; a full
   32-bit extraction would still overflow the shift and must not be
   requested.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1u << (((HI) - (LO)) + 1)) - 1))
3575
3576 static bfd_boolean
3577 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
3578 const aarch64_insn insn)
3579 {
3580 int t = BITS (insn, 4, 0);
3581 int n = BITS (insn, 9, 5);
3582 int t2 = BITS (insn, 14, 10);
3583
3584 if (BIT (insn, 23))
3585 {
3586 /* Write back enabled. */
3587 if ((t == n || t2 == n) && n != 31)
3588 return FALSE;
3589 }
3590
3591 if (BIT (insn, 22))
3592 {
3593 /* Load */
3594 if (t == t2)
3595 return FALSE;
3596 }
3597
3598 return TRUE;
3599 }
3600
/* Include the opcode description table as well as the operand description
   table.  VERIFIER maps a verifier name used by the table to the
   corresponding verify_* routine defined above (e.g. verify_ldpsw).  */
#define VERIFIER(x) verify_##x
#include "aarch64-tbl.h"
This page took 0.118686 seconds and 5 git commands to generate.