d870fd6203c535b5e9d74056d0b1eed9a2be6861
[deliverable/binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30
31 #include "aarch64-opc.h"
32
33 #ifdef DEBUG_AARCH64
34 int debug_dump = FALSE;
35 #endif /* DEBUG_AARCH64 */
36
37 /* Helper functions to determine which operand to be used to encode/decode
38 the size:Q fields for AdvSIMD instructions. */
39
40 static inline bfd_boolean
41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
42 {
43 return ((qualifier >= AARCH64_OPND_QLF_V_8B
44 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
45 : FALSE);
46 }
47
48 static inline bfd_boolean
49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
50 {
51 return ((qualifier >= AARCH64_OPND_QLF_S_B
52 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
53 : FALSE);
54 }
55
/* Classification of the qualifier pattern of an AdvSIMD instruction's
   operand sequence; used to decide which operand carries the size:Q
   information (see significant_operand_index below).  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,		/* e.g. v.4s, v.4s, v.4s.  */
  DP_VECTOR_LONG,		/* e.g. v.8h, v.8b, v.8b.  */
  DP_VECTOR_WIDE,		/* e.g. v.8h, v.8h, v.8b.  */
  DP_VECTOR_ACROSS_LANES,	/* e.g. SADDLV <V><d>, <Vn>.<T>.  */
};
64
/* For each data pattern, the index of the operand whose qualifier
   determines the encoding of the size:Q fields.  Indexed by
   enum data_pattern, so the order here must match that enumeration.  */
static const char significant_operand_index [] =
{
  0, /* DP_UNKNOWN, by default using operand 0.  */
  0, /* DP_VECTOR_3SAME */
  1, /* DP_VECTOR_LONG */
  2, /* DP_VECTOR_WIDE */
  1, /* DP_VECTOR_ACROSS_LANES */
};
73
74 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
75 the data pattern.
76 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
77 corresponds to one of a sequence of operands. */
78
79 static enum data_pattern
80 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
81 {
82 if (vector_qualifier_p (qualifiers[0]) == TRUE)
83 {
84 /* e.g. v.4s, v.4s, v.4s
85 or v.4h, v.4h, v.h[3]. */
86 if (qualifiers[0] == qualifiers[1]
87 && vector_qualifier_p (qualifiers[2]) == TRUE
88 && (aarch64_get_qualifier_esize (qualifiers[0])
89 == aarch64_get_qualifier_esize (qualifiers[1]))
90 && (aarch64_get_qualifier_esize (qualifiers[0])
91 == aarch64_get_qualifier_esize (qualifiers[2])))
92 return DP_VECTOR_3SAME;
93 /* e.g. v.8h, v.8b, v.8b.
94 or v.4s, v.4h, v.h[2].
95 or v.8h, v.16b. */
96 if (vector_qualifier_p (qualifiers[1]) == TRUE
97 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
98 && (aarch64_get_qualifier_esize (qualifiers[0])
99 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
100 return DP_VECTOR_LONG;
101 /* e.g. v.8h, v.8h, v.8b. */
102 if (qualifiers[0] == qualifiers[1]
103 && vector_qualifier_p (qualifiers[2]) == TRUE
104 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
105 && (aarch64_get_qualifier_esize (qualifiers[0])
106 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
107 && (aarch64_get_qualifier_esize (qualifiers[0])
108 == aarch64_get_qualifier_esize (qualifiers[1])))
109 return DP_VECTOR_WIDE;
110 }
111 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
112 {
113 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
114 if (vector_qualifier_p (qualifiers[1]) == TRUE
115 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
116 return DP_VECTOR_ACROSS_LANES;
117 }
118
119 return DP_UNKNOWN;
120 }
121
/* Select the operand to do the encoding/decoding of the 'size:Q' fields in
   the AdvSIMD instructions.  */
/* N.B. it is possible to do some optimization that doesn't call
   get_data_pattern each time when we need to select an operand.  We can
   either buffer the calculated result or statically generate the data;
   however, it is not obvious that the optimization will bring significant
   benefit.  */

/* Return the index of the operand (within OPCODE's first qualifier
   sequence) that should be used to encode/decode the size:Q fields.  */
int
aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
{
  return
    significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
}
136 \f
/* Descriptions of the instruction bit-fields: each entry is
   { lsb, width } in bits within the 32-bit instruction word.
   NOTE(review): this table is indexed by the field-kind enumeration
   declared elsewhere (aarch64-opc.h) -- the order of entries must match
   that enumeration exactly; verify when adding or reordering entries.  */
const aarch64_field fields[] =
{
  { 0, 0 },	/* NIL.  */
  { 0, 4 },	/* cond2: condition in truly conditional-executed inst.  */
  { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
  { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
  { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
  { 5, 19 },	/* imm19: e.g. in CBZ.  */
  { 5, 19 },	/* immhi: e.g. in ADRP.  */
  { 29, 2 },	/* immlo: e.g. in ADRP.  */
  { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
  { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
  { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
  { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
  { 0, 5 },	/* Rt: in load/store instructions.  */
  { 0, 5 },	/* Rd: in many integer instructions.  */
  { 5, 5 },	/* Rn: in many integer instructions.  */
  { 10, 5 },	/* Rt2: in load/store pair instructions.  */
  { 10, 5 },	/* Ra: in fp instructions.  */
  { 5, 3 },	/* op2: in the system instructions.  */
  { 8, 4 },	/* CRm: in the system instructions.  */
  { 12, 4 },	/* CRn: in the system instructions.  */
  { 16, 3 },	/* op1: in the system instructions.  */
  { 19, 2 },	/* op0: in the system instructions.  */
  { 10, 3 },	/* imm3: in add/sub extended reg instructions.  */
  { 12, 4 },	/* cond: condition flags as a source operand.  */
  { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
  { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
  { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
  { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
  { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
  { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
  { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
  { 12, 1 },	/* S: in load/store reg offset instructions.  */
  { 21, 2 },	/* hw: in move wide constant instructions.  */
  { 22, 2 },	/* opc: in load/store reg offset instructions.  */
  { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
  { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
  { 22, 2 },	/* type: floating point type field in fp data inst.  */
  { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
  { 10, 6 },	/* imm6: in add/sub reg shifted instructions.  */
  { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
  { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
  { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
  { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
  { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
  { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
  { 5, 14 },	/* imm14: in test bit and branch instructions.  */
  { 5, 16 },	/* imm16: in exception instructions.  */
  { 0, 26 },	/* imm26: in unconditional branch instructions.  */
  { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
  { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
  { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
  { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
  { 22, 1 },	/* N: in logical (immediate) instructions.  */
  { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
  { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
  { 31, 1 },	/* sf: in integer data processing instructions.  */
  { 30, 1 },	/* lse_size: in LSE extension atomic instructions.  */
  { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
  { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
  { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
  { 31, 1 },	/* b5: in the test bit and branch instructions.  */
  { 19, 5 },	/* b40: in the test bit and branch instructions.  */
  { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
};
203
/* Return the operand class (e.g. INT_REG) of operand TYPE, looked up in
   the generated aarch64_operands table.  */
enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd type)
{
  return aarch64_operands[type].op_class;
}
209
/* Return the printable name of operand TYPE, looked up in the generated
   aarch64_operands table.  */
const char *
aarch64_get_operand_name (enum aarch64_opnd type)
{
  return aarch64_operands[type].name;
}
215
/* Get the operand description string for operand TYPE.
   This is usually for the diagnosis purpose, e.g. error messages.  */
const char *
aarch64_get_operand_desc (enum aarch64_opnd type)
{
  return aarch64_operands[type].desc;
}
223
/* Table of all conditional affixes.  Indexed by the 4-bit condition
   encoding, and each entry's value equals its index.  Alternative
   spellings of a condition share one entry (e.g. "cs"/"hs").  Flipping
   the low bit of the value yields the inverse condition (see
   get_inverted_cond).  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq"}, 0x0},
  {{"ne"}, 0x1},
  {{"cs", "hs"}, 0x2},
  {{"cc", "lo", "ul"}, 0x3},
  {{"mi"}, 0x4},
  {{"pl"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi"}, 0x8},
  {{"ls"}, 0x9},
  {{"ge"}, 0xa},
  {{"lt"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
244
245 const aarch64_cond *
246 get_cond_from_value (aarch64_insn value)
247 {
248 assert (value < 16);
249 return &aarch64_conds[(unsigned int) value];
250 }
251
252 const aarch64_cond *
253 get_inverted_cond (const aarch64_cond *cond)
254 {
255 return &aarch64_conds[cond->value ^ 0x1];
256 }
257
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind, so the order of entries must match that
   enumeration (aarch64_get_operand_modifier relies on pointer
   subtraction against this table).  Terminated by a NULL name.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
  {NULL, 0},
};
281
/* Map DESC back to its enum aarch64_modifier_kind by pointer
   subtraction; DESC must point into aarch64_operand_modifiers.  */
enum aarch64_modifier_kind
aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
{
  return desc - aarch64_operand_modifiers;
}
287
/* Return the common encoding value of modifier KIND (the value column
   of aarch64_operand_modifiers).  */
aarch64_insn
aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
{
  return aarch64_operand_modifiers[kind].value;
}
293
294 enum aarch64_modifier_kind
295 aarch64_get_operand_modifier_from_value (aarch64_insn value,
296 bfd_boolean extend_p)
297 {
298 if (extend_p == TRUE)
299 return AARCH64_MOD_UXTB + value;
300 else
301 return AARCH64_MOD_LSL - value;
302 }
303
304 bfd_boolean
305 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
306 {
307 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
308 ? TRUE : FALSE;
309 }
310
311 static inline bfd_boolean
312 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
313 {
314 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
315 ? TRUE : FALSE;
316 }
317
/* Barrier option names, indexed by (and with value equal to) the 4-bit
   CRm encoding.  Entries spelled "#0xNN" are encodings with no named
   option; the immediate spelling is used instead.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
  { "#0x00", 0x0 },
  { "oshld", 0x1 },
  { "oshst", 0x2 },
  { "osh",   0x3 },
  { "#0x04", 0x4 },
  { "nshld", 0x5 },
  { "nshst", 0x6 },
  { "nsh",   0x7 },
  { "#0x08", 0x8 },
  { "ishld", 0x9 },
  { "ishst", 0xa },
  { "ish",   0xb },
  { "#0x0c", 0xc },
  { "ld",    0xd },
  { "st",    0xe },
  { "sy",    0xf },
};
337
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },    /* PSB CSYNC.  */
  { NULL, 0x0 },
};
350
/* PRFM <prfop> operand names, indexed by (and with value equal to) the
   5-bit prfop encoding.  A NULL name marks an encoding with no named
   operation.  The B macro packs the three sub-fields:
   op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
391 \f
392 /* Utilities on value constraint. */
393
/* Return 1 iff VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return low <= value && value <= high;
}
399
/* Return 1 iff VALUE is a multiple of ALIGN.
   ALIGN is expected to be a power of two (mask test).  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return (value & (align - 1)) ? 0 : 1;
}
405
/* Return 1 iff the signed VALUE is representable in a two's-complement
   field of WIDTH bits, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1).
   WIDTH must be in [1, 31] (asserted upper bound; callers pass >= 1).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;

  {
    const int64_t bound = (int64_t) 1 << (width - 1);
    return value >= -bound && value < bound;
  }
}
419
/* Return 1 iff the non-negative VALUE is representable in an unsigned
   field of WIDTH bits, i.e. 0 <= VALUE < 2^WIDTH.
   WIDTH must be less than 32 (asserted).  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;

  return value >= 0 && value < ((int64_t) 1 << width);
}
433
/* Return 1 if OPERAND is SP or WSP: an integer-register operand whose
   operand type may name the stack pointer and whose register number
   is 31.  */
int
aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
443
/* Return 1 if OPERAND is XZR or WZR: an integer-register operand whose
   operand type cannot name the stack pointer and whose register number
   is 31 (so encoding 31 means the zero register here).  */
int
aarch64_zero_register_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
453
454 /* Return true if the operand *OPERAND that has the operand code
455 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
456 qualified by the qualifier TARGET. */
457
458 static inline int
459 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
460 aarch64_opnd_qualifier_t target)
461 {
462 switch (operand->qualifier)
463 {
464 case AARCH64_OPND_QLF_W:
465 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
466 return 1;
467 break;
468 case AARCH64_OPND_QLF_X:
469 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
470 return 1;
471 break;
472 case AARCH64_OPND_QLF_WSP:
473 if (target == AARCH64_OPND_QLF_W
474 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
475 return 1;
476 break;
477 case AARCH64_OPND_QLF_SP:
478 if (target == AARCH64_OPND_QLF_X
479 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
480 return 1;
481 break;
482 default:
483 break;
484 }
485
486 return 0;
487 }
488
489 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
490 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
491
492 Return NIL if more than one expected qualifiers are found. */
493
494 aarch64_opnd_qualifier_t
495 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
496 int idx,
497 const aarch64_opnd_qualifier_t known_qlf,
498 int known_idx)
499 {
500 int i, saved_i;
501
502 /* Special case.
503
504 When the known qualifier is NIL, we have to assume that there is only
505 one qualifier sequence in the *QSEQ_LIST and return the corresponding
506 qualifier directly. One scenario is that for instruction
507 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
508 which has only one possible valid qualifier sequence
509 NIL, S_D
510 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
511 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
512
513 Because the qualifier NIL has dual roles in the qualifier sequence:
514 it can mean no qualifier for the operand, or the qualifer sequence is
515 not in use (when all qualifiers in the sequence are NILs), we have to
516 handle this special case here. */
517 if (known_qlf == AARCH64_OPND_NIL)
518 {
519 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
520 return qseq_list[0][idx];
521 }
522
523 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
524 {
525 if (qseq_list[i][known_idx] == known_qlf)
526 {
527 if (saved_i != -1)
528 /* More than one sequences are found to have KNOWN_QLF at
529 KNOWN_IDX. */
530 return AARCH64_OPND_NIL;
531 saved_i = i;
532 }
533 }
534
535 return qseq_list[saved_i][idx];
536 }
537
/* Kinds of operand qualifier, determining how the three data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,		/* No qualifier.  */
  OQK_OPD_VARIANT,	/* Register/vector variant, e.g. "x", "4s".  */
  OQK_VALUE_IN_RANGE,	/* Immediate value range constraint.  */
  OQK_MISC,		/* Anything else, e.g. "lsl", "msl".  */
};
545
/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     OQK_OPD_VARIANT:    element size, number of elements, encoding value.
     OQK_VALUE_IN_RANGE: lower bound, upper bound, unused.
     otherwise:          unused.  */
  int data0;
  int data1;
  int data2;
  /* Description, i.e. the printable qualifier name.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
558
/* Indexed by the operand qualifier enumerators (aarch64_opnd_qualifier_t);
   the order of entries must match that enumeration.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size (bytes), number of elements and common value for
     encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.
     NOTE(review): the kind field of these entries is 0, i.e. OQK_NIL;
     OQK_MISC looks intended.  Harmless as only OQK_OPD_VARIANT and
     OQK_VALUE_IN_RANGE are tested in this file, but verify no other code
     tests OQK_NIL before changing.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
610
611 static inline bfd_boolean
612 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
613 {
614 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
615 ? TRUE : FALSE;
616 }
617
618 static inline bfd_boolean
619 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
620 {
621 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
622 ? TRUE : FALSE;
623 }
624
/* Return the printable name of QUALIFIER, e.g. "4s" or "imm_0_7".  */
const char*
aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].desc;
}
630
/* Given an operand qualifier, return the expected data element size
   (in bytes) of a qualified operand.  QUALIFIER must be an
   operand-variant qualifier (asserted).  */
unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}
639
/* Return the number of data elements of a qualified operand, e.g. 16
   for "16b".  QUALIFIER must be an operand-variant qualifier
   (asserted).  */
unsigned char
aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}
646
/* Return the common value for encoding QUALIFIER (the data2 field of
   the qualifier table).  QUALIFIER must be an operand-variant qualifier
   (asserted).  */
aarch64_insn
aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data2;
}
653
/* Return the inclusive lower bound of a value-range qualifier such as
   imm_1_32.  QUALIFIER must be of kind OQK_VALUE_IN_RANGE (asserted).  */
static int
get_lower_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}
660
/* Return the inclusive upper bound of a value-range qualifier such as
   imm_1_32.  QUALIFIER must be of kind OQK_VALUE_IN_RANGE (asserted).  */
static int
get_upper_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}
667
668 #ifdef DEBUG_AARCH64
/* Debug helper (built only with DEBUG_AARCH64): printf-style trace
   output prefixed with "#### " and terminated with a newline.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}
679
/* Debug helper: print the names of AARCH64_MAX_OPND_NUM qualifiers
   starting at QUALIFIER on one "#### "-prefixed line.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}
689
/* Debug helper: print the qualifier sequence currently carried by the
   operands OPND alongside the candidate sequence QUALIFIER that it is
   being matched against.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
704 #endif /* DEBUG_AARCH64 */
705
706 /* TODO improve this, we can have an extra field at the runtime to
707 store the number of operands rather than calculating it every time. */
708
709 int
710 aarch64_num_of_operands (const aarch64_opcode *opcode)
711 {
712 int i = 0;
713 const enum aarch64_opnd *opnds = opcode->operands;
714 while (opnds[i++] != AARCH64_OPND_NIL)
715 ;
716 --i;
717 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
718 return i;
719 }
720
721 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
722 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
723
724 N.B. on the entry, it is very likely that only some operands in *INST
725 have had their qualifiers been established.
726
727 If STOP_AT is not -1, the function will only try to match
728 the qualifier sequence for operands before and including the operand
729 of index STOP_AT; and on success *RET will only be filled with the first
730 (STOP_AT+1) qualifiers.
731
732 A couple examples of the matching algorithm:
733
734 X,W,NIL should match
735 X,W,NIL
736
737 NIL,NIL should match
738 X ,NIL
739
740 Apart from serving the main encoding routine, this can also be called
741 during or after the operand decoding. */
742
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      /* No operands at all: trivially matched.  */
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  /* Clamp STOP_AT into [0, num_opnds - 1]; -1 (or out of range) means
     "match every operand".  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes have far fewer patterns than AARCH64_MAX_QLF_SEQ_NUM.
	 The first all-NIL qualifier sequence indicates the end of the
	 list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  /* An empty first sequence means "no qualifiers needed", which
	     counts as success; reaching it after i > 0 means the real
	     sequences were exhausted without a match.  */
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  On success the loop above exited via
	 "break", so *QUALIFIERS_LIST still points at the matched
	 sequence.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the matched sequence up to STOP_AT; pad the rest with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
845
/* Operand qualifier matching and resolving.

   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.

   If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
   succeeds.  */

static int
match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
{
  int i, nops;
  aarch64_opnd_qualifier_seq_t qualifiers;

  /* Find the best-matching sequence; on success QUALIFIERS holds the
     fully-resolved sequence (NILs deduced).  */
  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
			       qualifiers))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  if (inst->opcode->flags & F_STRICT)
    {
      /* Require an exact qualifier match, even for NIL qualifiers.  */
      nops = aarch64_num_of_operands (inst->opcode);
      for (i = 0; i < nops; ++i)
	if (inst->operands[i].qualifier != qualifiers[i])
	  return FALSE;
    }

  /* Update the qualifiers.  */
  if (update_p == TRUE)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
892
893 /* Return TRUE if VALUE is a wide constant that can be moved into a general
894 register by MOVZ.
895
896 IS32 indicates whether value is a 32-bit immediate or not.
897 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
898 amount will be returned in *SHIFT_AMOUNT. */
899
900 bfd_boolean
901 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
902 {
903 int amount;
904
905 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
906
907 if (is32)
908 {
909 /* Allow all zeros or all ones in top 32-bits, so that
910 32-bit constant expressions like ~0x80000000 are
911 permitted. */
912 uint64_t ext = value;
913 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
914 /* Immediate out of range. */
915 return FALSE;
916 value &= (int64_t) 0xffffffff;
917 }
918
919 /* first, try movz then movn */
920 amount = -1;
921 if ((value & ((int64_t) 0xffff << 0)) == value)
922 amount = 0;
923 else if ((value & ((int64_t) 0xffff << 16)) == value)
924 amount = 16;
925 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
926 amount = 32;
927 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
928 amount = 48;
929
930 if (amount == -1)
931 {
932 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
933 return FALSE;
934 }
935
936 if (shift_amount != NULL)
937 *shift_amount = amount;
938
939 DEBUG_TRACE ("exit TRUE with amount %d", amount);
940
941 return TRUE;
942 }
943
944 /* Build the accepted values for immediate logical SIMD instructions.
945
946 The standard encodings of the immediate value are:
947 N imms immr SIMD size R S
948 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
949 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
950 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
951 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
952 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
953 0 11110s 00000r 2 UInt(r) UInt(s)
954 where all-ones value of S is reserved.
955
956 Let's call E the SIMD size.
957
958 The immediate value is: S+1 bits '1' rotated to the right by R.
959
960 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
961 (remember S != E - 1). */
962
/* Number of valid logical-immediate encodings:
   64*63 + 32*31 + ... + 2*1 = 5334 (see the comment above).  */
#define TOTAL_IMM_NB 5334

/* One (value, standard encoding) pair of the logical-immediate table.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all valid logical immediates; filled and sorted by imm (for
   bsearch) on first use by build_immediate_table.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
972
973 static int
974 simd_imm_encoding_cmp(const void *i1, const void *i2)
975 {
976 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
977 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
978
979 if (imm1->imm < imm2->imm)
980 return -1;
981 if (imm1->imm > imm2->imm)
982 return +1;
983 return 0;
984 }
985
986 /* immediate bitfield standard encoding
987 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
988 1 ssssss rrrrrr 64 rrrrrr ssssss
989 0 0sssss 0rrrrr 32 rrrrr sssss
990 0 10ssss 00rrrr 16 rrrr ssss
991 0 110sss 000rrr 8 rrr sss
992 0 1110ss 0000rr 4 rr ss
993 0 11110s 00000r 2 r s */
/* Pack the standard imm13 encoding: bit 12 = N (IS64), bits 11:6 = immr
   (R), bits 5:0 = imms (S); see the layout table above.  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  uint32_t encoded = (uint32_t) is64 << 12;
  encoded |= r << 6;
  encoded |= s;
  return encoded;
}
999
/* Fill simd_immediates with every valid logical-immediate value and its
   standard encoding (see the table in the comment above), then sort it
   by value so aarch64_logical_immediate_p can bsearch it.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size.
	       Each case deliberately falls through to the next, doubling
	       the replicated width until the full 64 bits are filled.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1060
1061 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1062 be accepted by logical (immediate) instructions
1063 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1064
1065 IS32 indicates whether or not VALUE is a 32-bit immediate.
1066 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1067 VALUE will be returned in *ENCODING. */
1068
1069 bfd_boolean
1070 aarch64_logical_immediate_p (uint64_t value, int is32, aarch64_insn *encoding)
1071 {
1072 simd_imm_encoding imm_enc;
1073 const simd_imm_encoding *imm_encoding;
1074 static bfd_boolean initialized = FALSE;
1075
1076 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1077 value, is32);
1078
1079 if (initialized == FALSE)
1080 {
1081 build_immediate_table ();
1082 initialized = TRUE;
1083 }
1084
1085 if (is32)
1086 {
1087 /* Allow all zeros or all ones in top 32-bits, so that
1088 constant expressions like ~1 are permitted. */
1089 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1090 return FALSE;
1091
1092 /* Replicate the 32 lower bits to the 32 upper bits. */
1093 value &= 0xffffffff;
1094 value |= value << 32;
1095 }
1096
1097 imm_enc.imm = value;
1098 imm_encoding = (const simd_imm_encoding *)
1099 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1100 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1101 if (imm_encoding == NULL)
1102 {
1103 DEBUG_TRACE ("exit with FALSE");
1104 return FALSE;
1105 }
1106 if (encoding != NULL)
1107 *encoding = imm_encoding->encoding;
1108 DEBUG_TRACE ("exit with TRUE");
1109 return TRUE;
1110 }
1111
/* If 64-bit immediate IMM consists of eight bytes each of which is either
   0x00 or 0xff, i.e. it is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an
   integer of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int pos;

  for (pos = 0; pos < 8; pos++)
    {
      uint32_t field = (imm >> (8 * pos)) & 0xff;
      switch (field)
	{
	case 0xff:
	  result |= 1 << pos;
	  break;
	case 0x00:
	  break;
	default:
	  /* A byte that is neither all-zeros nor all-ones: not shrinkable.  */
	  return -1;
	}
    }
  return result;
}
1133
1134 /* Utility inline functions for operand_general_constraint_met_p. */
1135
1136 static inline void
1137 set_error (aarch64_operand_error *mismatch_detail,
1138 enum aarch64_operand_error_kind kind, int idx,
1139 const char* error)
1140 {
1141 if (mismatch_detail == NULL)
1142 return;
1143 mismatch_detail->kind = kind;
1144 mismatch_detail->index = idx;
1145 mismatch_detail->error = error;
1146 }
1147
1148 static inline void
1149 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1150 const char* error)
1151 {
1152 if (mismatch_detail == NULL)
1153 return;
1154 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1155 }
1156
1157 static inline void
1158 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1159 int idx, int lower_bound, int upper_bound,
1160 const char* error)
1161 {
1162 if (mismatch_detail == NULL)
1163 return;
1164 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1165 mismatch_detail->data[0] = lower_bound;
1166 mismatch_detail->data[1] = upper_bound;
1167 }
1168
1169 static inline void
1170 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1171 int idx, int lower_bound, int upper_bound)
1172 {
1173 if (mismatch_detail == NULL)
1174 return;
1175 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1176 _("immediate value"));
1177 }
1178
1179 static inline void
1180 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1181 int idx, int lower_bound, int upper_bound)
1182 {
1183 if (mismatch_detail == NULL)
1184 return;
1185 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1186 _("immediate offset"));
1187 }
1188
1189 static inline void
1190 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1191 int idx, int lower_bound, int upper_bound)
1192 {
1193 if (mismatch_detail == NULL)
1194 return;
1195 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1196 _("register number"));
1197 }
1198
1199 static inline void
1200 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1201 int idx, int lower_bound, int upper_bound)
1202 {
1203 if (mismatch_detail == NULL)
1204 return;
1205 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1206 _("register element index"));
1207 }
1208
1209 static inline void
1210 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1211 int idx, int lower_bound, int upper_bound)
1212 {
1213 if (mismatch_detail == NULL)
1214 return;
1215 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1216 _("shift amount"));
1217 }
1218
1219 static inline void
1220 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1221 int alignment)
1222 {
1223 if (mismatch_detail == NULL)
1224 return;
1225 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1226 mismatch_detail->data[0] = alignment;
1227 }
1228
1229 static inline void
1230 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1231 int expected_num)
1232 {
1233 if (mismatch_detail == NULL)
1234 return;
1235 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1236 mismatch_detail->data[0] = expected_num;
1237 }
1238
1239 static inline void
1240 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1241 const char* error)
1242 {
1243 if (mismatch_detail == NULL)
1244 return;
1245 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1246 }
1247
1248 /* General constraint checking based on operand code.
1249
1250 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1251 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1252
1253 This function has to be called after the qualifiers for all operands
1254 have been resolved.
1255
1256 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1257 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1258 of error message during the disassembling where error message is not
1259 wanted. We avoid the dynamic construction of strings of error messages
1260 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1261 use a combination of error code, static string and some integer data to
1262 represent an error. */
1263
1264 static int
1265 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1266 enum aarch64_opnd type,
1267 const aarch64_opcode *opcode,
1268 aarch64_operand_error *mismatch_detail)
1269 {
1270 unsigned num;
1271 unsigned char size;
1272 int64_t imm;
1273 const aarch64_opnd_info *opnd = opnds + idx;
1274 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1275
1276 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1277
1278 switch (aarch64_operands[type].op_class)
1279 {
1280 case AARCH64_OPND_CLASS_INT_REG:
1281 /* Check pair reg constraints for cas* instructions. */
1282 if (type == AARCH64_OPND_PAIRREG)
1283 {
1284 assert (idx == 1 || idx == 3);
1285 if (opnds[idx - 1].reg.regno % 2 != 0)
1286 {
1287 set_syntax_error (mismatch_detail, idx - 1,
1288 _("reg pair must start from even reg"));
1289 return 0;
1290 }
1291 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1292 {
1293 set_syntax_error (mismatch_detail, idx,
1294 _("reg pair must be contiguous"));
1295 return 0;
1296 }
1297 break;
1298 }
1299
1300 /* <Xt> may be optional in some IC and TLBI instructions. */
1301 if (type == AARCH64_OPND_Rt_SYS)
1302 {
1303 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1304 == AARCH64_OPND_CLASS_SYSTEM));
1305 if (opnds[1].present
1306 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1307 {
1308 set_other_error (mismatch_detail, idx, _("extraneous register"));
1309 return 0;
1310 }
1311 if (!opnds[1].present
1312 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1313 {
1314 set_other_error (mismatch_detail, idx, _("missing register"));
1315 return 0;
1316 }
1317 }
1318 switch (qualifier)
1319 {
1320 case AARCH64_OPND_QLF_WSP:
1321 case AARCH64_OPND_QLF_SP:
1322 if (!aarch64_stack_pointer_p (opnd))
1323 {
1324 set_other_error (mismatch_detail, idx,
1325 _("stack pointer register expected"));
1326 return 0;
1327 }
1328 break;
1329 default:
1330 break;
1331 }
1332 break;
1333
1334 case AARCH64_OPND_CLASS_COND:
1335 if (type == AARCH64_OPND_COND1
1336 && (opnds[idx].cond->value & 0xe) == 0xe)
1337 {
1338 /* Not allow AL or NV. */
1339 set_syntax_error (mismatch_detail, idx, NULL);
1340 }
1341 break;
1342
1343 case AARCH64_OPND_CLASS_ADDRESS:
1344 /* Check writeback. */
1345 switch (opcode->iclass)
1346 {
1347 case ldst_pos:
1348 case ldst_unscaled:
1349 case ldstnapair_offs:
1350 case ldstpair_off:
1351 case ldst_unpriv:
1352 if (opnd->addr.writeback == 1)
1353 {
1354 set_syntax_error (mismatch_detail, idx,
1355 _("unexpected address writeback"));
1356 return 0;
1357 }
1358 break;
1359 case ldst_imm9:
1360 case ldstpair_indexed:
1361 case asisdlsep:
1362 case asisdlsop:
1363 if (opnd->addr.writeback == 0)
1364 {
1365 set_syntax_error (mismatch_detail, idx,
1366 _("address writeback expected"));
1367 return 0;
1368 }
1369 break;
1370 default:
1371 assert (opnd->addr.writeback == 0);
1372 break;
1373 }
1374 switch (type)
1375 {
1376 case AARCH64_OPND_ADDR_SIMM7:
1377 /* Scaled signed 7 bits immediate offset. */
1378 /* Get the size of the data element that is accessed, which may be
1379 different from that of the source register size,
1380 e.g. in strb/ldrb. */
1381 size = aarch64_get_qualifier_esize (opnd->qualifier);
1382 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1383 {
1384 set_offset_out_of_range_error (mismatch_detail, idx,
1385 -64 * size, 63 * size);
1386 return 0;
1387 }
1388 if (!value_aligned_p (opnd->addr.offset.imm, size))
1389 {
1390 set_unaligned_error (mismatch_detail, idx, size);
1391 return 0;
1392 }
1393 break;
1394 case AARCH64_OPND_ADDR_SIMM9:
1395 /* Unscaled signed 9 bits immediate offset. */
1396 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1397 {
1398 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1399 return 0;
1400 }
1401 break;
1402
1403 case AARCH64_OPND_ADDR_SIMM9_2:
1404 /* Unscaled signed 9 bits immediate offset, which has to be negative
1405 or unaligned. */
1406 size = aarch64_get_qualifier_esize (qualifier);
1407 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1408 && !value_aligned_p (opnd->addr.offset.imm, size))
1409 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1410 return 1;
1411 set_other_error (mismatch_detail, idx,
1412 _("negative or unaligned offset expected"));
1413 return 0;
1414
1415 case AARCH64_OPND_SIMD_ADDR_POST:
1416 /* AdvSIMD load/store multiple structures, post-index. */
1417 assert (idx == 1);
1418 if (opnd->addr.offset.is_reg)
1419 {
1420 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1421 return 1;
1422 else
1423 {
1424 set_other_error (mismatch_detail, idx,
1425 _("invalid register offset"));
1426 return 0;
1427 }
1428 }
1429 else
1430 {
1431 const aarch64_opnd_info *prev = &opnds[idx-1];
1432 unsigned num_bytes; /* total number of bytes transferred. */
1433 /* The opcode dependent area stores the number of elements in
1434 each structure to be loaded/stored. */
1435 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1436 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1437 /* Special handling of loading single structure to all lane. */
1438 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1439 * aarch64_get_qualifier_esize (prev->qualifier);
1440 else
1441 num_bytes = prev->reglist.num_regs
1442 * aarch64_get_qualifier_esize (prev->qualifier)
1443 * aarch64_get_qualifier_nelem (prev->qualifier);
1444 if ((int) num_bytes != opnd->addr.offset.imm)
1445 {
1446 set_other_error (mismatch_detail, idx,
1447 _("invalid post-increment amount"));
1448 return 0;
1449 }
1450 }
1451 break;
1452
1453 case AARCH64_OPND_ADDR_REGOFF:
1454 /* Get the size of the data element that is accessed, which may be
1455 different from that of the source register size,
1456 e.g. in strb/ldrb. */
1457 size = aarch64_get_qualifier_esize (opnd->qualifier);
1458 /* It is either no shift or shift by the binary logarithm of SIZE. */
1459 if (opnd->shifter.amount != 0
1460 && opnd->shifter.amount != (int)get_logsz (size))
1461 {
1462 set_other_error (mismatch_detail, idx,
1463 _("invalid shift amount"));
1464 return 0;
1465 }
1466 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1467 operators. */
1468 switch (opnd->shifter.kind)
1469 {
1470 case AARCH64_MOD_UXTW:
1471 case AARCH64_MOD_LSL:
1472 case AARCH64_MOD_SXTW:
1473 case AARCH64_MOD_SXTX: break;
1474 default:
1475 set_other_error (mismatch_detail, idx,
1476 _("invalid extend/shift operator"));
1477 return 0;
1478 }
1479 break;
1480
1481 case AARCH64_OPND_ADDR_UIMM12:
1482 imm = opnd->addr.offset.imm;
1483 /* Get the size of the data element that is accessed, which may be
1484 different from that of the source register size,
1485 e.g. in strb/ldrb. */
1486 size = aarch64_get_qualifier_esize (qualifier);
1487 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1488 {
1489 set_offset_out_of_range_error (mismatch_detail, idx,
1490 0, 4095 * size);
1491 return 0;
1492 }
1493 if (!value_aligned_p (opnd->addr.offset.imm, size))
1494 {
1495 set_unaligned_error (mismatch_detail, idx, size);
1496 return 0;
1497 }
1498 break;
1499
1500 case AARCH64_OPND_ADDR_PCREL14:
1501 case AARCH64_OPND_ADDR_PCREL19:
1502 case AARCH64_OPND_ADDR_PCREL21:
1503 case AARCH64_OPND_ADDR_PCREL26:
1504 imm = opnd->imm.value;
1505 if (operand_need_shift_by_two (get_operand_from_code (type)))
1506 {
1507 /* The offset value in a PC-relative branch instruction is alway
1508 4-byte aligned and is encoded without the lowest 2 bits. */
1509 if (!value_aligned_p (imm, 4))
1510 {
1511 set_unaligned_error (mismatch_detail, idx, 4);
1512 return 0;
1513 }
1514 /* Right shift by 2 so that we can carry out the following check
1515 canonically. */
1516 imm >>= 2;
1517 }
1518 size = get_operand_fields_width (get_operand_from_code (type));
1519 if (!value_fit_signed_field_p (imm, size))
1520 {
1521 set_other_error (mismatch_detail, idx,
1522 _("immediate out of range"));
1523 return 0;
1524 }
1525 break;
1526
1527 default:
1528 break;
1529 }
1530 break;
1531
1532 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1533 if (type == AARCH64_OPND_LEt)
1534 {
1535 /* Get the upper bound for the element index. */
1536 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1537 if (!value_in_range_p (opnd->reglist.index, 0, num))
1538 {
1539 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1540 return 0;
1541 }
1542 }
1543 /* The opcode dependent area stores the number of elements in
1544 each structure to be loaded/stored. */
1545 num = get_opcode_dependent_value (opcode);
1546 switch (type)
1547 {
1548 case AARCH64_OPND_LVt:
1549 assert (num >= 1 && num <= 4);
1550 /* Unless LD1/ST1, the number of registers should be equal to that
1551 of the structure elements. */
1552 if (num != 1 && opnd->reglist.num_regs != num)
1553 {
1554 set_reg_list_error (mismatch_detail, idx, num);
1555 return 0;
1556 }
1557 break;
1558 case AARCH64_OPND_LVt_AL:
1559 case AARCH64_OPND_LEt:
1560 assert (num >= 1 && num <= 4);
1561 /* The number of registers should be equal to that of the structure
1562 elements. */
1563 if (opnd->reglist.num_regs != num)
1564 {
1565 set_reg_list_error (mismatch_detail, idx, num);
1566 return 0;
1567 }
1568 break;
1569 default:
1570 break;
1571 }
1572 break;
1573
1574 case AARCH64_OPND_CLASS_IMMEDIATE:
1575 /* Constraint check on immediate operand. */
1576 imm = opnd->imm.value;
1577 /* E.g. imm_0_31 constrains value to be 0..31. */
1578 if (qualifier_value_in_range_constraint_p (qualifier)
1579 && !value_in_range_p (imm, get_lower_bound (qualifier),
1580 get_upper_bound (qualifier)))
1581 {
1582 set_imm_out_of_range_error (mismatch_detail, idx,
1583 get_lower_bound (qualifier),
1584 get_upper_bound (qualifier));
1585 return 0;
1586 }
1587
1588 switch (type)
1589 {
1590 case AARCH64_OPND_AIMM:
1591 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1592 {
1593 set_other_error (mismatch_detail, idx,
1594 _("invalid shift operator"));
1595 return 0;
1596 }
1597 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1598 {
1599 set_other_error (mismatch_detail, idx,
1600 _("shift amount expected to be 0 or 12"));
1601 return 0;
1602 }
1603 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1604 {
1605 set_other_error (mismatch_detail, idx,
1606 _("immediate out of range"));
1607 return 0;
1608 }
1609 break;
1610
1611 case AARCH64_OPND_HALF:
1612 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1613 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1614 {
1615 set_other_error (mismatch_detail, idx,
1616 _("invalid shift operator"));
1617 return 0;
1618 }
1619 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1620 if (!value_aligned_p (opnd->shifter.amount, 16))
1621 {
1622 set_other_error (mismatch_detail, idx,
1623 _("shift amount should be a multiple of 16"));
1624 return 0;
1625 }
1626 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1627 {
1628 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1629 0, size * 8 - 16);
1630 return 0;
1631 }
1632 if (opnd->imm.value < 0)
1633 {
1634 set_other_error (mismatch_detail, idx,
1635 _("negative immediate value not allowed"));
1636 return 0;
1637 }
1638 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1639 {
1640 set_other_error (mismatch_detail, idx,
1641 _("immediate out of range"));
1642 return 0;
1643 }
1644 break;
1645
1646 case AARCH64_OPND_IMM_MOV:
1647 {
1648 int is32 = aarch64_get_qualifier_esize (opnds[0].qualifier) == 4;
1649 imm = opnd->imm.value;
1650 assert (idx == 1);
1651 switch (opcode->op)
1652 {
1653 case OP_MOV_IMM_WIDEN:
1654 imm = ~imm;
1655 /* Fall through... */
1656 case OP_MOV_IMM_WIDE:
1657 if (!aarch64_wide_constant_p (imm, is32, NULL))
1658 {
1659 set_other_error (mismatch_detail, idx,
1660 _("immediate out of range"));
1661 return 0;
1662 }
1663 break;
1664 case OP_MOV_IMM_LOG:
1665 if (!aarch64_logical_immediate_p (imm, is32, NULL))
1666 {
1667 set_other_error (mismatch_detail, idx,
1668 _("immediate out of range"));
1669 return 0;
1670 }
1671 break;
1672 default:
1673 assert (0);
1674 return 0;
1675 }
1676 }
1677 break;
1678
1679 case AARCH64_OPND_NZCV:
1680 case AARCH64_OPND_CCMP_IMM:
1681 case AARCH64_OPND_EXCEPTION:
1682 case AARCH64_OPND_UIMM4:
1683 case AARCH64_OPND_UIMM7:
1684 case AARCH64_OPND_UIMM3_OP1:
1685 case AARCH64_OPND_UIMM3_OP2:
1686 size = get_operand_fields_width (get_operand_from_code (type));
1687 assert (size < 32);
1688 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1689 {
1690 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1691 (1 << size) - 1);
1692 return 0;
1693 }
1694 break;
1695
1696 case AARCH64_OPND_WIDTH:
1697 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
1698 && opnds[0].type == AARCH64_OPND_Rd);
1699 size = get_upper_bound (qualifier);
1700 if (opnd->imm.value + opnds[idx-1].imm.value > size)
1701 /* lsb+width <= reg.size */
1702 {
1703 set_imm_out_of_range_error (mismatch_detail, idx, 1,
1704 size - opnds[idx-1].imm.value);
1705 return 0;
1706 }
1707 break;
1708
1709 case AARCH64_OPND_LIMM:
1710 {
1711 int is32 = opnds[0].qualifier == AARCH64_OPND_QLF_W;
1712 uint64_t uimm = opnd->imm.value;
1713 if (opcode->op == OP_BIC)
1714 uimm = ~uimm;
1715 if (aarch64_logical_immediate_p (uimm, is32, NULL) == FALSE)
1716 {
1717 set_other_error (mismatch_detail, idx,
1718 _("immediate out of range"));
1719 return 0;
1720 }
1721 }
1722 break;
1723
1724 case AARCH64_OPND_IMM0:
1725 case AARCH64_OPND_FPIMM0:
1726 if (opnd->imm.value != 0)
1727 {
1728 set_other_error (mismatch_detail, idx,
1729 _("immediate zero expected"));
1730 return 0;
1731 }
1732 break;
1733
1734 case AARCH64_OPND_SHLL_IMM:
1735 assert (idx == 2);
1736 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
1737 if (opnd->imm.value != size)
1738 {
1739 set_other_error (mismatch_detail, idx,
1740 _("invalid shift amount"));
1741 return 0;
1742 }
1743 break;
1744
1745 case AARCH64_OPND_IMM_VLSL:
1746 size = aarch64_get_qualifier_esize (qualifier);
1747 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
1748 {
1749 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1750 size * 8 - 1);
1751 return 0;
1752 }
1753 break;
1754
1755 case AARCH64_OPND_IMM_VLSR:
1756 size = aarch64_get_qualifier_esize (qualifier);
1757 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
1758 {
1759 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
1760 return 0;
1761 }
1762 break;
1763
1764 case AARCH64_OPND_SIMD_IMM:
1765 case AARCH64_OPND_SIMD_IMM_SFT:
1766 /* Qualifier check. */
1767 switch (qualifier)
1768 {
1769 case AARCH64_OPND_QLF_LSL:
1770 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1771 {
1772 set_other_error (mismatch_detail, idx,
1773 _("invalid shift operator"));
1774 return 0;
1775 }
1776 break;
1777 case AARCH64_OPND_QLF_MSL:
1778 if (opnd->shifter.kind != AARCH64_MOD_MSL)
1779 {
1780 set_other_error (mismatch_detail, idx,
1781 _("invalid shift operator"));
1782 return 0;
1783 }
1784 break;
1785 case AARCH64_OPND_QLF_NIL:
1786 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1787 {
1788 set_other_error (mismatch_detail, idx,
1789 _("shift is not permitted"));
1790 return 0;
1791 }
1792 break;
1793 default:
1794 assert (0);
1795 return 0;
1796 }
1797 /* Is the immediate valid? */
1798 assert (idx == 1);
1799 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
1800 {
1801 /* uimm8 or simm8 */
1802 if (!value_in_range_p (opnd->imm.value, -128, 255))
1803 {
1804 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
1805 return 0;
1806 }
1807 }
1808 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
1809 {
1810 /* uimm64 is not
1811 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1812 ffffffffgggggggghhhhhhhh'. */
1813 set_other_error (mismatch_detail, idx,
1814 _("invalid value for immediate"));
1815 return 0;
1816 }
1817 /* Is the shift amount valid? */
1818 switch (opnd->shifter.kind)
1819 {
1820 case AARCH64_MOD_LSL:
1821 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1822 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
1823 {
1824 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
1825 (size - 1) * 8);
1826 return 0;
1827 }
1828 if (!value_aligned_p (opnd->shifter.amount, 8))
1829 {
1830 set_unaligned_error (mismatch_detail, idx, 8);
1831 return 0;
1832 }
1833 break;
1834 case AARCH64_MOD_MSL:
1835 /* Only 8 and 16 are valid shift amount. */
1836 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
1837 {
1838 set_other_error (mismatch_detail, idx,
1839 _("shift amount expected to be 0 or 16"));
1840 return 0;
1841 }
1842 break;
1843 default:
1844 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1845 {
1846 set_other_error (mismatch_detail, idx,
1847 _("invalid shift operator"));
1848 return 0;
1849 }
1850 break;
1851 }
1852 break;
1853
1854 case AARCH64_OPND_FPIMM:
1855 case AARCH64_OPND_SIMD_FPIMM:
1856 if (opnd->imm.is_fp == 0)
1857 {
1858 set_other_error (mismatch_detail, idx,
1859 _("floating-point immediate expected"));
1860 return 0;
1861 }
1862 /* The value is expected to be an 8-bit floating-point constant with
1863 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1864 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1865 instruction). */
1866 if (!value_in_range_p (opnd->imm.value, 0, 255))
1867 {
1868 set_other_error (mismatch_detail, idx,
1869 _("immediate out of range"));
1870 return 0;
1871 }
1872 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1873 {
1874 set_other_error (mismatch_detail, idx,
1875 _("invalid shift operator"));
1876 return 0;
1877 }
1878 break;
1879
1880 default:
1881 break;
1882 }
1883 break;
1884
1885 case AARCH64_OPND_CLASS_CP_REG:
1886 /* Cn or Cm: 4-bit opcode field named for historical reasons.
1887 valid range: C0 - C15. */
1888 if (opnd->reg.regno > 15)
1889 {
1890 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1891 return 0;
1892 }
1893 break;
1894
1895 case AARCH64_OPND_CLASS_SYSTEM:
1896 switch (type)
1897 {
1898 case AARCH64_OPND_PSTATEFIELD:
1899 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
1900 /* MSR UAO, #uimm4
1901 MSR PAN, #uimm4
1902 The immediate must be #0 or #1. */
1903 if ((opnd->pstatefield == 0x03 /* UAO. */
1904 || opnd->pstatefield == 0x04) /* PAN. */
1905 && opnds[1].imm.value > 1)
1906 {
1907 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
1908 return 0;
1909 }
1910 /* MSR SPSel, #uimm4
1911 Uses uimm4 as a control value to select the stack pointer: if
1912 bit 0 is set it selects the current exception level's stack
1913 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
1914 Bits 1 to 3 of uimm4 are reserved and should be zero. */
1915 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
1916 {
1917 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
1918 return 0;
1919 }
1920 break;
1921 default:
1922 break;
1923 }
1924 break;
1925
1926 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
1927 /* Get the upper bound for the element index. */
1928 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1929 /* Index out-of-range. */
1930 if (!value_in_range_p (opnd->reglane.index, 0, num))
1931 {
1932 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1933 return 0;
1934 }
1935 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
1936 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
1937 number is encoded in "size:M:Rm":
1938 size <Vm>
1939 00 RESERVED
1940 01 0:Rm
1941 10 M:Rm
1942 11 RESERVED */
1943 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
1944 && !value_in_range_p (opnd->reglane.regno, 0, 15))
1945 {
1946 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1947 return 0;
1948 }
1949 break;
1950
1951 case AARCH64_OPND_CLASS_MODIFIED_REG:
1952 assert (idx == 1 || idx == 2);
1953 switch (type)
1954 {
1955 case AARCH64_OPND_Rm_EXT:
1956 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
1957 && opnd->shifter.kind != AARCH64_MOD_LSL)
1958 {
1959 set_other_error (mismatch_detail, idx,
1960 _("extend operator expected"));
1961 return 0;
1962 }
1963 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
1964 (i.e. SP), in which case it defaults to LSL. The LSL alias is
1965 only valid when "Rd" or "Rn" is '11111', and is preferred in that
1966 case. */
1967 if (!aarch64_stack_pointer_p (opnds + 0)
1968 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
1969 {
1970 if (!opnd->shifter.operator_present)
1971 {
1972 set_other_error (mismatch_detail, idx,
1973 _("missing extend operator"));
1974 return 0;
1975 }
1976 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
1977 {
1978 set_other_error (mismatch_detail, idx,
1979 _("'LSL' operator not allowed"));
1980 return 0;
1981 }
1982 }
1983 assert (opnd->shifter.operator_present /* Default to LSL. */
1984 || opnd->shifter.kind == AARCH64_MOD_LSL);
1985 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
1986 {
1987 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
1988 return 0;
1989 }
1990 /* In the 64-bit form, the final register operand is written as Wm
1991 for all but the (possibly omitted) UXTX/LSL and SXTX
1992 operators.
1993 N.B. GAS allows X register to be used with any operator as a
1994 programming convenience. */
1995 if (qualifier == AARCH64_OPND_QLF_X
1996 && opnd->shifter.kind != AARCH64_MOD_LSL
1997 && opnd->shifter.kind != AARCH64_MOD_UXTX
1998 && opnd->shifter.kind != AARCH64_MOD_SXTX)
1999 {
2000 set_other_error (mismatch_detail, idx, _("W register expected"));
2001 return 0;
2002 }
2003 break;
2004
2005 case AARCH64_OPND_Rm_SFT:
2006 /* ROR is not available to the shifted register operand in
2007 arithmetic instructions. */
2008 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2009 {
2010 set_other_error (mismatch_detail, idx,
2011 _("shift operator expected"));
2012 return 0;
2013 }
2014 if (opnd->shifter.kind == AARCH64_MOD_ROR
2015 && opcode->iclass != log_shift)
2016 {
2017 set_other_error (mismatch_detail, idx,
2018 _("'ROR' operator not allowed"));
2019 return 0;
2020 }
2021 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2022 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2023 {
2024 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2025 return 0;
2026 }
2027 break;
2028
2029 default:
2030 break;
2031 }
2032 break;
2033
2034 default:
2035 break;
2036 }
2037
2038 return 1;
2039 }
2040
2041 /* Main entrypoint for the operand constraint checking.
2042
2043 Return 1 if operands of *INST meet the constraint applied by the operand
2044 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2045 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2046 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2047 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2048 error kind when it is notified that an instruction does not pass the check).
2049
2050 Un-determined operand qualifiers may get established during the process. */
2051
2052 int
2053 aarch64_match_operands_constraint (aarch64_inst *inst,
2054 aarch64_operand_error *mismatch_detail)
2055 {
2056 int i;
2057
2058 DEBUG_TRACE ("enter");
2059
2060 /* Match operands' qualifier.
2061 *INST has already had qualifier establish for some, if not all, of
2062 its operands; we need to find out whether these established
2063 qualifiers match one of the qualifier sequence in
2064 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2065 with the corresponding qualifier in such a sequence.
2066 Only basic operand constraint checking is done here; the more thorough
2067 constraint checking will carried out by operand_general_constraint_met_p,
2068 which has be to called after this in order to get all of the operands'
2069 qualifiers established. */
2070 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2071 {
2072 DEBUG_TRACE ("FAIL on operand qualifier matching");
2073 if (mismatch_detail)
2074 {
2075 /* Return an error type to indicate that it is the qualifier
2076 matching failure; we don't care about which operand as there
2077 are enough information in the opcode table to reproduce it. */
2078 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2079 mismatch_detail->index = -1;
2080 mismatch_detail->error = NULL;
2081 }
2082 return 0;
2083 }
2084
2085 /* Match operands' constraint. */
2086 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2087 {
2088 enum aarch64_opnd type = inst->opcode->operands[i];
2089 if (type == AARCH64_OPND_NIL)
2090 break;
2091 if (inst->operands[i].skip)
2092 {
2093 DEBUG_TRACE ("skip the incomplete operand %d", i);
2094 continue;
2095 }
2096 if (operand_general_constraint_met_p (inst->operands, i, type,
2097 inst->opcode, mismatch_detail) == 0)
2098 {
2099 DEBUG_TRACE ("FAIL on operand %d", i);
2100 return 0;
2101 }
2102 }
2103
2104 DEBUG_TRACE ("PASS");
2105
2106 return 1;
2107 }
2108
2109 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2110 Also updates the TYPE of each INST->OPERANDS with the corresponding
2111 value of OPCODE->OPERANDS.
2112
2113 Note that some operand qualifiers may need to be manually cleared by
2114 the caller before it further calls the aarch64_opcode_encode; by
2115 doing this, it helps the qualifier matching facilities work
2116 properly. */
2117
2118 const aarch64_opcode*
2119 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2120 {
2121 int i;
2122 const aarch64_opcode *old = inst->opcode;
2123
2124 inst->opcode = opcode;
2125
2126 /* Update the operand types. */
2127 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2128 {
2129 inst->operands[i].type = opcode->operands[i];
2130 if (opcode->operands[i] == AARCH64_OPND_NIL)
2131 break;
2132 }
2133
2134 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2135
2136 return old;
2137 }
2138
2139 int
2140 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2141 {
2142 int i;
2143 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2144 if (operands[i] == operand)
2145 return i;
2146 else if (operands[i] == AARCH64_OPND_NIL)
2147 break;
2148 return -1;
2149 }
2150 \f
/* Names of the general-purpose registers, indexed as
   int_reg[has_zr][is_64][regno] (see get_int_reg_name):
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1
   Register number 31 names the stack pointer ("wsp"/"sp") in the first
   pair of tables and the zero register ("wzr"/"xzr") in the second.  */
static const char *int_reg[2][2][32] = {
#define R32 "w"
#define R64 "x"
  { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
      R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
      R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
      R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", "wsp" },
    { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
      R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
      R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
      R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", "sp" } },
  { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
      R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
      R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
      R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
    { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
      R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
      R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
      R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
#undef R64
#undef R32
};
2177
2178 /* Return the integer register name.
2179 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2180
2181 static inline const char *
2182 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2183 {
2184 const int has_zr = sp_reg_p ? 0 : 1;
2185 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2186 return int_reg[has_zr][is_64][regno];
2187 }
2188
2189 /* Like get_int_reg_name, but IS_64 is always 1. */
2190
2191 static inline const char *
2192 get_64bit_int_reg_name (int regno, int sp_reg_p)
2193 {
2194 const int has_zr = sp_reg_p ? 0 : 1;
2195 return int_reg[has_zr][1][regno];
2196 }
2197
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union lets the bit pattern produced by expand_fp_imm be viewed
   as the corresponding IEEE floating-point value for printing.  */

typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision before
   printing (see expand_fp_imm), hence the 32-bit payload here.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2217
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Initialized to 0 so that an unsupported SIZE cannot cause an
     uninitialized value to be returned when asserts are compiled out.  */
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      imm = (imm8_7 << (63-32))	/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)	*/
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32))	/* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7> */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>) */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2261
2262 /* Produce the string representation of the register list operand *OPND
2263 in the buffer pointed by BUF of size SIZE. */
2264 static void
2265 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd)
2266 {
2267 const int num_regs = opnd->reglist.num_regs;
2268 const int first_reg = opnd->reglist.first_regno;
2269 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2270 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2271 char tb[8]; /* Temporary buffer. */
2272
2273 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2274 assert (num_regs >= 1 && num_regs <= 4);
2275
2276 /* Prepare the index if any. */
2277 if (opnd->reglist.has_index)
2278 snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
2279 else
2280 tb[0] = '\0';
2281
2282 /* The hyphenated form is preferred for disassembly if there are
2283 more than two registers in the list, and the register numbers
2284 are monotonically increasing in increments of one. */
2285 if (num_regs > 2 && last_reg > first_reg)
2286 snprintf (buf, size, "{v%d.%s-v%d.%s}%s", first_reg, qlf_name,
2287 last_reg, qlf_name, tb);
2288 else
2289 {
2290 const int reg0 = first_reg;
2291 const int reg1 = (first_reg + 1) & 0x1f;
2292 const int reg2 = (first_reg + 2) & 0x1f;
2293 const int reg3 = (first_reg + 3) & 0x1f;
2294
2295 switch (num_regs)
2296 {
2297 case 1:
2298 snprintf (buf, size, "{v%d.%s}%s", reg0, qlf_name, tb);
2299 break;
2300 case 2:
2301 snprintf (buf, size, "{v%d.%s, v%d.%s}%s", reg0, qlf_name,
2302 reg1, qlf_name, tb);
2303 break;
2304 case 3:
2305 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0, qlf_name,
2306 reg1, qlf_name, reg2, qlf_name, tb);
2307 break;
2308 case 4:
2309 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
2310 reg0, qlf_name, reg1, qlf_name, reg2, qlf_name,
2311 reg3, qlf_name, tb);
2312 break;
2313 }
2314 }
2315 }
2316
2317 /* Produce the string representation of the register offset address operand
2318 *OPND in the buffer pointed by BUF of size SIZE. */
2319 static void
2320 print_register_offset_address (char *buf, size_t size,
2321 const aarch64_opnd_info *opnd)
2322 {
2323 char tb[16]; /* Temporary buffer. */
2324 bfd_boolean lsl_p = FALSE; /* Is LSL shift operator? */
2325 bfd_boolean wm_p = FALSE; /* Should Rm be Wm? */
2326 bfd_boolean print_extend_p = TRUE;
2327 bfd_boolean print_amount_p = TRUE;
2328 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2329
2330 switch (opnd->shifter.kind)
2331 {
2332 case AARCH64_MOD_UXTW: wm_p = TRUE; break;
2333 case AARCH64_MOD_LSL : lsl_p = TRUE; break;
2334 case AARCH64_MOD_SXTW: wm_p = TRUE; break;
2335 case AARCH64_MOD_SXTX: break;
2336 default: assert (0);
2337 }
2338
2339 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2340 || !opnd->shifter.amount_present))
2341 {
2342 /* Not print the shift/extend amount when the amount is zero and
2343 when it is not the special case of 8-bit load/store instruction. */
2344 print_amount_p = FALSE;
2345 /* Likewise, no need to print the shift operator LSL in such a
2346 situation. */
2347 if (lsl_p)
2348 print_extend_p = FALSE;
2349 }
2350
2351 /* Prepare for the extend/shift. */
2352 if (print_extend_p)
2353 {
2354 if (print_amount_p)
2355 snprintf (tb, sizeof (tb), ",%s #%d", shift_name, opnd->shifter.amount);
2356 else
2357 snprintf (tb, sizeof (tb), ",%s", shift_name);
2358 }
2359 else
2360 tb[0] = '\0';
2361
2362 snprintf (buf, size, "[%s,%s%s]",
2363 get_64bit_int_reg_name (opnd->addr.base_regno, 1),
2364 get_int_reg_name (opnd->addr.offset.regno,
2365 wm_p ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X,
2366 0 /* sp_reg_p */),
2367 tb);
2368 }
2369
2370 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2371 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2372 PC, PCREL_P and ADDRESS are used to pass in and return information about
2373 the PC-relative address calculation, where the PC value is passed in
2374 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2375 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2376 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2377
2378 The function serves both the disassembler and the assembler diagnostics
2379 issuer, which is the reason why it lives in this file. */
2380
void
aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
		       const aarch64_opcode *opcode,
		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
		       bfd_vma *address)
{
  int i;
  const char *name = NULL;
  const aarch64_opnd_info *opnd = opnds + idx;
  enum aarch64_modifier_kind kind;
  uint64_t addr;

  /* Default to an empty string and "not PC-relative".  */
  buf[0] = '\0';
  if (pcrel_p)
    *pcrel_p = 0;

  switch (opnd->type)
    {
    /* General-purpose registers (no SP).  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_PAIRREG:
      /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
	 the <ic_op>, therefore we use opnd->present to override the
	 generic optional-ness information.  */
      if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
	break;
      /* Omit the operand, e.g. RET.  */
      if (optional_operand_p (opcode, idx)
	  && opnd->reg.regno == get_optional_operand_default_value (opcode))
	break;
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      break;

    /* General-purpose registers where 31 names SP/WSP.  */
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
	      || opnd->qualifier == AARCH64_OPND_QLF_X
	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
      break;

    case AARCH64_OPND_Rm_EXT:
      kind = opnd->shifter.kind;
      assert (idx == 1 || idx == 2);
      if ((aarch64_stack_pointer_p (opnds)
	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
	       && kind == AARCH64_MOD_UXTW)
	      || (opnd->qualifier == AARCH64_OPND_QLF_X
		  && kind == AARCH64_MOD_UXTX)))
	{
	  /* 'LSL' is the preferred form in this case.  */
	  kind = AARCH64_MOD_LSL;
	  if (opnd->shifter.amount == 0)
	    {
	      /* Shifter omitted.  */
	      snprintf (buf, size, "%s",
			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
	      break;
	    }
	}
      if (opnd->shifter.amount)
	snprintf (buf, size, "%s, %s #%d",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "%s, %s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name);
      break;

    case AARCH64_OPND_Rm_SFT:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      /* A zero-amount LSL is the canonical "no shift" and is omitted.  */
      if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
	snprintf (buf, size, "%s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      else
	snprintf (buf, size, "%s, %s #%d",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    /* Scalar FP/SIMD registers; the qualifier name supplies the
       width prefix.  */
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
      snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reg.regno);
      break;

    /* Vector registers with an arrangement specifier.  */
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
      snprintf (buf, size, "v%d.%s", opnd->reg.regno,
		aarch64_get_qualifier_name (opnd->qualifier));
      break;

    /* Vector register elements, e.g. v0.s[1].  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
      snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reglane.index);
      break;

    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
      break;

    /* Vector register lists.  */
    case AARCH64_OPND_LVn:
    case AARCH64_OPND_LVt:
    case AARCH64_OPND_LVt_AL:
    case AARCH64_OPND_LEt:
      print_register_list (buf, size, opnd);
      break;

    case AARCH64_OPND_Cn:
    case AARCH64_OPND_Cm:
      snprintf (buf, size, "C%d", opnd->reg.regno);
      break;

    /* Plain decimal immediates.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM0:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_FBITS:
      snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    case AARCH64_OPND_IMM_MOV:
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 4:	/* e.g. MOV Wd, #<imm32>.  */
	    {
	      int imm32 = opnd->imm.value;
	      snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
	    }
	  break;
	case 8:	/* e.g. MOV Xd, #<imm64>.  */
	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
		    opnd->imm.value, opnd->imm.value);
	  break;
	default: assert (0);
	}
      break;

    case AARCH64_OPND_FPIMM0:
      snprintf (buf, size, "#0.0");
      break;

    case AARCH64_OPND_LIMM:
    case AARCH64_OPND_AIMM:
    case AARCH64_OPND_HALF:
      if (opnd->shifter.amount)
	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
      if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
	  || opnd->shifter.kind == AARCH64_MOD_NONE)
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      else
	snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    /* Encoded 8-bit FP immediates, expanded for printing by
       expand_fp_imm.  */
    case AARCH64_OPND_FPIMM:
    case AARCH64_OPND_SIMD_FPIMM:
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
	    {
	      half_conv_t c;
	      c.i = expand_fp_imm (2, opnd->imm.value);
	      snprintf (buf, size, "#%.18e", c.f);
	    }
	  break;
	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
	    {
	      single_conv_t c;
	      c.i = expand_fp_imm (4, opnd->imm.value);
	      snprintf (buf, size, "#%.18e", c.f);
	    }
	  break;
	case 8:	/* e.g. FMOV <Dd>, #<imm>.  */
	    {
	      double_conv_t c;
	      c.i = expand_fp_imm (8, opnd->imm.value);
	      snprintf (buf, size, "#%.18e", c.d);
	    }
	  break;
	default: assert (0);
	}
      break;

    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM7:
      if (optional_operand_p (opcode, idx) == TRUE
	  && (opnd->imm.value ==
	      (int64_t) get_optional_operand_default_value (opcode)))
	/* Omit the operand, e.g. DCPS1.  */
	break;
      snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
      break;

    case AARCH64_OPND_COND:
    case AARCH64_OPND_COND1:
      snprintf (buf, size, "%s", opnd->cond->names[0]);
      break;

    case AARCH64_OPND_ADDR_ADRP:
      addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
	+ opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    case AARCH64_OPND_ADDR_PCREL14:
    case AARCH64_OPND_ADDR_PCREL19:
    case AARCH64_OPND_ADDR_PCREL21:
    case AARCH64_OPND_ADDR_PCREL26:
      addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    case AARCH64_OPND_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_POST:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
	{
	  if (opnd->addr.offset.is_reg)
	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
	  else
	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
	}
      else
	snprintf (buf, size, "[%s]", name);
      break;

    case AARCH64_OPND_ADDR_REGOFF:
      print_register_offset_address (buf, size, opnd);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->addr.writeback)
	{
	  /* Pre-indexed forms take a '!'; post-indexed forms put the
	     offset after the brackets.  */
	  if (opnd->addr.preind)
	    snprintf (buf, size, "[%s,#%d]!", name, opnd->addr.offset.imm);
	  else
	    snprintf (buf, size, "[%s],#%d", name, opnd->addr.offset.imm);
	}
      else
	{
	  if (opnd->addr.offset.imm)
	    snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
	  else
	    snprintf (buf, size, "[%s]", name);
	}
      break;

    case AARCH64_OPND_ADDR_UIMM12:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
      else
	snprintf (buf, size, "[%s]", name);
      break;

    case AARCH64_OPND_SYSREG:
      /* Look up a non-deprecated name for the encoding; fall back on the
	 generic s<op0>_<op1>_c<CRn>_c<CRm>_<op2> form.  */
      for (i = 0; aarch64_sys_regs[i].name; ++i)
	if (aarch64_sys_regs[i].value == opnd->sysreg
	    && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
	  break;
      if (aarch64_sys_regs[i].name)
	snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
      else
	{
	  /* Implementation defined system register.  */
	  unsigned int value = opnd->sysreg;
	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
		    value & 0x7);
	}
      break;

    case AARCH64_OPND_PSTATEFIELD:
      for (i = 0; aarch64_pstatefields[i].name; ++i)
	if (aarch64_pstatefields[i].value == opnd->pstatefield)
	  break;
      assert (aarch64_pstatefields[i].name);
      snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
      break;

    case AARCH64_OPND_SYSREG_AT:
    case AARCH64_OPND_SYSREG_DC:
    case AARCH64_OPND_SYSREG_IC:
    case AARCH64_OPND_SYSREG_TLBI:
      snprintf (buf, size, "%s", opnd->sysins_op->name);
      break;

    case AARCH64_OPND_BARRIER:
      snprintf (buf, size, "%s", opnd->barrier->name);
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* Operand can be omitted, e.g. in DCPS1.  */
      if (! optional_operand_p (opcode, idx)
	  || (opnd->barrier->value
	      != get_optional_operand_default_value (opcode)))
	snprintf (buf, size, "#0x%x", opnd->barrier->value);
      break;

    case AARCH64_OPND_PRFOP:
      /* Named prefetch operation, or a raw number for the unnamed
	 encodings.  */
      if (opnd->prfop->name != NULL)
	snprintf (buf, size, "%s", opnd->prfop->name);
      else
	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
      break;

    case AARCH64_OPND_BARRIER_PSB:
      snprintf (buf, size, "%s", opnd->hint_option->name);
      break;

    default:
      assert (0);
    }
}
2764 \f
/* Pack a system-register encoding from its op0:op1:CRn:CRm:op2 fields.
   The fields are assembled at bit positions 19/16/12/8/5 and the whole
   value shifted right by 5, yielding the packed form stored in the
   tables below.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* For 3.9.3 Instructions for Accessing Special Purpose Registers.  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* For 3.9.10 System Instructions.  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Shorthand for the CRn/CRm register numbers used in the tables.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

/* Flag bits for the entries of aarch64_sys_regs and related tables;
   undefined first in case a system header already claimed the names.  */
#ifdef F_DEPRECATED
#undef F_DEPRECATED
#endif
#define F_DEPRECATED 0x1	/* Deprecated system register.  */

#ifdef F_ARCHEXT
#undef F_ARCHEXT
#endif
#define F_ARCHEXT 0x2		/* Architecture dependent system register.  */

#ifdef F_HASXT
#undef F_HASXT
#endif
#define F_HASXT 0x4		/* System instruction register <Xt>
				   operand.  */
2804
2805
/* TODO: there are two more issues that need to be resolved:
   1. handle read-only and write-only system registers;
   2. handle cpu-implementation-defined system registers.  */
2809 const aarch64_sys_reg aarch64_sys_regs [] =
2810 {
2811 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
2812 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
2813 { "elr_el1", CPEN_(0,C0,1), 0 },
2814 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
2815 { "sp_el0", CPEN_(0,C1,0), 0 },
2816 { "spsel", CPEN_(0,C2,0), 0 },
2817 { "daif", CPEN_(3,C2,1), 0 },
2818 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
2819 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
2820 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
2821 { "nzcv", CPEN_(3,C2,0), 0 },
2822 { "fpcr", CPEN_(3,C4,0), 0 },
2823 { "fpsr", CPEN_(3,C4,1), 0 },
2824 { "dspsr_el0", CPEN_(3,C5,0), 0 },
2825 { "dlr_el0", CPEN_(3,C5,1), 0 },
2826 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
2827 { "elr_el2", CPEN_(4,C0,1), 0 },
2828 { "sp_el1", CPEN_(4,C1,0), 0 },
2829 { "spsr_irq", CPEN_(4,C3,0), 0 },
2830 { "spsr_abt", CPEN_(4,C3,1), 0 },
2831 { "spsr_und", CPEN_(4,C3,2), 0 },
2832 { "spsr_fiq", CPEN_(4,C3,3), 0 },
2833 { "spsr_el3", CPEN_(6,C0,0), 0 },
2834 { "elr_el3", CPEN_(6,C0,1), 0 },
2835 { "sp_el2", CPEN_(6,C1,0), 0 },
2836 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
2837 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
2838 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
2839 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
2840 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
2841 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
2842 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
2843 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
2844 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
2845 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
2846 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
2847 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
2848 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
2849 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
2850 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
2851 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
2852 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
2853 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
2854 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
2855 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
2856 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
2857 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
2858 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
2859 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
2860 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
2861 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
2862 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
2863 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
2864 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
2865 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
2866 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
2867 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
2868 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
2869 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
2870 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
2871 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
2872 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
2873 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
2874 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
2875 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
2876 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
2877 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
2878 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
2879 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
2880 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
2881 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
2882 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
2883 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
2884 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
2885 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
2886 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
2887 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
2888 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
2889 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
2890 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
2891 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
2892 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
2893 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
2894 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
2895 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
2896 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
2897 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
2898 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
2899 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
2900 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
2901 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
2902 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
2903 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
2904 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
2905 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
2906 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
2907 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
2908 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
2909 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
2910 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
2911 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
2912 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
2913 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
2914 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
2915 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
2916 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
2917 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
2918 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
2919 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
2920 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
2921 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
2922 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
2923 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
2924 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
2925 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
2926 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
2927 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
2928 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
2929 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
2930 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
2931 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
2932 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
2933 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
2934 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
2935 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
2936 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
2937 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
2938 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
2939 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
2940 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
2941 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
2942 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
2943 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
2944 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
2945 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
2946 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
2947 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
2948 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
2949 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
2950 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
2951 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
2952 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
2953 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
2954 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
2955 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
2956 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
2957 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
2958 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
2959 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
2960 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
2961 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
2962 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
2963 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
2964 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
2965 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
2966 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
2967 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
2968 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
2969 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
2970 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
2971 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
2972 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
2973 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
2974 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
2975 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
2976 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
2977 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
2978 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
2979 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
2980 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
2981 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
2982 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
2983 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
2984 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
2985 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
2986 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
2987 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
2988 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
2989 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
2990 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
2991 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
2992 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
2993 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
2994 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
2995 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
2996 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
2997 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
2998 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
2999 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3000 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3001 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3002 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3003 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3004 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3005 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3006 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3007 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3008 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3009 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3010 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3011 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3012 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3013 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3014 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3015 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3016 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3017 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3018 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3019 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3020 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3021 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3022 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3023 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3024 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3025 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3026 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3027 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3028 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3029 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3030 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3031 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3032 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3033 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3034 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3035 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3036 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3037 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3038 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3039 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3040 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3041 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3042 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3043 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3044 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3045 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3046 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3047 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3048 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3049 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3050 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3051 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3052 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3053 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3054 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3055 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3056 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3057 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3058 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3059 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3060 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3061 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3062 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3063 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3064 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3065 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3066 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3067 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3068 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3069 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3070 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3071 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3072 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3073 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3074 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3075 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3076 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3077 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3078 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3079 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3080 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3081 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3082 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3083 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3084 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3085 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3086 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3087 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3088 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3089 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3090 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3091 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3092 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3093 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3094 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3095 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3096 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3097 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3098 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3099 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3100 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3101 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3102 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3103 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3104 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3105 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3106 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3107 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3108 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3109 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3110 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3111 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3112 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3113 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3114 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3115 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3116 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3117 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3118 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3119 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3120 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3121 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3122 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3123 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3124 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3125 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3126 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3127 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3128 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3129 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3130 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3131 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3132 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3133 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3134 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3135 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3136 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3137 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3138 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3139 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3140 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3141 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
3142 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
3143 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
3144 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
3145 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
3146 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
3147 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
3148 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
3149 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
3150 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
3151 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
3152 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
3153 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
3154 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3155 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3156 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3157 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3158 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3159 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3160 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3161 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3162 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3163 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3164 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3165 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3166 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3167 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3168 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3169 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3170 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
3171 { 0, CPENC(0,0,0,0,0), 0 },
3172 };
3173
3174 bfd_boolean
3175 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3176 {
3177 return (reg->flags & F_DEPRECATED) != 0;
3178 }
3179
/* Return TRUE if the system register REG is implemented by a CPU with the
   feature set FEATURES.  Registers without the F_ARCHEXT flag belong to the
   base architecture and are always supported; each check below matches an
   encoding from aarch64_sys_regs against the architecture extension that
   introduced it.  */
bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  /* Registers not tied to any extension are always available.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
     ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  /* No extension-specific restriction matched; the register is available.  */
  return TRUE;
}
3286
/* PSTATE fields accessible via the MSR (immediate) instruction; the value
   is the field's encoding.  Entries flagged F_ARCHEXT are gated by
   aarch64_pstatefield_supported_p.  The all-zero entry terminates the
   table.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",   0x05, 0 },
  { "daifset", 0x1e, 0 },
  { "daifclr", 0x1f, 0 },
  { "pan",     0x04, F_ARCHEXT },  /* Requires AARCH64_FEATURE_PAN.  */
  { "uao",     0x03, F_ARCHEXT },  /* Requires AARCH64_FEATURE_V8_2.  */
  { 0,         CPENC(0,0,0,0,0), 0 },
};
3296
3297 bfd_boolean
3298 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
3299 const aarch64_sys_reg *reg)
3300 {
3301 if (!(reg->flags & F_ARCHEXT))
3302 return TRUE;
3303
3304 /* PAN. Values are from aarch64_pstatefields. */
3305 if (reg->value == 0x04
3306 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3307 return FALSE;
3308
3309 /* UAO. Values are from aarch64_pstatefields. */
3310 if (reg->value == 0x03
3311 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3312 return FALSE;
3313
3314 return TRUE;
3315 }
3316
/* Operands of the IC (instruction cache maintenance) instruction.  Entries
   flagged F_HASXT take an Xt register operand.  The all-zero entry
   terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
3324
/* Operands of the DC (data cache maintenance) instruction.  Entries flagged
   F_HASXT take an Xt register operand; F_ARCHEXT entries are gated by
   aarch64_sys_ins_reg_supported_p (DC CVAP requires ARMv8.2).  The all-zero
   entry terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",   CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",  CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",   CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",  CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",   CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",  CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",  CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "civac", CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",  CPENS (0, C7, C14, 2), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
3338
/* Operands of the AT (address translation) instruction.  All entries take
   an Xt register operand (F_HASXT); F_ARCHEXT entries (S1E1RP/S1E1WP,
   ARMv8.2) are gated by aarch64_sys_ins_reg_supported_p.  The all-zero
   entry terminates the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",  CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",  CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",  CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",  CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",  CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",  CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",  CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",  CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0, CPENS(0,0,0,0), 0 }
};
3357
/* Operands of the TLBI (TLB invalidate) instruction.  Entries flagged
   F_HASXT take an Xt register operand.  The all-zero entry terminates the
   table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
3394
3395 bfd_boolean
3396 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
3397 {
3398 return (sys_ins_reg->flags & F_HASXT) != 0;
3399 }
3400
3401 extern bfd_boolean
3402 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
3403 const aarch64_sys_ins_reg *reg)
3404 {
3405 if (!(reg->flags & F_ARCHEXT))
3406 return TRUE;
3407
3408 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
3409 if (reg->value == CPENS (3, C7, C12, 1)
3410 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3411 return FALSE;
3412
3413 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
3414 if ((reg->value == CPENS (0, C7, C9, 0)
3415 || reg->value == CPENS (0, C7, C9, 1))
3416 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3417 return FALSE;
3418
3419 return TRUE;
3420 }
3421
3422 #undef C0
3423 #undef C1
3424 #undef C2
3425 #undef C3
3426 #undef C4
3427 #undef C5
3428 #undef C6
3429 #undef C7
3430 #undef C8
3431 #undef C9
3432 #undef C10
3433 #undef C11
3434 #undef C12
3435 #undef C13
3436 #undef C14
3437 #undef C15
3438
/* Extract bit number BT of instruction word INSN.  */
#define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field INSN[HI:LO] of instruction word INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
3441
3442 static bfd_boolean
3443 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
3444 const aarch64_insn insn)
3445 {
3446 int t = BITS (insn, 4, 0);
3447 int n = BITS (insn, 9, 5);
3448 int t2 = BITS (insn, 14, 10);
3449
3450 if (BIT (insn, 23))
3451 {
3452 /* Write back enabled. */
3453 if ((t == n || t2 == n) && n != 31)
3454 return FALSE;
3455 }
3456
3457 if (BIT (insn, 22))
3458 {
3459 /* Load */
3460 if (t == t2)
3461 return FALSE;
3462 }
3463
3464 return TRUE;
3465 }
3466
3467 /* Include the opcode description table as well as the operand description
3468 table. */
3469 #define VERIFIER(x) verify_##x
3470 #include "aarch64-tbl.h"
This page took 0.72709 seconds and 4 git commands to generate.