1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30
31 #include "aarch64-opc.h"
32
33 #ifdef DEBUG_AARCH64
34 int debug_dump = FALSE;
35 #endif /* DEBUG_AARCH64 */
36
37 /* Helper functions to determine which operand is used to encode/decode
38 the size:Q fields for AdvSIMD instructions. */
39
40 static inline bfd_boolean
41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
42 {
43 return ((qualifier >= AARCH64_OPND_QLF_V_8B
44 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
45 : FALSE);
46 }
47
48 static inline bfd_boolean
49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
50 {
51 return ((qualifier >= AARCH64_OPND_QLF_S_B
52 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
53 : FALSE);
54 }
55
56 enum data_pattern
57 {
58 DP_UNKNOWN,
59 DP_VECTOR_3SAME,
60 DP_VECTOR_LONG,
61 DP_VECTOR_WIDE,
62 DP_VECTOR_ACROSS_LANES,
63 };
64
65 static const char significant_operand_index [] =
66 {
67 0, /* DP_UNKNOWN, by default using operand 0. */
68 0, /* DP_VECTOR_3SAME */
69 1, /* DP_VECTOR_LONG */
70 2, /* DP_VECTOR_WIDE */
71 1, /* DP_VECTOR_ACROSS_LANES */
72 };
73
74 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
75 the data pattern.
76 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
77 corresponds to one of a sequence of operands. */
78
79 static enum data_pattern
80 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
81 {
82 if (vector_qualifier_p (qualifiers[0]) == TRUE)
83 {
84 /* e.g. v.4s, v.4s, v.4s
85 or v.4h, v.4h, v.h[3]. */
86 if (qualifiers[0] == qualifiers[1]
87 && vector_qualifier_p (qualifiers[2]) == TRUE
88 && (aarch64_get_qualifier_esize (qualifiers[0])
89 == aarch64_get_qualifier_esize (qualifiers[1]))
90 && (aarch64_get_qualifier_esize (qualifiers[0])
91 == aarch64_get_qualifier_esize (qualifiers[2])))
92 return DP_VECTOR_3SAME;
93 /* e.g. v.8h, v.8b, v.8b.
94 or v.4s, v.4h, v.h[2].
95 or v.8h, v.16b. */
96 if (vector_qualifier_p (qualifiers[1]) == TRUE
97 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
98 && (aarch64_get_qualifier_esize (qualifiers[0])
99 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
100 return DP_VECTOR_LONG;
101 /* e.g. v.8h, v.8h, v.8b. */
102 if (qualifiers[0] == qualifiers[1]
103 && vector_qualifier_p (qualifiers[2]) == TRUE
104 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
105 && (aarch64_get_qualifier_esize (qualifiers[0])
106 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
107 && (aarch64_get_qualifier_esize (qualifiers[0])
108 == aarch64_get_qualifier_esize (qualifiers[1])))
109 return DP_VECTOR_WIDE;
110 }
111 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
112 {
113 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
114 if (vector_qualifier_p (qualifiers[1]) == TRUE
115 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
116 return DP_VECTOR_ACROSS_LANES;
117 }
118
119 return DP_UNKNOWN;
120 }
121
122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123 the AdvSIMD instructions. */
124 /* N.B. it is possible to do some optimization that doesn't call
125 get_data_pattern each time we need to select an operand. We can
126 either cache the calculated result or statically generate the data;
127 however, it is not obvious that the optimization would bring a significant
128 benefit. */
129
130 int
131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
132 {
133 return
134 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
135 }
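/* Editorial illustration (not part of the original source): for a wide
   operation such as "saddw v0.8h, v1.8h, v2.8b" the qualifier sequence
   is 8H, 8H, 8B, which get_data_pattern classifies as DP_VECTOR_WIDE;
   significant_operand_index therefore selects operand 2 (the narrower
   v2.8b), whose qualifier drives the size:Q encoding.  */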
136 \f
137 const aarch64_field fields[] =
138 {
139 { 0, 0 }, /* NIL. */
140 { 0, 4 }, /* cond2: condition in truly conditionally-executed inst. */
141 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
142 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
143 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
144 { 5, 19 }, /* imm19: e.g. in CBZ. */
145 { 5, 19 }, /* immhi: e.g. in ADRP. */
146 { 29, 2 }, /* immlo: e.g. in ADRP. */
147 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
148 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
149 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
150 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
151 { 0, 5 }, /* Rt: in load/store instructions. */
152 { 0, 5 }, /* Rd: in many integer instructions. */
153 { 5, 5 }, /* Rn: in many integer instructions. */
154 { 10, 5 }, /* Rt2: in load/store pair instructions. */
155 { 10, 5 }, /* Ra: in fp instructions. */
156 { 5, 3 }, /* op2: in the system instructions. */
157 { 8, 4 }, /* CRm: in the system instructions. */
158 { 12, 4 }, /* CRn: in the system instructions. */
159 { 16, 3 }, /* op1: in the system instructions. */
160 { 19, 2 }, /* op0: in the system instructions. */
161 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
162 { 12, 4 }, /* cond: condition flags as a source operand. */
163 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
164 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
165 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
166 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
167 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
168 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
169 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
170 { 12, 1 }, /* S: in load/store reg offset instructions. */
171 { 21, 2 }, /* hw: in move wide constant instructions. */
172 { 22, 2 }, /* opc: in load/store reg offset instructions. */
173 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
174 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
175 { 22, 2 }, /* type: floating point type field in fp data inst. */
176 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
177 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
178 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
179 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
180 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
181 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
182 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
183 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
184 { 5, 14 }, /* imm14: in test bit and branch instructions. */
185 { 5, 16 }, /* imm16: in exception instructions. */
186 { 0, 26 }, /* imm26: in unconditional branch instructions. */
187 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
188 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
189 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
190 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
191 { 22, 1 }, /* N: in logical (immediate) instructions. */
192 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
193 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
194 { 31, 1 }, /* sf: in integer data processing instructions. */
195 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
196 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
197 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
198 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
199 { 31, 1 }, /* b5: in the test bit and branch instructions. */
200 { 19, 5 }, /* b40: in the test bit and branch instructions. */
201 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
202 };
203
204 enum aarch64_operand_class
205 aarch64_get_operand_class (enum aarch64_opnd type)
206 {
207 return aarch64_operands[type].op_class;
208 }
209
210 const char *
211 aarch64_get_operand_name (enum aarch64_opnd type)
212 {
213 return aarch64_operands[type].name;
214 }
215
216 /* Get the operand description string.
217 This is usually used for diagnostic purposes. */
218 const char *
219 aarch64_get_operand_desc (enum aarch64_opnd type)
220 {
221 return aarch64_operands[type].desc;
222 }
223
224 /* Table of all conditional affixes. */
225 const aarch64_cond aarch64_conds[16] =
226 {
227 {{"eq"}, 0x0},
228 {{"ne"}, 0x1},
229 {{"cs", "hs"}, 0x2},
230 {{"cc", "lo", "ul"}, 0x3},
231 {{"mi"}, 0x4},
232 {{"pl"}, 0x5},
233 {{"vs"}, 0x6},
234 {{"vc"}, 0x7},
235 {{"hi"}, 0x8},
236 {{"ls"}, 0x9},
237 {{"ge"}, 0xa},
238 {{"lt"}, 0xb},
239 {{"gt"}, 0xc},
240 {{"le"}, 0xd},
241 {{"al"}, 0xe},
242 {{"nv"}, 0xf},
243 };
244
245 const aarch64_cond *
246 get_cond_from_value (aarch64_insn value)
247 {
248 assert (value < 16);
249 return &aarch64_conds[(unsigned int) value];
250 }
251
252 const aarch64_cond *
253 get_inverted_cond (const aarch64_cond *cond)
254 {
255 return &aarch64_conds[cond->value ^ 0x1];
256 }
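/* Editorial illustration (not part of the original source):
     get_inverted_cond (&aarch64_conds[0x0])  returns "ne" (0x1)
     get_inverted_cond (&aarch64_conds[0xa])  returns "lt" (0xb)
   AArch64 condition codes are arranged so that flipping the least
   significant bit inverts the condition, with AL/NV (0xe/0xf) as the
   usual special case.  */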
257
258 /* Table describing the operand extension/shifting operators; indexed by
259 enum aarch64_modifier_kind.
260
261 The value column provides the most common values for encoding modifiers,
262 which enables table-driven encoding/decoding for the modifiers. */
263 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
264 {
265 {"none", 0x0},
266 {"msl", 0x0},
267 {"ror", 0x3},
268 {"asr", 0x2},
269 {"lsr", 0x1},
270 {"lsl", 0x0},
271 {"uxtb", 0x0},
272 {"uxth", 0x1},
273 {"uxtw", 0x2},
274 {"uxtx", 0x3},
275 {"sxtb", 0x4},
276 {"sxth", 0x5},
277 {"sxtw", 0x6},
278 {"sxtx", 0x7},
279 {NULL, 0},
280 };
281
282 enum aarch64_modifier_kind
283 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
284 {
285 return desc - aarch64_operand_modifiers;
286 }
287
288 aarch64_insn
289 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
290 {
291 return aarch64_operand_modifiers[kind].value;
292 }
293
294 enum aarch64_modifier_kind
295 aarch64_get_operand_modifier_from_value (aarch64_insn value,
296 bfd_boolean extend_p)
297 {
298 if (extend_p == TRUE)
299 return AARCH64_MOD_UXTB + value;
300 else
301 return AARCH64_MOD_LSL - value;
302 }
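/* Editorial illustration (not part of the original source): with
   EXTEND_P, the field value 0b010 maps to AARCH64_MOD_UXTB + 2, i.e.
   UXTW; without it, the shift encoding 0b10 maps to AARCH64_MOD_LSL - 2,
   i.e. ASR, mirroring the value column of aarch64_operand_modifiers.  */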
303
304 bfd_boolean
305 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
306 {
307 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
308 ? TRUE : FALSE;
309 }
310
311 static inline bfd_boolean
312 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
313 {
314 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
315 ? TRUE : FALSE;
316 }
317
318 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
319 {
320 { "#0x00", 0x0 },
321 { "oshld", 0x1 },
322 { "oshst", 0x2 },
323 { "osh", 0x3 },
324 { "#0x04", 0x4 },
325 { "nshld", 0x5 },
326 { "nshst", 0x6 },
327 { "nsh", 0x7 },
328 { "#0x08", 0x8 },
329 { "ishld", 0x9 },
330 { "ishst", 0xa },
331 { "ish", 0xb },
332 { "#0x0c", 0xc },
333 { "ld", 0xd },
334 { "st", 0xe },
335 { "sy", 0xf },
336 };
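/* Editorial note (not part of the original source): the value column is
   the CRm field of DMB/DSB (and ISB), so e.g. "dmb ish" encodes 0xb and
   "dsb sy" encodes 0xf; the "#0x.." entries cover encodings that have no
   named option.  */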
337
338 /* Table describing the operands supported by the aliases of the HINT
339 instruction.
340
341 The name column is the operand that is accepted for the alias. The value
342 column is the hint number of the alias. The list of operands is terminated
343 by NULL in the name column. */
344
345 const struct aarch64_name_value_pair aarch64_hint_options[] =
346 {
347 { "csync", 0x11 }, /* PSB CSYNC. */
348 { NULL, 0x0 },
349 };
350
351 /* op -> op: load = 0 instruction = 1 store = 2
352 l -> level: 1-3
353 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
354 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
355 const struct aarch64_name_value_pair aarch64_prfops[32] =
356 {
357 { "pldl1keep", B(0, 1, 0) },
358 { "pldl1strm", B(0, 1, 1) },
359 { "pldl2keep", B(0, 2, 0) },
360 { "pldl2strm", B(0, 2, 1) },
361 { "pldl3keep", B(0, 3, 0) },
362 { "pldl3strm", B(0, 3, 1) },
363 { NULL, 0x06 },
364 { NULL, 0x07 },
365 { "plil1keep", B(1, 1, 0) },
366 { "plil1strm", B(1, 1, 1) },
367 { "plil2keep", B(1, 2, 0) },
368 { "plil2strm", B(1, 2, 1) },
369 { "plil3keep", B(1, 3, 0) },
370 { "plil3strm", B(1, 3, 1) },
371 { NULL, 0x0e },
372 { NULL, 0x0f },
373 { "pstl1keep", B(2, 1, 0) },
374 { "pstl1strm", B(2, 1, 1) },
375 { "pstl2keep", B(2, 2, 0) },
376 { "pstl2strm", B(2, 2, 1) },
377 { "pstl3keep", B(2, 3, 0) },
378 { "pstl3strm", B(2, 3, 1) },
379 { NULL, 0x16 },
380 { NULL, 0x17 },
381 { NULL, 0x18 },
382 { NULL, 0x19 },
383 { NULL, 0x1a },
384 { NULL, 0x1b },
385 { NULL, 0x1c },
386 { NULL, 0x1d },
387 { NULL, 0x1e },
388 { NULL, 0x1f },
389 };
390 #undef B
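/* Editorial worked example (not part of the original source):
   "pstl2strm" is B(2, 2, 1) = (2 << 3) | ((2 - 1) << 1) | 1 = 0x13,
   i.e. the 5-bit <prfop> value (type:level:policy) that PRFM encodes
   in its Rt field.  */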
391 \f
392 /* Utilities on value constraint. */
393
394 static inline int
395 value_in_range_p (int64_t value, int low, int high)
396 {
397 return (value >= low && value <= high) ? 1 : 0;
398 }
399
400 static inline int
401 value_aligned_p (int64_t value, int align)
402 {
403 return ((value & (align - 1)) == 0) ? 1 : 0;
404 }
405
406 /* A signed value fits in a field. */
407 static inline int
408 value_fit_signed_field_p (int64_t value, unsigned width)
409 {
410 assert (width < 32);
411 if (width < sizeof (value) * 8)
412 {
413 int64_t lim = (int64_t)1 << (width - 1);
414 if (value >= -lim && value < lim)
415 return 1;
416 }
417 return 0;
418 }
419
420 /* An unsigned value fits in a field. */
421 static inline int
422 value_fit_unsigned_field_p (int64_t value, unsigned width)
423 {
424 assert (width < 32);
425 if (width < sizeof (value) * 8)
426 {
427 int64_t lim = (int64_t)1 << width;
428 if (value >= 0 && value < lim)
429 return 1;
430 }
431 return 0;
432 }
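/* Editorial illustration (not part of the original source):
     value_fit_signed_field_p (-256, 9)    -> 1   (range -256..255)
     value_fit_signed_field_p (256, 9)     -> 0
     value_fit_unsigned_field_p (511, 9)   -> 1   (range 0..511)  */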
433
434 /* Return 1 if OPERAND is SP or WSP. */
435 int
436 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
437 {
438 return ((aarch64_get_operand_class (operand->type)
439 == AARCH64_OPND_CLASS_INT_REG)
440 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
441 && operand->reg.regno == 31);
442 }
443
444 /* Return 1 if OPERAND is XZR or WZR. */
445 int
446 aarch64_zero_register_p (const aarch64_opnd_info *operand)
447 {
448 return ((aarch64_get_operand_class (operand->type)
449 == AARCH64_OPND_CLASS_INT_REG)
450 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
451 && operand->reg.regno == 31);
452 }
453
454 /* Return true if the operand *OPERAND, which has the operand code
455 OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
456 qualified by the qualifier TARGET. */
457
458 static inline int
459 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
460 aarch64_opnd_qualifier_t target)
461 {
462 switch (operand->qualifier)
463 {
464 case AARCH64_OPND_QLF_W:
465 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
466 return 1;
467 break;
468 case AARCH64_OPND_QLF_X:
469 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
470 return 1;
471 break;
472 case AARCH64_OPND_QLF_WSP:
473 if (target == AARCH64_OPND_QLF_W
474 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
475 return 1;
476 break;
477 case AARCH64_OPND_QLF_SP:
478 if (target == AARCH64_OPND_QLF_X
479 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
480 return 1;
481 break;
482 default:
483 break;
484 }
485
486 return 0;
487 }
488
489 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
490 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
491
492 Return NIL if more than one expected qualifier is found. */
493
494 aarch64_opnd_qualifier_t
495 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
496 int idx,
497 const aarch64_opnd_qualifier_t known_qlf,
498 int known_idx)
499 {
500 int i, saved_i;
501
502 /* Special case.
503
504 When the known qualifier is NIL, we have to assume that there is only
505 one qualifier sequence in the *QSEQ_LIST and return the corresponding
506 qualifier directly. One scenario is that for instruction
507 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
508 which has only one possible valid qualifier sequence
509 NIL, S_D
510 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
511 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
512
513 Because the qualifier NIL has dual roles in the qualifier sequence:
514 it can mean no qualifier for the operand, or that the qualifier sequence is
515 not in use (when all qualifiers in the sequence are NILs), we have to
516 handle this special case here. */
517 if (known_qlf == AARCH64_OPND_NIL)
518 {
519 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
520 return qseq_list[0][idx];
521 }
522
523 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
524 {
525 if (qseq_list[i][known_idx] == known_qlf)
526 {
527 if (saved_i != -1)
528 /* More than one sequence is found to have KNOWN_QLF at
529 KNOWN_IDX. */
530 return AARCH64_OPND_NIL;
531 saved_i = i;
532 }
533 }
534
535 return qseq_list[saved_i][idx];
536 }
537
538 enum operand_qualifier_kind
539 {
540 OQK_NIL,
541 OQK_OPD_VARIANT,
542 OQK_VALUE_IN_RANGE,
543 OQK_MISC,
544 };
545
546 /* Operand qualifier description. */
547 struct operand_qualifier_data
548 {
549 /* The usage of the three data fields depends on the qualifier kind. */
550 int data0;
551 int data1;
552 int data2;
553 /* Description. */
554 const char *desc;
555 /* Kind. */
556 enum operand_qualifier_kind kind;
557 };
558
559 /* Indexed by the operand qualifier enumerators. */
560 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
561 {
562 {0, 0, 0, "NIL", OQK_NIL},
563
564 /* Operand variant qualifiers.
565 First 3 fields:
566 element size, number of elements and common value for encoding. */
567
568 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
569 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
570 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
571 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
572
573 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
574 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
575 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
576 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
577 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
578
579 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
580 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
581 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
582 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
583 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
584 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
585 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
586 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
587 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
588 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
589
590 /* Qualifiers constraining the value range.
591 First 3 fields:
592 Lower bound, higher bound, unused. */
593
594 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
595 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
596 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
597 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
598 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
599 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
600
601 /* Qualifiers for miscellaneous purposes.
602 First 3 fields:
603 unused, unused and unused. */
604
605 {0, 0, 0, "lsl", 0},
606 {0, 0, 0, "msl", 0},
607
608 {0, 0, 0, "retrieving", 0},
609 };
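/* Editorial example (not part of the original source): for
   AARCH64_OPND_QLF_V_4S the entry above gives an element size of 4, an
   element count of 4 and a standard encoding value of 0x5, i.e. a vector
   of four 32-bit elements whose common size:Q encoding is 0b101; the
   accessor functions below simply read these three fields.  */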
610
611 static inline bfd_boolean
612 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
613 {
614 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
615 ? TRUE : FALSE;
616 }
617
618 static inline bfd_boolean
619 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
620 {
621 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
622 ? TRUE : FALSE;
623 }
624
625 const char*
626 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
627 {
628 return aarch64_opnd_qualifiers[qualifier].desc;
629 }
630
631 /* Given an operand qualifier, return the expected data element size
632 of a qualified operand. */
633 unsigned char
634 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
635 {
636 assert (operand_variant_qualifier_p (qualifier) == TRUE);
637 return aarch64_opnd_qualifiers[qualifier].data0;
638 }
639
640 unsigned char
641 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
642 {
643 assert (operand_variant_qualifier_p (qualifier) == TRUE);
644 return aarch64_opnd_qualifiers[qualifier].data1;
645 }
646
647 aarch64_insn
648 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
649 {
650 assert (operand_variant_qualifier_p (qualifier) == TRUE);
651 return aarch64_opnd_qualifiers[qualifier].data2;
652 }
653
654 static int
655 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
656 {
657 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
658 return aarch64_opnd_qualifiers[qualifier].data0;
659 }
660
661 static int
662 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
663 {
664 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
665 return aarch64_opnd_qualifiers[qualifier].data1;
666 }
667
668 #ifdef DEBUG_AARCH64
669 void
670 aarch64_verbose (const char *str, ...)
671 {
672 va_list ap;
673 va_start (ap, str);
674 printf ("#### ");
675 vprintf (str, ap);
676 printf ("\n");
677 va_end (ap);
678 }
679
680 static inline void
681 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
682 {
683 int i;
684 printf ("#### \t");
685 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
686 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
687 printf ("\n");
688 }
689
690 static void
691 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
692 const aarch64_opnd_qualifier_t *qualifier)
693 {
694 int i;
695 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
696
697 aarch64_verbose ("dump_match_qualifiers:");
698 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
699 curr[i] = opnd[i].qualifier;
700 dump_qualifier_sequence (curr);
701 aarch64_verbose ("against");
702 dump_qualifier_sequence (qualifier);
703 }
704 #endif /* DEBUG_AARCH64 */
705
706 /* TODO: improve this; we could have an extra field at runtime to
707 store the number of operands rather than calculating it every time. */
708
709 int
710 aarch64_num_of_operands (const aarch64_opcode *opcode)
711 {
712 int i = 0;
713 const enum aarch64_opnd *opnds = opcode->operands;
714 while (opnds[i++] != AARCH64_OPND_NIL)
715 ;
716 --i;
717 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
718 return i;
719 }
720
721 /* Find the best-matched qualifier sequence in *QUALIFIERS_LIST for INST.
722 If it succeeds, fill the found sequence in *RET and return 1; otherwise return 0.
723
724 N.B. on entry, it is very likely that only some operands in *INST
725 have had their qualifiers established.
726
727 If STOP_AT is not -1, the function will only try to match
728 the qualifier sequence for operands before and including the operand
729 of index STOP_AT; and on success *RET will only be filled with the first
730 (STOP_AT+1) qualifiers.
731
732 A couple of examples of the matching algorithm:
733
734 X,W,NIL should match
735 X,W,NIL
736
737 NIL,NIL should match
738 X ,NIL
739
740 Apart from serving the main encoding routine, this can also be called
741 during or after the operand decoding. */
742
743 int
744 aarch64_find_best_match (const aarch64_inst *inst,
745 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
746 int stop_at, aarch64_opnd_qualifier_t *ret)
747 {
748 int found = 0;
749 int i, num_opnds;
750 const aarch64_opnd_qualifier_t *qualifiers;
751
752 num_opnds = aarch64_num_of_operands (inst->opcode);
753 if (num_opnds == 0)
754 {
755 DEBUG_TRACE ("SUCCEED: no operand");
756 return 1;
757 }
758
759 if (stop_at < 0 || stop_at >= num_opnds)
760 stop_at = num_opnds - 1;
761
762 /* For each pattern. */
763 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
764 {
765 int j;
766 qualifiers = *qualifiers_list;
767
768 /* Start as positive. */
769 found = 1;
770
771 DEBUG_TRACE ("%d", i);
772 #ifdef DEBUG_AARCH64
773 if (debug_dump)
774 dump_match_qualifiers (inst->operands, qualifiers);
775 #endif
776
777 /* Most opcodes have far fewer patterns in the list.
778 The first NIL qualifier indicates the end of the list. */
779 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
780 {
781 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
782 if (i)
783 found = 0;
784 break;
785 }
786
787 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
788 {
789 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
790 {
791 /* Either the operand does not have a qualifier, or the qualifier
792 for the operand needs to be deduced from the qualifier
793 sequence.
794 In the latter case, any constraint checking related to
795 the obtained qualifier should be done later in
796 operand_general_constraint_met_p. */
797 continue;
798 }
799 else if (*qualifiers != inst->operands[j].qualifier)
800 {
801 /* Unless the target qualifier can also qualify the operand
802 (which already has a non-nil qualifier), non-equal
803 qualifiers generally do not match. */
804 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
805 continue;
806 else
807 {
808 found = 0;
809 break;
810 }
811 }
812 else
813 continue; /* Equal qualifiers are certainly matched. */
814 }
815
816 /* Qualifiers established. */
817 if (found == 1)
818 break;
819 }
820
821 if (found == 1)
822 {
823 /* Fill the result in *RET. */
824 int j;
825 qualifiers = *qualifiers_list;
826
827 DEBUG_TRACE ("complete qualifiers using list %d", i);
828 #ifdef DEBUG_AARCH64
829 if (debug_dump)
830 dump_qualifier_sequence (qualifiers);
831 #endif
832
833 for (j = 0; j <= stop_at; ++j, ++qualifiers)
834 ret[j] = *qualifiers;
835 for (; j < AARCH64_MAX_OPND_NUM; ++j)
836 ret[j] = AARCH64_OPND_QLF_NIL;
837
838 DEBUG_TRACE ("SUCCESS");
839 return 1;
840 }
841
842 DEBUG_TRACE ("FAIL");
843 return 0;
844 }
845
846 /* Operand qualifier matching and resolving.
847
848 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
849 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
850
851 If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
852 succeeds. */
853
854 static int
855 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
856 {
857 int i;
858 aarch64_opnd_qualifier_seq_t qualifiers;
859
860 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
861 qualifiers))
862 {
863 DEBUG_TRACE ("matching FAIL");
864 return 0;
865 }
866
867 /* Update the qualifiers. */
868 if (update_p == TRUE)
869 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
870 {
871 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
872 break;
873 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
874 "update %s with %s for operand %d",
875 aarch64_get_qualifier_name (inst->operands[i].qualifier),
876 aarch64_get_qualifier_name (qualifiers[i]), i);
877 inst->operands[i].qualifier = qualifiers[i];
878 }
879
880 DEBUG_TRACE ("matching SUCCESS");
881 return 1;
882 }
883
884 /* Return TRUE if VALUE is a wide constant that can be moved into a general
885 register by MOVZ.
886
887 IS32 indicates whether VALUE is a 32-bit immediate or not.
888 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
889 amount will be returned in *SHIFT_AMOUNT. */
890
891 bfd_boolean
892 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
893 {
894 int amount;
895
896 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
897
898 if (is32)
899 {
900 /* Allow all zeros or all ones in top 32-bits, so that
901 32-bit constant expressions like ~0x80000000 are
902 permitted. */
903 uint64_t ext = value;
904 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
905 /* Immediate out of range. */
906 return FALSE;
907 value &= (int64_t) 0xffffffff;
908 }
909
910 /* first, try movz then movn */
911 amount = -1;
912 if ((value & ((int64_t) 0xffff << 0)) == value)
913 amount = 0;
914 else if ((value & ((int64_t) 0xffff << 16)) == value)
915 amount = 16;
916 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
917 amount = 32;
918 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
919 amount = 48;
920
921 if (amount == -1)
922 {
923 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
924 return FALSE;
925 }
926
927 if (shift_amount != NULL)
928 *shift_amount = amount;
929
930 DEBUG_TRACE ("exit TRUE with amount %d", amount);
931
932 return TRUE;
933 }
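/* Editorial example (not part of the original source): 0x12340000 is
   accepted with *SHIFT_AMOUNT set to 16 (cf. "movz x0, #0x1234, lsl #16"),
   whereas 0x12340001 has non-zero bits in two 16-bit chunks and is
   rejected.  */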
934
935 /* Build the accepted values for immediate logical SIMD instructions.
936
937 The standard encodings of the immediate value are:
938 N imms immr SIMD size R S
939 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
940 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
941 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
942 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
943 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
944 0 11110s 00000r 2 UInt(r) UInt(s)
945 where all-ones value of S is reserved.
946
947 Let's call E the SIMD size.
948
949 The immediate value is: S+1 bits '1' rotated to the right by R.
950
951 The total number of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
952 (remember S != E - 1). */
953
954 #define TOTAL_IMM_NB 5334
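/* Editorial worked example (not part of the original source):
   0x00ff00ff00ff00ff is the 16-bit element 0x00ff (S + 1 = 8 ones,
   R = 0) replicated four times; its standard encoding is therefore
   N = 0, immr = 000000, imms = 100111.  */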
955
956 typedef struct
957 {
958 uint64_t imm;
959 aarch64_insn encoding;
960 } simd_imm_encoding;
961
962 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
963
964 static int
965 simd_imm_encoding_cmp(const void *i1, const void *i2)
966 {
967 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
968 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
969
970 if (imm1->imm < imm2->imm)
971 return -1;
972 if (imm1->imm > imm2->imm)
973 return +1;
974 return 0;
975 }
976
977 /* immediate bitfield standard encoding
978 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
979 1 ssssss rrrrrr 64 rrrrrr ssssss
980 0 0sssss 0rrrrr 32 rrrrr sssss
981 0 10ssss 00rrrr 16 rrrr ssss
982 0 110sss 000rrr 8 rrr sss
983 0 1110ss 0000rr 4 rr ss
984 0 11110s 00000r 2 r s */
985 static inline int
986 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
987 {
988 return (is64 << 12) | (r << 6) | s;
989 }
990
991 static void
992 build_immediate_table (void)
993 {
994 uint32_t log_e, e, s, r, s_mask;
995 uint64_t mask, imm;
996 int nb_imms;
997 int is64;
998
999 nb_imms = 0;
1000 for (log_e = 1; log_e <= 6; log_e++)
1001 {
1002 /* Get element size. */
1003 e = 1u << log_e;
1004 if (log_e == 6)
1005 {
1006 is64 = 1;
1007 mask = 0xffffffffffffffffull;
1008 s_mask = 0;
1009 }
1010 else
1011 {
1012 is64 = 0;
1013 mask = (1ull << e) - 1;
1014 /* log_e s_mask
1015 1 ((1 << 4) - 1) << 2 = 111100
1016 2 ((1 << 3) - 1) << 3 = 111000
1017 3 ((1 << 2) - 1) << 4 = 110000
1018 4 ((1 << 1) - 1) << 5 = 100000
1019 5 ((1 << 0) - 1) << 6 = 000000 */
1020 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1021 }
1022 for (s = 0; s < e - 1; s++)
1023 for (r = 0; r < e; r++)
1024 {
1025 /* s+1 consecutive bits to 1 (s < 63) */
1026 imm = (1ull << (s + 1)) - 1;
1027 /* rotate right by r */
1028 if (r != 0)
1029 imm = (imm >> r) | ((imm << (e - r)) & mask);
1030 /* replicate the constant depending on SIMD size */
1031 switch (log_e)
1032 {
1033 case 1: imm = (imm << 2) | imm;
1034 case 2: imm = (imm << 4) | imm;
1035 case 3: imm = (imm << 8) | imm;
1036 case 4: imm = (imm << 16) | imm;
1037 case 5: imm = (imm << 32) | imm;
1038 case 6: break;
1039 default: abort ();
1040 }
1041 simd_immediates[nb_imms].imm = imm;
1042 simd_immediates[nb_imms].encoding =
1043 encode_immediate_bitfield(is64, s | s_mask, r);
1044 nb_imms++;
1045 }
1046 }
1047 assert (nb_imms == TOTAL_IMM_NB);
1048 qsort(simd_immediates, nb_imms,
1049 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1050 }
1051
1052 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1053 be accepted by logical (immediate) instructions
1054 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1055
1056 IS32 indicates whether or not VALUE is a 32-bit immediate.
1057 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1058 VALUE will be returned in *ENCODING. */
1059
1060 bfd_boolean
1061 aarch64_logical_immediate_p (uint64_t value, int is32, aarch64_insn *encoding)
1062 {
1063 simd_imm_encoding imm_enc;
1064 const simd_imm_encoding *imm_encoding;
1065 static bfd_boolean initialized = FALSE;
1066
1067 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1068 value, is32);
1069
1070 if (initialized == FALSE)
1071 {
1072 build_immediate_table ();
1073 initialized = TRUE;
1074 }
1075
1076 if (is32)
1077 {
1078 /* Allow all zeros or all ones in top 32-bits, so that
1079 constant expressions like ~1 are permitted. */
1080 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1081 return FALSE;
1082
1083 /* Replicate the 32 lower bits to the 32 upper bits. */
1084 value &= 0xffffffff;
1085 value |= value << 32;
1086 }
1087
1088 imm_enc.imm = value;
1089 imm_encoding = (const simd_imm_encoding *)
1090 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1091 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1092 if (imm_encoding == NULL)
1093 {
1094 DEBUG_TRACE ("exit with FALSE");
1095 return FALSE;
1096 }
1097 if (encoding != NULL)
1098 *encoding = imm_encoding->encoding;
1099 DEBUG_TRACE ("exit with TRUE");
1100 return TRUE;
1101 }
1102
1103 /* If 64-bit immediate IMM is in the format of
1104 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1105 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1106 of value "abcdefgh". Otherwise return -1. */
1107 int
1108 aarch64_shrink_expanded_imm8 (uint64_t imm)
1109 {
1110 int i, ret;
1111 uint32_t byte;
1112
1113 ret = 0;
1114 for (i = 0; i < 8; i++)
1115 {
1116 byte = (imm >> (8 * i)) & 0xff;
1117 if (byte == 0xff)
1118 ret |= 1 << i;
1119 else if (byte != 0x00)
1120 return -1;
1121 }
1122 return ret;
1123 }
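/* Editorial example (not part of the original source):
   0xff00ff0000ff00ff shrinks to 0xa5 (bits 7, 5, 2 and 0 set, one per
   0xff byte), while any byte other than 0x00 or 0xff makes the function
   return -1.  */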
1124
1125 /* Utility inline functions for operand_general_constraint_met_p. */
1126
1127 static inline void
1128 set_error (aarch64_operand_error *mismatch_detail,
1129 enum aarch64_operand_error_kind kind, int idx,
1130 const char* error)
1131 {
1132 if (mismatch_detail == NULL)
1133 return;
1134 mismatch_detail->kind = kind;
1135 mismatch_detail->index = idx;
1136 mismatch_detail->error = error;
1137 }
1138
1139 static inline void
1140 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1141 const char* error)
1142 {
1143 if (mismatch_detail == NULL)
1144 return;
1145 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1146 }
1147
1148 static inline void
1149 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1150 int idx, int lower_bound, int upper_bound,
1151 const char* error)
1152 {
1153 if (mismatch_detail == NULL)
1154 return;
1155 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1156 mismatch_detail->data[0] = lower_bound;
1157 mismatch_detail->data[1] = upper_bound;
1158 }
1159
1160 static inline void
1161 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1162 int idx, int lower_bound, int upper_bound)
1163 {
1164 if (mismatch_detail == NULL)
1165 return;
1166 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1167 _("immediate value"));
1168 }
1169
1170 static inline void
1171 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1172 int idx, int lower_bound, int upper_bound)
1173 {
1174 if (mismatch_detail == NULL)
1175 return;
1176 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1177 _("immediate offset"));
1178 }
1179
1180 static inline void
1181 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1182 int idx, int lower_bound, int upper_bound)
1183 {
1184 if (mismatch_detail == NULL)
1185 return;
1186 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1187 _("register number"));
1188 }
1189
1190 static inline void
1191 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1192 int idx, int lower_bound, int upper_bound)
1193 {
1194 if (mismatch_detail == NULL)
1195 return;
1196 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1197 _("register element index"));
1198 }
1199
1200 static inline void
1201 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1202 int idx, int lower_bound, int upper_bound)
1203 {
1204 if (mismatch_detail == NULL)
1205 return;
1206 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1207 _("shift amount"));
1208 }
1209
1210 static inline void
1211 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1212 int alignment)
1213 {
1214 if (mismatch_detail == NULL)
1215 return;
1216 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1217 mismatch_detail->data[0] = alignment;
1218 }
1219
1220 static inline void
1221 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1222 int expected_num)
1223 {
1224 if (mismatch_detail == NULL)
1225 return;
1226 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1227 mismatch_detail->data[0] = expected_num;
1228 }
1229
1230 static inline void
1231 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1232 const char* error)
1233 {
1234 if (mismatch_detail == NULL)
1235 return;
1236 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1237 }
1238
1239 /* General constraint checking based on operand code.
1240
1241 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1242 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1243
1244 This function has to be called after the qualifiers for all operands
1245 have been resolved.
1246
1247 The mismatch error message is returned in *MISMATCH_DETAIL upon request,
1248 i.e. when MISMATCH_DETAIL is non-NULL. This avoids generating
1249 error messages during disassembly, where they are not
1250 wanted. We avoid the dynamic construction of error message strings
1251 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1252 use a combination of an error code, a static string and some integer data to
1253 represent an error. */
1254
1255 static int
1256 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1257 enum aarch64_opnd type,
1258 const aarch64_opcode *opcode,
1259 aarch64_operand_error *mismatch_detail)
1260 {
1261 unsigned num;
1262 unsigned char size;
1263 int64_t imm;
1264 const aarch64_opnd_info *opnd = opnds + idx;
1265 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1266
1267 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1268
1269 switch (aarch64_operands[type].op_class)
1270 {
1271 case AARCH64_OPND_CLASS_INT_REG:
1272 /* Check pair reg constraints for cas* instructions. */
1273 if (type == AARCH64_OPND_PAIRREG)
1274 {
1275 assert (idx == 1 || idx == 3);
1276 if (opnds[idx - 1].reg.regno % 2 != 0)
1277 {
1278 set_syntax_error (mismatch_detail, idx - 1,
1279 _("reg pair must start from even reg"));
1280 return 0;
1281 }
1282 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1283 {
1284 set_syntax_error (mismatch_detail, idx,
1285 _("reg pair must be contiguous"));
1286 return 0;
1287 }
1288 break;
1289 }
1290
1291 /* <Xt> may be optional in some IC and TLBI instructions. */
1292 if (type == AARCH64_OPND_Rt_SYS)
1293 {
1294 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1295 == AARCH64_OPND_CLASS_SYSTEM));
1296 if (opnds[1].present
1297 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1298 {
1299 set_other_error (mismatch_detail, idx, _("extraneous register"));
1300 return 0;
1301 }
1302 if (!opnds[1].present
1303 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1304 {
1305 set_other_error (mismatch_detail, idx, _("missing register"));
1306 return 0;
1307 }
1308 }
1309 switch (qualifier)
1310 {
1311 case AARCH64_OPND_QLF_WSP:
1312 case AARCH64_OPND_QLF_SP:
1313 if (!aarch64_stack_pointer_p (opnd))
1314 {
1315 set_other_error (mismatch_detail, idx,
1316 _("stack pointer register expected"));
1317 return 0;
1318 }
1319 break;
1320 default:
1321 break;
1322 }
1323 break;
1324
1325 case AARCH64_OPND_CLASS_COND:
1326 if (type == AARCH64_OPND_COND1
1327 && (opnds[idx].cond->value & 0xe) == 0xe)
1328 {
1329 /* Don't allow AL or NV. */
1330 set_syntax_error (mismatch_detail, idx, NULL);
1331 }
1332 break;
1333
1334 case AARCH64_OPND_CLASS_ADDRESS:
1335 /* Check writeback. */
1336 switch (opcode->iclass)
1337 {
1338 case ldst_pos:
1339 case ldst_unscaled:
1340 case ldstnapair_offs:
1341 case ldstpair_off:
1342 case ldst_unpriv:
1343 if (opnd->addr.writeback == 1)
1344 {
1345 set_syntax_error (mismatch_detail, idx,
1346 _("unexpected address writeback"));
1347 return 0;
1348 }
1349 break;
1350 case ldst_imm9:
1351 case ldstpair_indexed:
1352 case asisdlsep:
1353 case asisdlsop:
1354 if (opnd->addr.writeback == 0)
1355 {
1356 set_syntax_error (mismatch_detail, idx,
1357 _("address writeback expected"));
1358 return 0;
1359 }
1360 break;
1361 default:
1362 assert (opnd->addr.writeback == 0);
1363 break;
1364 }
1365 switch (type)
1366 {
1367 case AARCH64_OPND_ADDR_SIMM7:
1368 /* Scaled signed 7-bit immediate offset. */
1369 /* Get the size of the data element that is accessed, which may be
1370 different from that of the source register,
1371 e.g. in strb/ldrb. */
1372 size = aarch64_get_qualifier_esize (opnd->qualifier);
1373 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1374 {
1375 set_offset_out_of_range_error (mismatch_detail, idx,
1376 -64 * size, 63 * size);
1377 return 0;
1378 }
1379 if (!value_aligned_p (opnd->addr.offset.imm, size))
1380 {
1381 set_unaligned_error (mismatch_detail, idx, size);
1382 return 0;
1383 }
1384 break;
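/* Editorial example (not part of the original source): for
   "ldp x0, x1, [sp, #imm]" the element size is 8, so the scaled 7-bit
   offset must be a multiple of 8 in the range -512..504.  */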
1385 case AARCH64_OPND_ADDR_SIMM9:
1386 /* Unscaled signed 9-bit immediate offset. */
1387 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1388 {
1389 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1390 return 0;
1391 }
1392 break;
1393
1394 case AARCH64_OPND_ADDR_SIMM9_2:
1395 /* Unscaled signed 9-bit immediate offset, which has to be negative
1396 or unaligned. */
1397 size = aarch64_get_qualifier_esize (qualifier);
1398 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1399 && !value_aligned_p (opnd->addr.offset.imm, size))
1400 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1401 return 1;
1402 set_other_error (mismatch_detail, idx,
1403 _("negative or unaligned offset expected"));
1404 return 0;
1405
1406 case AARCH64_OPND_SIMD_ADDR_POST:
1407 /* AdvSIMD load/store multiple structures, post-index. */
1408 assert (idx == 1);
1409 if (opnd->addr.offset.is_reg)
1410 {
1411 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1412 return 1;
1413 else
1414 {
1415 set_other_error (mismatch_detail, idx,
1416 _("invalid register offset"));
1417 return 0;
1418 }
1419 }
1420 else
1421 {
1422 const aarch64_opnd_info *prev = &opnds[idx-1];
1423 unsigned num_bytes; /* total number of bytes transferred. */
1424 /* The opcode dependent area stores the number of elements in
1425 each structure to be loaded/stored. */
1426 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1427 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1428 /* Special handling of loading a single structure to all lanes. */
1429 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1430 * aarch64_get_qualifier_esize (prev->qualifier);
1431 else
1432 num_bytes = prev->reglist.num_regs
1433 * aarch64_get_qualifier_esize (prev->qualifier)
1434 * aarch64_get_qualifier_nelem (prev->qualifier);
1435 if ((int) num_bytes != opnd->addr.offset.imm)
1436 {
1437 set_other_error (mismatch_detail, idx,
1438 _("invalid post-increment amount"));
1439 return 0;
1440 }
1441 }
1442 break;
1443
1444 case AARCH64_OPND_ADDR_REGOFF:
1445 /* Get the size of the data element that is accessed, which may be
1446 different from that of the source register,
1447 e.g. in strb/ldrb. */
1448 size = aarch64_get_qualifier_esize (opnd->qualifier);
1449 /* It is either no shift or shift by the binary logarithm of SIZE. */
1450 if (opnd->shifter.amount != 0
1451 && opnd->shifter.amount != (int)get_logsz (size))
1452 {
1453 set_other_error (mismatch_detail, idx,
1454 _("invalid shift amount"));
1455 return 0;
1456 }
1457 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1458 operators. */
1459 switch (opnd->shifter.kind)
1460 {
1461 case AARCH64_MOD_UXTW:
1462 case AARCH64_MOD_LSL:
1463 case AARCH64_MOD_SXTW:
1464 case AARCH64_MOD_SXTX: break;
1465 default:
1466 set_other_error (mismatch_detail, idx,
1467 _("invalid extend/shift operator"));
1468 return 0;
1469 }
1470 break;
1471
1472 case AARCH64_OPND_ADDR_UIMM12:
1473 imm = opnd->addr.offset.imm;
1474 /* Get the size of the data element that is accessed, which may be
1475 different from that of the source register,
1476 e.g. in strb/ldrb. */
1477 size = aarch64_get_qualifier_esize (qualifier);
1478 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1479 {
1480 set_offset_out_of_range_error (mismatch_detail, idx,
1481 0, 4095 * size);
1482 return 0;
1483 }
1484 if (!value_aligned_p (opnd->addr.offset.imm, size))
1485 {
1486 set_unaligned_error (mismatch_detail, idx, size);
1487 return 0;
1488 }
1489 break;
1490
1491 case AARCH64_OPND_ADDR_PCREL14:
1492 case AARCH64_OPND_ADDR_PCREL19:
1493 case AARCH64_OPND_ADDR_PCREL21:
1494 case AARCH64_OPND_ADDR_PCREL26:
1495 imm = opnd->imm.value;
1496 if (operand_need_shift_by_two (get_operand_from_code (type)))
1497 {
1498 /* The offset value in a PC-relative branch instruction is always
1499 4-byte aligned and is encoded without the lowest 2 bits. */
1500 if (!value_aligned_p (imm, 4))
1501 {
1502 set_unaligned_error (mismatch_detail, idx, 4);
1503 return 0;
1504 }
1505 /* Right shift by 2 so that we can carry out the following check
1506 canonically. */
1507 imm >>= 2;
1508 }
1509 size = get_operand_fields_width (get_operand_from_code (type));
1510 if (!value_fit_signed_field_p (imm, size))
1511 {
1512 set_other_error (mismatch_detail, idx,
1513 _("immediate out of range"));
1514 return 0;
1515 }
1516 break;
1517
1518 default:
1519 break;
1520 }
1521 break;
1522
1523 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1524 if (type == AARCH64_OPND_LEt)
1525 {
1526 /* Get the upper bound for the element index. */
1527 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1528 if (!value_in_range_p (opnd->reglist.index, 0, num))
1529 {
1530 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1531 return 0;
1532 }
1533 }
1534 /* The opcode dependent area stores the number of elements in
1535 each structure to be loaded/stored. */
1536 num = get_opcode_dependent_value (opcode);
1537 switch (type)
1538 {
1539 case AARCH64_OPND_LVt:
1540 assert (num >= 1 && num <= 4);
1541 /* Except for LD1/ST1, the number of registers should be equal to that
1542 of the structure elements. */
1543 if (num != 1 && opnd->reglist.num_regs != num)
1544 {
1545 set_reg_list_error (mismatch_detail, idx, num);
1546 return 0;
1547 }
1548 break;
1549 case AARCH64_OPND_LVt_AL:
1550 case AARCH64_OPND_LEt:
1551 assert (num >= 1 && num <= 4);
1552 /* The number of registers should be equal to that of the structure
1553 elements. */
1554 if (opnd->reglist.num_regs != num)
1555 {
1556 set_reg_list_error (mismatch_detail, idx, num);
1557 return 0;
1558 }
1559 break;
1560 default:
1561 break;
1562 }
1563 break;
1564
1565 case AARCH64_OPND_CLASS_IMMEDIATE:
1566 /* Constraint check on immediate operand. */
1567 imm = opnd->imm.value;
1568 /* E.g. imm_0_31 constrains value to be 0..31. */
1569 if (qualifier_value_in_range_constraint_p (qualifier)
1570 && !value_in_range_p (imm, get_lower_bound (qualifier),
1571 get_upper_bound (qualifier)))
1572 {
1573 set_imm_out_of_range_error (mismatch_detail, idx,
1574 get_lower_bound (qualifier),
1575 get_upper_bound (qualifier));
1576 return 0;
1577 }
1578
1579 switch (type)
1580 {
1581 case AARCH64_OPND_AIMM:
1582 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1583 {
1584 set_other_error (mismatch_detail, idx,
1585 _("invalid shift operator"));
1586 return 0;
1587 }
1588 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1589 {
1590 set_other_error (mismatch_detail, idx,
1591 _("shift amount expected to be 0 or 12"));
1592 return 0;
1593 }
1594 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1595 {
1596 set_other_error (mismatch_detail, idx,
1597 _("immediate out of range"));
1598 return 0;
1599 }
1600 break;
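/* Editorial example (not part of the original source):
   "add x0, x1, #0x123, lsl #12" passes the AIMM checks above (an LSL
   shift of 0 or 12 and a 12-bit unsigned immediate), whereas a shift
   amount of 8 would be rejected.  */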
1601
1602 case AARCH64_OPND_HALF:
1603 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1604 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1605 {
1606 set_other_error (mismatch_detail, idx,
1607 _("invalid shift operator"));
1608 return 0;
1609 }
1610 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1611 if (!value_aligned_p (opnd->shifter.amount, 16))
1612 {
1613 set_other_error (mismatch_detail, idx,
1614 _("shift amount should be a multiple of 16"));
1615 return 0;
1616 }
1617 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1618 {
1619 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1620 0, size * 8 - 16);
1621 return 0;
1622 }
1623 if (opnd->imm.value < 0)
1624 {
1625 set_other_error (mismatch_detail, idx,
1626 _("negative immediate value not allowed"));
1627 return 0;
1628 }
1629 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1630 {
1631 set_other_error (mismatch_detail, idx,
1632 _("immediate out of range"));
1633 return 0;
1634 }
1635 break;
1636
1637 case AARCH64_OPND_IMM_MOV:
1638 {
1639 int is32 = aarch64_get_qualifier_esize (opnds[0].qualifier) == 4;
1640 imm = opnd->imm.value;
1641 assert (idx == 1);
1642 switch (opcode->op)
1643 {
1644 case OP_MOV_IMM_WIDEN:
1645 imm = ~imm;
1646 /* Fall through... */
1647 case OP_MOV_IMM_WIDE:
1648 if (!aarch64_wide_constant_p (imm, is32, NULL))
1649 {
1650 set_other_error (mismatch_detail, idx,
1651 _("immediate out of range"));
1652 return 0;
1653 }
1654 break;
1655 case OP_MOV_IMM_LOG:
1656 if (!aarch64_logical_immediate_p (imm, is32, NULL))
1657 {
1658 set_other_error (mismatch_detail, idx,
1659 _("immediate out of range"));
1660 return 0;
1661 }
1662 break;
1663 default:
1664 assert (0);
1665 return 0;
1666 }
1667 }
1668 break;
1669
1670 case AARCH64_OPND_NZCV:
1671 case AARCH64_OPND_CCMP_IMM:
1672 case AARCH64_OPND_EXCEPTION:
1673 case AARCH64_OPND_UIMM4:
1674 case AARCH64_OPND_UIMM7:
1675 case AARCH64_OPND_UIMM3_OP1:
1676 case AARCH64_OPND_UIMM3_OP2:
1677 size = get_operand_fields_width (get_operand_from_code (type));
1678 assert (size < 32);
1679 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1680 {
1681 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1682 (1 << size) - 1);
1683 return 0;
1684 }
1685 break;
1686
1687 case AARCH64_OPND_WIDTH:
1688 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
1689 && opnds[0].type == AARCH64_OPND_Rd);
1690 size = get_upper_bound (qualifier);
1691 if (opnd->imm.value + opnds[idx-1].imm.value > size)
1692 /* lsb+width <= reg.size */
1693 {
1694 set_imm_out_of_range_error (mismatch_detail, idx, 1,
1695 size - opnds[idx-1].imm.value);
1696 return 0;
1697 }
1698 break;
1699
1700 case AARCH64_OPND_LIMM:
1701 {
1702 int is32 = opnds[0].qualifier == AARCH64_OPND_QLF_W;
1703 uint64_t uimm = opnd->imm.value;
1704 if (opcode->op == OP_BIC)
1705 uimm = ~uimm;
1706 if (aarch64_logical_immediate_p (uimm, is32, NULL) == FALSE)
1707 {
1708 set_other_error (mismatch_detail, idx,
1709 _("immediate out of range"));
1710 return 0;
1711 }
1712 }
1713 break;
1714
1715 case AARCH64_OPND_IMM0:
1716 case AARCH64_OPND_FPIMM0:
1717 if (opnd->imm.value != 0)
1718 {
1719 set_other_error (mismatch_detail, idx,
1720 _("immediate zero expected"));
1721 return 0;
1722 }
1723 break;
1724
1725 case AARCH64_OPND_SHLL_IMM:
1726 assert (idx == 2);
1727 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
1728 if (opnd->imm.value != size)
1729 {
1730 set_other_error (mismatch_detail, idx,
1731 _("invalid shift amount"));
1732 return 0;
1733 }
1734 break;
1735
1736 case AARCH64_OPND_IMM_VLSL:
1737 size = aarch64_get_qualifier_esize (qualifier);
1738 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
1739 {
1740 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1741 size * 8 - 1);
1742 return 0;
1743 }
1744 break;
1745
1746 case AARCH64_OPND_IMM_VLSR:
1747 size = aarch64_get_qualifier_esize (qualifier);
1748 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
1749 {
1750 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
1751 return 0;
1752 }
1753 break;
1754
1755 case AARCH64_OPND_SIMD_IMM:
1756 case AARCH64_OPND_SIMD_IMM_SFT:
1757 /* Qualifier check. */
1758 switch (qualifier)
1759 {
1760 case AARCH64_OPND_QLF_LSL:
1761 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1762 {
1763 set_other_error (mismatch_detail, idx,
1764 _("invalid shift operator"));
1765 return 0;
1766 }
1767 break;
1768 case AARCH64_OPND_QLF_MSL:
1769 if (opnd->shifter.kind != AARCH64_MOD_MSL)
1770 {
1771 set_other_error (mismatch_detail, idx,
1772 _("invalid shift operator"));
1773 return 0;
1774 }
1775 break;
1776 case AARCH64_OPND_QLF_NIL:
1777 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1778 {
1779 set_other_error (mismatch_detail, idx,
1780 _("shift is not permitted"));
1781 return 0;
1782 }
1783 break;
1784 default:
1785 assert (0);
1786 return 0;
1787 }
1788 /* Is the immediate valid? */
1789 assert (idx == 1);
1790 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
1791 {
1792 /* uimm8 or simm8 */
1793 if (!value_in_range_p (opnd->imm.value, -128, 255))
1794 {
1795 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
1796 return 0;
1797 }
1798 }
1799 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
1800 {
1801 /* uimm64 is not
1802 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1803 ffffffffgggggggghhhhhhhh'. */
1804 set_other_error (mismatch_detail, idx,
1805 _("invalid value for immediate"));
1806 return 0;
1807 }
1808 /* Is the shift amount valid? */
1809 switch (opnd->shifter.kind)
1810 {
1811 case AARCH64_MOD_LSL:
1812 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1813 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
1814 {
1815 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
1816 (size - 1) * 8);
1817 return 0;
1818 }
1819 if (!value_aligned_p (opnd->shifter.amount, 8))
1820 {
1821 set_unaligned_error (mismatch_detail, idx, 8);
1822 return 0;
1823 }
1824 break;
1825 case AARCH64_MOD_MSL:
1826 /* Only 8 and 16 are valid shift amounts. */
1827 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
1828 {
1829 set_other_error (mismatch_detail, idx,
1830 _("shift amount expected to be 0 or 16"));
1831 return 0;
1832 }
1833 break;
1834 default:
1835 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1836 {
1837 set_other_error (mismatch_detail, idx,
1838 _("invalid shift operator"));
1839 return 0;
1840 }
1841 break;
1842 }
1843 break;
1844
1845 case AARCH64_OPND_FPIMM:
1846 case AARCH64_OPND_SIMD_FPIMM:
1847 if (opnd->imm.is_fp == 0)
1848 {
1849 set_other_error (mismatch_detail, idx,
1850 _("floating-point immediate expected"));
1851 return 0;
1852 }
1853 /* The value is expected to be an 8-bit floating-point constant with
1854 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1855 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1856 instruction). */
1857 if (!value_in_range_p (opnd->imm.value, 0, 255))
1858 {
1859 set_other_error (mismatch_detail, idx,
1860 _("immediate out of range"));
1861 return 0;
1862 }
1863 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1864 {
1865 set_other_error (mismatch_detail, idx,
1866 _("invalid shift operator"));
1867 return 0;
1868 }
1869 break;
1870
1871 default:
1872 break;
1873 }
1874 break;
1875
1876 case AARCH64_OPND_CLASS_CP_REG:
1877 /* Cn or Cm: 4-bit opcode field named for historical reasons.
1878 Valid range: C0 - C15. */
1879 if (opnd->reg.regno > 15)
1880 {
1881 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1882 return 0;
1883 }
1884 break;
1885
1886 case AARCH64_OPND_CLASS_SYSTEM:
1887 switch (type)
1888 {
1889 case AARCH64_OPND_PSTATEFIELD:
1890 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
1891 /* MSR UAO, #uimm4
1892 MSR PAN, #uimm4
1893 The immediate must be #0 or #1. */
1894 if ((opnd->pstatefield == 0x03 /* UAO. */
1895 || opnd->pstatefield == 0x04) /* PAN. */
1896 && opnds[1].imm.value > 1)
1897 {
1898 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
1899 return 0;
1900 }
1901 /* MSR SPSel, #uimm4
1902 Uses uimm4 as a control value to select the stack pointer: if
1903 bit 0 is set it selects the current exception level's stack
1904 pointer; if bit 0 is clear it selects the shared EL0 stack pointer.
1905 Bits 1 to 3 of uimm4 are reserved and should be zero. */
1906 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
1907 {
1908 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
1909 return 0;
1910 }
1911 break;
1912 default:
1913 break;
1914 }
1915 break;
1916
1917 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
1918 /* Get the upper bound for the element index. */
1919 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1920 /* Index out-of-range. */
1921 if (!value_in_range_p (opnd->reglane.index, 0, num))
1922 {
1923 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1924 return 0;
1925 }
1926 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
1927 <Vm> is the vector register (V0-V31) or (V0-V15), whose
1928 number is encoded in "size:M:Rm":
1929 size <Vm>
1930 00 RESERVED
1931 01 0:Rm
1932 10 M:Rm
1933 11 RESERVED */
1934 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
1935 && !value_in_range_p (opnd->reglane.regno, 0, 15))
1936 {
1937 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1938 return 0;
1939 }
1940 break;
1941
1942 case AARCH64_OPND_CLASS_MODIFIED_REG:
1943 assert (idx == 1 || idx == 2);
1944 switch (type)
1945 {
1946 case AARCH64_OPND_Rm_EXT:
1947 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
1948 && opnd->shifter.kind != AARCH64_MOD_LSL)
1949 {
1950 set_other_error (mismatch_detail, idx,
1951 _("extend operator expected"));
1952 return 0;
1953 }
1954 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
1955 (i.e. SP), in which case it defaults to LSL. The LSL alias is
1956 only valid when "Rd" or "Rn" is '11111', and is preferred in that
1957 case. */
1958 if (!aarch64_stack_pointer_p (opnds + 0)
1959 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
1960 {
1961 if (!opnd->shifter.operator_present)
1962 {
1963 set_other_error (mismatch_detail, idx,
1964 _("missing extend operator"));
1965 return 0;
1966 }
1967 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
1968 {
1969 set_other_error (mismatch_detail, idx,
1970 _("'LSL' operator not allowed"));
1971 return 0;
1972 }
1973 }
1974 assert (opnd->shifter.operator_present /* Default to LSL. */
1975 || opnd->shifter.kind == AARCH64_MOD_LSL);
1976 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
1977 {
1978 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
1979 return 0;
1980 }
1981 /* In the 64-bit form, the final register operand is written as Wm
1982 for all but the (possibly omitted) UXTX/LSL and SXTX
1983 operators.
1984 N.B. GAS allows an X register to be used with any operator as a
1985 programming convenience. */
1986 if (qualifier == AARCH64_OPND_QLF_X
1987 && opnd->shifter.kind != AARCH64_MOD_LSL
1988 && opnd->shifter.kind != AARCH64_MOD_UXTX
1989 && opnd->shifter.kind != AARCH64_MOD_SXTX)
1990 {
1991 set_other_error (mismatch_detail, idx, _("W register expected"));
1992 return 0;
1993 }
1994 break;
1995
1996 case AARCH64_OPND_Rm_SFT:
1997 /* ROR is not available to the shifted register operand in
1998 arithmetic instructions. */
1999 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2000 {
2001 set_other_error (mismatch_detail, idx,
2002 _("shift operator expected"));
2003 return 0;
2004 }
2005 if (opnd->shifter.kind == AARCH64_MOD_ROR
2006 && opcode->iclass != log_shift)
2007 {
2008 set_other_error (mismatch_detail, idx,
2009 _("'ROR' operator not allowed"));
2010 return 0;
2011 }
2012 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2013 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2014 {
2015 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2016 return 0;
2017 }
2018 break;
2019
2020 default:
2021 break;
2022 }
2023 break;
2024
2025 default:
2026 break;
2027 }
2028
2029 return 1;
2030 }
2031
2032 /* Main entrypoint for the operand constraint checking.
2033
2034 Return 1 if operands of *INST meet the constraint applied by the operand
2035 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2036 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2037 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2038 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2039 error kind when it is notified that an instruction does not pass the check).
2040
2041 Un-determined operand qualifiers may get established during the process. */
2042
2043 int
2044 aarch64_match_operands_constraint (aarch64_inst *inst,
2045 aarch64_operand_error *mismatch_detail)
2046 {
2047 int i;
2048
2049 DEBUG_TRACE ("enter");
2050
2051 /* Match operands' qualifier.
2052 *INST has already had qualifiers established for some, if not all, of
2053 its operands; we need to find out whether these established
2054 qualifiers match one of the qualifier sequences in
2055 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2056 with the corresponding qualifier in such a sequence.
2057 Only basic operand constraint checking is done here; the more thorough
2058 constraint checking will be carried out by operand_general_constraint_met_p,
2059 which has to be called after this in order to get all of the operands'
2060 qualifiers established. */
2061 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2062 {
2063 DEBUG_TRACE ("FAIL on operand qualifier matching");
2064 if (mismatch_detail)
2065 {
2066 /* Return an error type to indicate that it is a qualifier
2067 matching failure; we don't care about which operand, as there
2068 is enough information in the opcode table to reproduce it. */
2069 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2070 mismatch_detail->index = -1;
2071 mismatch_detail->error = NULL;
2072 }
2073 return 0;
2074 }
2075
2076 /* Match operands' constraint. */
2077 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2078 {
2079 enum aarch64_opnd type = inst->opcode->operands[i];
2080 if (type == AARCH64_OPND_NIL)
2081 break;
2082 if (inst->operands[i].skip)
2083 {
2084 DEBUG_TRACE ("skip the incomplete operand %d", i);
2085 continue;
2086 }
2087 if (operand_general_constraint_met_p (inst->operands, i, type,
2088 inst->opcode, mismatch_detail) == 0)
2089 {
2090 DEBUG_TRACE ("FAIL on operand %d", i);
2091 return 0;
2092 }
2093 }
2094
2095 DEBUG_TRACE ("PASS");
2096
2097 return 1;
2098 }
2099
2100 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2101 Also updates the TYPE of each INST->OPERANDS with the corresponding
2102 value of OPCODE->OPERANDS.
2103
2104 Note that some operand qualifiers may need to be manually cleared by
2105 the caller before it further calls aarch64_opcode_encode; doing
2106 this helps the qualifier matching facilities work
2107 properly. */
2108
2109 const aarch64_opcode*
2110 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2111 {
2112 int i;
2113 const aarch64_opcode *old = inst->opcode;
2114
2115 inst->opcode = opcode;
2116
2117 /* Update the operand types. */
2118 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2119 {
2120 inst->operands[i].type = opcode->operands[i];
2121 if (opcode->operands[i] == AARCH64_OPND_NIL)
2122 break;
2123 }
2124
2125 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2126
2127 return old;
2128 }
2129
2130 int
2131 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2132 {
2133 int i;
2134 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2135 if (operands[i] == operand)
2136 return i;
2137 else if (operands[i] == AARCH64_OPND_NIL)
2138 break;
2139 return -1;
2140 }
2141 \f
2142 /* [0][0] 32-bit integer regs with sp Wn
2143 [0][1] 64-bit integer regs with sp Xn sf=1
2144 [1][0] 32-bit integer regs with #0 Wn
2145 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2146 static const char *int_reg[2][2][32] = {
2147 #define R32 "w"
2148 #define R64 "x"
2149 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
2150 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2151 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2152 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", "wsp" },
2153 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
2154 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2155 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2156 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", "sp" } },
2157 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
2158 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2159 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2160 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
2161 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
2162 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2163 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2164 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
2165 #undef R64
2166 #undef R32
2167 };
2168
2169 /* Return the integer register name.
2170 If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg. */
2171
2172 static inline const char *
2173 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2174 {
2175 const int has_zr = sp_reg_p ? 0 : 1;
2176 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2177 return int_reg[has_zr][is_64][regno];
2178 }
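/* For example, get_int_reg_name (31, AARCH64_OPND_QLF_X, 1) selects
   int_reg[0][1][31] and yields "sp", whereas passing 0 for SP_REG_P selects
   int_reg[1][1][31] and yields "xzr".  */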
2179
2180 /* Like get_int_reg_name, but IS_64 is always 1. */
2181
2182 static inline const char *
2183 get_64bit_int_reg_name (int regno, int sp_reg_p)
2184 {
2185 const int has_zr = sp_reg_p ? 0 : 1;
2186 return int_reg[has_zr][1][regno];
2187 }
2188
2189 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2190
2191 typedef union
2192 {
2193 uint64_t i;
2194 double d;
2195 } double_conv_t;
2196
2197 typedef union
2198 {
2199 uint32_t i;
2200 float f;
2201 } single_conv_t;
2202
2203 typedef union
2204 {
2205 uint32_t i;
2206 float f;
2207 } half_conv_t;
2208
2209 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2210 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2211 (depending on the type of the instruction). IMM8 will be expanded to a
2212 single-precision floating-point value (SIZE == 4) or a double-precision
2213 floating-point value (SIZE == 8). A half-precision floating-point value
2214 (SIZE == 2) is expanded to a single-precision floating-point value. The
2215 expanded value is returned. */
2216
2217 static uint64_t
2218 expand_fp_imm (int size, uint32_t imm8)
2219 {
2220 uint64_t imm;
2221 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2222
2223 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2224 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2225 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2226 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2227 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
2228 if (size == 8)
2229 {
2230 imm = (imm8_7 << (63-32)) /* imm8<7> */
2231 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
2232 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2233 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2234 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2235 imm <<= 32;
2236 }
2237 else if (size == 4 || size == 2)
2238 {
2239 imm = (imm8_7 << 31) /* imm8<7> */
2240 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2241 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2242 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2243 }
2244 else
2245 {
2246 /* An unsupported size. */
2247 assert (0);
2248 }
2249
2250 return imm;
2251 }
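/* As a worked example, IMM8 == 0x70 (a=0, b=c=d=1, e=f=g=h=0) gives
   imm8_7 == 0, imm8_6 == 1, imm8_6_repl4 == 0xf and imm8_6_0 == 0x70, so
   for SIZE == 4 the expansion above yields 0x3f800000, i.e. the
   single-precision value 1.0.  */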
2252
2253 /* Produce the string representation of the register list operand *OPND
2254 in the buffer pointed by BUF of size SIZE. */
2255 static void
2256 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd)
2257 {
2258 const int num_regs = opnd->reglist.num_regs;
2259 const int first_reg = opnd->reglist.first_regno;
2260 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2261 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2262 char tb[8]; /* Temporary buffer. */
2263
2264 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2265 assert (num_regs >= 1 && num_regs <= 4);
2266
2267 /* Prepare the index if any. */
2268 if (opnd->reglist.has_index)
2269 snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
2270 else
2271 tb[0] = '\0';
2272
2273 /* The hyphenated form is preferred for disassembly if there are
2274 more than two registers in the list, and the register numbers
2275 are monotonically increasing in increments of one. */
2276 if (num_regs > 2 && last_reg > first_reg)
2277 snprintf (buf, size, "{v%d.%s-v%d.%s}%s", first_reg, qlf_name,
2278 last_reg, qlf_name, tb);
2279 else
2280 {
2281 const int reg0 = first_reg;
2282 const int reg1 = (first_reg + 1) & 0x1f;
2283 const int reg2 = (first_reg + 2) & 0x1f;
2284 const int reg3 = (first_reg + 3) & 0x1f;
2285
2286 switch (num_regs)
2287 {
2288 case 1:
2289 snprintf (buf, size, "{v%d.%s}%s", reg0, qlf_name, tb);
2290 break;
2291 case 2:
2292 snprintf (buf, size, "{v%d.%s, v%d.%s}%s", reg0, qlf_name,
2293 reg1, qlf_name, tb);
2294 break;
2295 case 3:
2296 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0, qlf_name,
2297 reg1, qlf_name, reg2, qlf_name, tb);
2298 break;
2299 case 4:
2300 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
2301 reg0, qlf_name, reg1, qlf_name, reg2, qlf_name,
2302 reg3, qlf_name, tb);
2303 break;
2304 }
2305 }
2306 }
2307 }
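/* For example, a four-register list starting at v0 with the 4S qualifier is
   printed in the hyphenated form "{v0.4s-v3.4s}", while a two-register list
   starting at v31 wraps around and is printed as "{v31.4s, v0.4s}".  */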
2308 /* Produce the string representation of the register offset address operand
2309 *OPND in the buffer pointed by BUF of size SIZE. */
2310 static void
2311 print_register_offset_address (char *buf, size_t size,
2312 const aarch64_opnd_info *opnd)
2313 {
2314 char tb[16]; /* Temporary buffer. */
2315 bfd_boolean lsl_p = FALSE; /* Is the shift operator LSL? */
2316 bfd_boolean wm_p = FALSE; /* Should Rm be Wm? */
2317 bfd_boolean print_extend_p = TRUE;
2318 bfd_boolean print_amount_p = TRUE;
2319 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2320
2321 switch (opnd->shifter.kind)
2322 {
2323 case AARCH64_MOD_UXTW: wm_p = TRUE; break;
2324 case AARCH64_MOD_LSL : lsl_p = TRUE; break;
2325 case AARCH64_MOD_SXTW: wm_p = TRUE; break;
2326 case AARCH64_MOD_SXTX: break;
2327 default: assert (0);
2328 }
2329
2330 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2331 || !opnd->shifter.amount_present))
2332 {
2333 /* Don't print the shift/extend amount when the amount is zero and
2334 this is not the special case of an 8-bit load/store instruction. */
2335 print_amount_p = FALSE;
2336 /* Likewise, no need to print the shift operator LSL in such a
2337 situation. */
2338 if (lsl_p)
2339 print_extend_p = FALSE;
2340 }
2341
2342 /* Prepare for the extend/shift. */
2343 if (print_extend_p)
2344 {
2345 if (print_amount_p)
2346 snprintf (tb, sizeof (tb), ",%s #%d", shift_name, opnd->shifter.amount);
2347 else
2348 snprintf (tb, sizeof (tb), ",%s", shift_name);
2349 }
2350 else
2351 tb[0] = '\0';
2352
2353 snprintf (buf, size, "[%s,%s%s]",
2354 get_64bit_int_reg_name (opnd->addr.base_regno, 1),
2355 get_int_reg_name (opnd->addr.offset.regno,
2356 wm_p ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X,
2357 0 /* sp_reg_p */),
2358 tb);
2359 }
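/* For example, with base register X1 and offset register 2, an SXTW extend
   with amount 2 is printed as "[x1,w2,sxtw #2]", while an LSL with a zero
   amount on a non-byte access is printed simply as "[x1,x2]".  */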
2360
2361 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2362 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2363 PC, PCREL_P and ADDRESS are used to pass in and return information about
2364 the PC-relative address calculation, where the PC value is passed in
2365 PC. If the operand is PC-relative, *PCREL_P (if PCREL_P is non-NULL)
2366 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2367 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2368
2369 The function serves both the disassembler and the assembler diagnostics
2370 issuer, which is the reason why it lives in this file. */
2371
2372 void
2373 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2374 const aarch64_opcode *opcode,
2375 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2376 bfd_vma *address)
2377 {
2378 int i;
2379 const char *name = NULL;
2380 const aarch64_opnd_info *opnd = opnds + idx;
2381 enum aarch64_modifier_kind kind;
2382 uint64_t addr;
2383
2384 buf[0] = '\0';
2385 if (pcrel_p)
2386 *pcrel_p = 0;
2387
2388 switch (opnd->type)
2389 {
2390 case AARCH64_OPND_Rd:
2391 case AARCH64_OPND_Rn:
2392 case AARCH64_OPND_Rm:
2393 case AARCH64_OPND_Rt:
2394 case AARCH64_OPND_Rt2:
2395 case AARCH64_OPND_Rs:
2396 case AARCH64_OPND_Ra:
2397 case AARCH64_OPND_Rt_SYS:
2398 case AARCH64_OPND_PAIRREG:
2399 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2400 the <ic_op>, therefore we use opnd->present to override the
2401 generic optional-ness information. */
2402 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2403 break;
2404 /* Omit the operand, e.g. RET. */
2405 if (optional_operand_p (opcode, idx)
2406 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2407 break;
2408 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2409 || opnd->qualifier == AARCH64_OPND_QLF_X);
2410 snprintf (buf, size, "%s",
2411 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2412 break;
2413
2414 case AARCH64_OPND_Rd_SP:
2415 case AARCH64_OPND_Rn_SP:
2416 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2417 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2418 || opnd->qualifier == AARCH64_OPND_QLF_X
2419 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2420 snprintf (buf, size, "%s",
2421 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2422 break;
2423
2424 case AARCH64_OPND_Rm_EXT:
2425 kind = opnd->shifter.kind;
2426 assert (idx == 1 || idx == 2);
2427 if ((aarch64_stack_pointer_p (opnds)
2428 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2429 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2430 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2431 && kind == AARCH64_MOD_UXTW)
2432 || (opnd->qualifier == AARCH64_OPND_QLF_X
2433 && kind == AARCH64_MOD_UXTX)))
2434 {
2435 /* 'LSL' is the preferred form in this case. */
2436 kind = AARCH64_MOD_LSL;
2437 if (opnd->shifter.amount == 0)
2438 {
2439 /* Shifter omitted. */
2440 snprintf (buf, size, "%s",
2441 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2442 break;
2443 }
2444 }
2445 if (opnd->shifter.amount)
2446 snprintf (buf, size, "%s, %s #%d",
2447 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2448 aarch64_operand_modifiers[kind].name,
2449 opnd->shifter.amount);
2450 else
2451 snprintf (buf, size, "%s, %s",
2452 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2453 aarch64_operand_modifiers[kind].name);
2454 break;
2455
2456 case AARCH64_OPND_Rm_SFT:
2457 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2458 || opnd->qualifier == AARCH64_OPND_QLF_X);
2459 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
2460 snprintf (buf, size, "%s",
2461 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2462 else
2463 snprintf (buf, size, "%s, %s #%d",
2464 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2465 aarch64_operand_modifiers[opnd->shifter.kind].name,
2466 opnd->shifter.amount);
2467 break;
2468
2469 case AARCH64_OPND_Fd:
2470 case AARCH64_OPND_Fn:
2471 case AARCH64_OPND_Fm:
2472 case AARCH64_OPND_Fa:
2473 case AARCH64_OPND_Ft:
2474 case AARCH64_OPND_Ft2:
2475 case AARCH64_OPND_Sd:
2476 case AARCH64_OPND_Sn:
2477 case AARCH64_OPND_Sm:
2478 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
2479 opnd->reg.regno);
2480 break;
2481
2482 case AARCH64_OPND_Vd:
2483 case AARCH64_OPND_Vn:
2484 case AARCH64_OPND_Vm:
2485 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
2486 aarch64_get_qualifier_name (opnd->qualifier));
2487 break;
2488
2489 case AARCH64_OPND_Ed:
2490 case AARCH64_OPND_En:
2491 case AARCH64_OPND_Em:
2492 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
2493 aarch64_get_qualifier_name (opnd->qualifier),
2494 opnd->reglane.index);
2495 break;
2496
2497 case AARCH64_OPND_VdD1:
2498 case AARCH64_OPND_VnD1:
2499 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
2500 break;
2501
2502 case AARCH64_OPND_LVn:
2503 case AARCH64_OPND_LVt:
2504 case AARCH64_OPND_LVt_AL:
2505 case AARCH64_OPND_LEt:
2506 print_register_list (buf, size, opnd);
2507 break;
2508
2509 case AARCH64_OPND_Cn:
2510 case AARCH64_OPND_Cm:
2511 snprintf (buf, size, "C%d", opnd->reg.regno);
2512 break;
2513
2514 case AARCH64_OPND_IDX:
2515 case AARCH64_OPND_IMM:
2516 case AARCH64_OPND_WIDTH:
2517 case AARCH64_OPND_UIMM3_OP1:
2518 case AARCH64_OPND_UIMM3_OP2:
2519 case AARCH64_OPND_BIT_NUM:
2520 case AARCH64_OPND_IMM_VLSL:
2521 case AARCH64_OPND_IMM_VLSR:
2522 case AARCH64_OPND_SHLL_IMM:
2523 case AARCH64_OPND_IMM0:
2524 case AARCH64_OPND_IMMR:
2525 case AARCH64_OPND_IMMS:
2526 case AARCH64_OPND_FBITS:
2527 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2528 break;
2529
2530 case AARCH64_OPND_IMM_MOV:
2531 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2532 {
2533 case 4: /* e.g. MOV Wd, #<imm32>. */
2534 {
2535 int imm32 = opnd->imm.value;
2536 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
2537 }
2538 break;
2539 case 8: /* e.g. MOV Xd, #<imm64>. */
2540 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
2541 opnd->imm.value, opnd->imm.value);
2542 break;
2543 default: assert (0);
2544 }
2545 break;
2546
2547 case AARCH64_OPND_FPIMM0:
2548 snprintf (buf, size, "#0.0");
2549 break;
2550
2551 case AARCH64_OPND_LIMM:
2552 case AARCH64_OPND_AIMM:
2553 case AARCH64_OPND_HALF:
2554 if (opnd->shifter.amount)
2555 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
2556 opnd->shifter.amount);
2557 else
2558 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2559 break;
2560
2561 case AARCH64_OPND_SIMD_IMM:
2562 case AARCH64_OPND_SIMD_IMM_SFT:
2563 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
2564 || opnd->shifter.kind == AARCH64_MOD_NONE)
2565 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2566 else
2567 snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
2568 aarch64_operand_modifiers[opnd->shifter.kind].name,
2569 opnd->shifter.amount);
2570 break;
2571
2572 case AARCH64_OPND_FPIMM:
2573 case AARCH64_OPND_SIMD_FPIMM:
2574 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2575 {
2576 case 2: /* e.g. FMOV <Hd>, #<imm>. */
2577 {
2578 half_conv_t c;
2579 c.i = expand_fp_imm (2, opnd->imm.value);
2580 snprintf (buf, size, "#%.18e", c.f);
2581 }
2582 break;
2583 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
2584 {
2585 single_conv_t c;
2586 c.i = expand_fp_imm (4, opnd->imm.value);
2587 snprintf (buf, size, "#%.18e", c.f);
2588 }
2589 break;
2590 case 8: /* e.g. FMOV <Dd>, #<imm>. */
2591 {
2592 double_conv_t c;
2593 c.i = expand_fp_imm (8, opnd->imm.value);
2594 snprintf (buf, size, "#%.18e", c.d);
2595 }
2596 break;
2597 default: assert (0);
2598 }
2599 break;
2600
2601 case AARCH64_OPND_CCMP_IMM:
2602 case AARCH64_OPND_NZCV:
2603 case AARCH64_OPND_EXCEPTION:
2604 case AARCH64_OPND_UIMM4:
2605 case AARCH64_OPND_UIMM7:
2606 if (optional_operand_p (opcode, idx) == TRUE
2607 && (opnd->imm.value ==
2608 (int64_t) get_optional_operand_default_value (opcode)))
2609 /* Omit the operand, e.g. DCPS1. */
2610 break;
2611 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
2612 break;
2613
2614 case AARCH64_OPND_COND:
2615 case AARCH64_OPND_COND1:
2616 snprintf (buf, size, "%s", opnd->cond->names[0]);
2617 break;
2618
2619 case AARCH64_OPND_ADDR_ADRP:
2620 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
2621 + opnd->imm.value;
2622 if (pcrel_p)
2623 *pcrel_p = 1;
2624 if (address)
2625 *address = addr;
2626 /* This is not necessary during disassembly, as print_address_func
2627 in the disassemble_info will take care of the printing. But some
2628 other callers may still be interested in getting the string in *BUF,
2629 so here we do snprintf regardless. */
2630 snprintf (buf, size, "#0x%" PRIx64, addr);
2631 break;
2632
2633 case AARCH64_OPND_ADDR_PCREL14:
2634 case AARCH64_OPND_ADDR_PCREL19:
2635 case AARCH64_OPND_ADDR_PCREL21:
2636 case AARCH64_OPND_ADDR_PCREL26:
2637 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
2638 if (pcrel_p)
2639 *pcrel_p = 1;
2640 if (address)
2641 *address = addr;
2642 /* This is not necessary during disassembly, as print_address_func
2643 in the disassemble_info will take care of the printing. But some
2644 other callers may still be interested in getting the string in *BUF,
2645 so here we do snprintf regardless. */
2646 snprintf (buf, size, "#0x%" PRIx64, addr);
2647 break;
2648
2649 case AARCH64_OPND_ADDR_SIMPLE:
2650 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
2651 case AARCH64_OPND_SIMD_ADDR_POST:
2652 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2653 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
2654 {
2655 if (opnd->addr.offset.is_reg)
2656 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
2657 else
2658 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
2659 }
2660 else
2661 snprintf (buf, size, "[%s]", name);
2662 break;
2663
2664 case AARCH64_OPND_ADDR_REGOFF:
2665 print_register_offset_address (buf, size, opnd);
2666 break;
2667
2668 case AARCH64_OPND_ADDR_SIMM7:
2669 case AARCH64_OPND_ADDR_SIMM9:
2670 case AARCH64_OPND_ADDR_SIMM9_2:
2671 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2672 if (opnd->addr.writeback)
2673 {
2674 if (opnd->addr.preind)
2675 snprintf (buf, size, "[%s,#%d]!", name, opnd->addr.offset.imm);
2676 else
2677 snprintf (buf, size, "[%s],#%d", name, opnd->addr.offset.imm);
2678 }
2679 else
2680 {
2681 if (opnd->addr.offset.imm)
2682 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2683 else
2684 snprintf (buf, size, "[%s]", name);
2685 }
2686 break;
2687
2688 case AARCH64_OPND_ADDR_UIMM12:
2689 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2690 if (opnd->addr.offset.imm)
2691 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2692 else
2693 snprintf (buf, size, "[%s]", name);
2694 break;
2695
2696 case AARCH64_OPND_SYSREG:
2697 for (i = 0; aarch64_sys_regs[i].name; ++i)
2698 if (aarch64_sys_regs[i].value == opnd->sysreg
2699 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
2700 break;
2701 if (aarch64_sys_regs[i].name)
2702 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
2703 else
2704 {
2705 /* Implementation defined system register. */
2706 unsigned int value = opnd->sysreg;
2707 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
2708 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
2709 value & 0x7);
2710 }
2711 break;
2712
2713 case AARCH64_OPND_PSTATEFIELD:
2714 for (i = 0; aarch64_pstatefields[i].name; ++i)
2715 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2716 break;
2717 assert (aarch64_pstatefields[i].name);
2718 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
2719 break;
2720
2721 case AARCH64_OPND_SYSREG_AT:
2722 case AARCH64_OPND_SYSREG_DC:
2723 case AARCH64_OPND_SYSREG_IC:
2724 case AARCH64_OPND_SYSREG_TLBI:
2725 snprintf (buf, size, "%s", opnd->sysins_op->name);
2726 break;
2727
2728 case AARCH64_OPND_BARRIER:
2729 snprintf (buf, size, "%s", opnd->barrier->name);
2730 break;
2731
2732 case AARCH64_OPND_BARRIER_ISB:
2733 /* Operand can be omitted, e.g. in DCPS1. */
2734 if (! optional_operand_p (opcode, idx)
2735 || (opnd->barrier->value
2736 != get_optional_operand_default_value (opcode)))
2737 snprintf (buf, size, "#0x%x", opnd->barrier->value);
2738 break;
2739
2740 case AARCH64_OPND_PRFOP:
2741 if (opnd->prfop->name != NULL)
2742 snprintf (buf, size, "%s", opnd->prfop->name);
2743 else
2744 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
2745 break;
2746
2747 case AARCH64_OPND_BARRIER_PSB:
2748 snprintf (buf, size, "%s", opnd->hint_option->name);
2749 break;
2750
2751 default:
2752 assert (0);
2753 }
2754 }
2755 \f
2756 #define CPENC(op0,op1,crn,crm,op2) \
2757 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
2758 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
2759 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
2760 /* for 3.9.10 System Instructions */
2761 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
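/* After the final ">> 5" the encoding fits in 16 bits, packed as
   op0[15:14]:op1[13:11]:CRn[10:7]:CRm[6:3]:op2[2:0]; this is the layout
   decoded for AARCH64_OPND_SYSREG in aarch64_print_operand above.  */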
2762
2763 #define C0 0
2764 #define C1 1
2765 #define C2 2
2766 #define C3 3
2767 #define C4 4
2768 #define C5 5
2769 #define C6 6
2770 #define C7 7
2771 #define C8 8
2772 #define C9 9
2773 #define C10 10
2774 #define C11 11
2775 #define C12 12
2776 #define C13 13
2777 #define C14 14
2778 #define C15 15
2779
2780 #ifdef F_DEPRECATED
2781 #undef F_DEPRECATED
2782 #endif
2783 #define F_DEPRECATED 0x1 /* Deprecated system register. */
2784
2785 #ifdef F_ARCHEXT
2786 #undef F_ARCHEXT
2787 #endif
2788 #define F_ARCHEXT 0x2 /* Architecture dependent system register. */
2789
2790 #ifdef F_HASXT
2791 #undef F_HASXT
2792 #endif
2793 #define F_HASXT 0x4 /* System instruction register <Xt>
2794 operand. */
2795
2796
2797 /* TODO there are two more issues that need to be resolved
2798 1. handle read-only and write-only system registers
2799 2. handle cpu-implementation-defined system registers. */
2800 const aarch64_sys_reg aarch64_sys_regs [] =
2801 {
2802 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
2803 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
2804 { "elr_el1", CPEN_(0,C0,1), 0 },
2805 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
2806 { "sp_el0", CPEN_(0,C1,0), 0 },
2807 { "spsel", CPEN_(0,C2,0), 0 },
2808 { "daif", CPEN_(3,C2,1), 0 },
2809 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
2810 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
2811 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
2812 { "nzcv", CPEN_(3,C2,0), 0 },
2813 { "fpcr", CPEN_(3,C4,0), 0 },
2814 { "fpsr", CPEN_(3,C4,1), 0 },
2815 { "dspsr_el0", CPEN_(3,C5,0), 0 },
2816 { "dlr_el0", CPEN_(3,C5,1), 0 },
2817 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
2818 { "elr_el2", CPEN_(4,C0,1), 0 },
2819 { "sp_el1", CPEN_(4,C1,0), 0 },
2820 { "spsr_irq", CPEN_(4,C3,0), 0 },
2821 { "spsr_abt", CPEN_(4,C3,1), 0 },
2822 { "spsr_und", CPEN_(4,C3,2), 0 },
2823 { "spsr_fiq", CPEN_(4,C3,3), 0 },
2824 { "spsr_el3", CPEN_(6,C0,0), 0 },
2825 { "elr_el3", CPEN_(6,C0,1), 0 },
2826 { "sp_el2", CPEN_(6,C1,0), 0 },
2827 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
2828 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
2829 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
2830 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
2831 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
2832 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
2833 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
2834 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
2835 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
2836 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
2837 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
2838 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
2839 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
2840 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
2841 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
2842 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
2843 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
2844 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
2845 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
2846 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
2847 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
2848 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
2849 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
2850 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
2851 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
2852 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
2853 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
2854 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
2855 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
2856 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
2857 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
2858 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
2859 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
2860 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
2861 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
2862 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
2863 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
2864 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
2865 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
2866 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
2867 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
2868 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
2869 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
2870 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
2871 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
2872 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
2873 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
2874 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
2875 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
2876 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
2877 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
2878 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
2879 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
2880 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
2881 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
2882 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
2883 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
2884 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
2885 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
2886 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
2887 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
2888 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
2889 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
2890 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
2891 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
2892 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
2893 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
2894 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
2895 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
2896 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
2897 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
2898 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
2899 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
2900 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
2901 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
2902 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
2903 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
2904 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
2905 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
2906 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
2907 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
2908 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
2909 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
2910 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
2911 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
2912 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
2913 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
2914 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
2915 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
2916 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
2917 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
2918 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
2919 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
2920 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
2921 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
2922 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
2923 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
2924 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
2925 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
2926 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
2927 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
2928 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
2929 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
2930 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
2931 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
2932 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
2933 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
2934 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
2935 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
2936 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
2937 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
2938 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
2939 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
2940 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
2941 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
2942 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
2943 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
2944 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
2945 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
2946 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
2947 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
2948 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
2949 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
2950 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
2951 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
2952 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
2953 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
2954 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
2955 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
2956 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
2957 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
2958 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
2959 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
2960 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
2961 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
2962 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
2963 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
2964 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
2965 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
2966 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
2967 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
2968 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
2969 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
2970 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
2971 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
2972 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
2973 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
2974 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
2975 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
2976 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
2977 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
2978 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
2979 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
2980 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
2981 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
2982 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
2983 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
2984 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
2985 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
2986 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
2987 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
2988 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
2989 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
2990 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
2991 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
2992 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
2993 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
2994 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
2995 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
2996 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
2997 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
2998 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
2999 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3000 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3001 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3002 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3003 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3004 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3005 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3006 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3007 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3008 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3009 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3010 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3011 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3012 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3013 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3014 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3015 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3016 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3017 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3018 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3019 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3020 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3021 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3022 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3023 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3024 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3025 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3026 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3027 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3028 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3029 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3030 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3031 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3032 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3033 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3034 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3035 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3036 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3037 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3038 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3039 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3040 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3041 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3042 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3043 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3044 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3045 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3046 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3047 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3048 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3049 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3050 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3051 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3052 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3053 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3054 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3055 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3056 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3057 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3058 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3059 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3060 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3061 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3062 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3063 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3064 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3065 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3066 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3067 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3068 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3069 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3070 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3071 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3072 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3073 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3074 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3075 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3076 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3077 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3078 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3079 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3080 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3081 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3082 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3083 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3084 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3085 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3086 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3087 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3088 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3089 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3090 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3091 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3092 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3093 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3094 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3095 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3096 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3097 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3098 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3099 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3100 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3101 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3102 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3103 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3104 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3105 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3106 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3107 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3108 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3109 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3110 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3111 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3112 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3113 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3114 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3115 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3116 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3117 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3118 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3119 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3120 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3121 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3122 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3123 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3124 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3125 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3126 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3127 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3128 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3129 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3130 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3131 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3132 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
3133 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
3134 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
3135 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
3136 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
3137 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
3138 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
3139 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
3140 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
3141 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
3142 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
3143 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
3144 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
3145 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3146 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3147 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3148 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3149 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3150 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3151 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3152 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3153 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3154 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3155 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3156 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3157 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3158 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3159 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3160 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3161 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
3162 { 0, CPENC(0,0,0,0,0), 0 },
3163 };
3164
3165 bfd_boolean
3166 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3167 {
3168 return (reg->flags & F_DEPRECATED) != 0;
3169 }
3170
3171 bfd_boolean
3172 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
3173 const aarch64_sys_reg *reg)
3174 {
3175 if (!(reg->flags & F_ARCHEXT))
3176 return TRUE;
3177
3178 /* PAN. Values are from aarch64_sys_regs. */
3179 if (reg->value == CPEN_(0,C2,3)
3180 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3181 return FALSE;
3182
3183 /* Virtualization host extensions: system registers. */
3184 if ((reg->value == CPENC (3, 4, C2, C0, 1)
3185 || reg->value == CPENC (3, 4, C13, C0, 1)
3186 || reg->value == CPENC (3, 4, C14, C3, 0)
3187 || reg->value == CPENC (3, 4, C14, C3, 1)
3188 || reg->value == CPENC (3, 4, C14, C3, 2))
3189 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3190 return FALSE;
3191
3192 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3193 if ((reg->value == CPEN_ (5, C0, 0)
3194 || reg->value == CPEN_ (5, C0, 1)
3195 || reg->value == CPENC (3, 5, C1, C0, 0)
3196 || reg->value == CPENC (3, 5, C1, C0, 2)
3197 || reg->value == CPENC (3, 5, C2, C0, 0)
3198 || reg->value == CPENC (3, 5, C2, C0, 1)
3199 || reg->value == CPENC (3, 5, C2, C0, 2)
3200 || reg->value == CPENC (3, 5, C5, C1, 0)
3201 || reg->value == CPENC (3, 5, C5, C1, 1)
3202 || reg->value == CPENC (3, 5, C5, C2, 0)
3203 || reg->value == CPENC (3, 5, C6, C0, 0)
3204 || reg->value == CPENC (3, 5, C10, C2, 0)
3205 || reg->value == CPENC (3, 5, C10, C3, 0)
3206 || reg->value == CPENC (3, 5, C12, C0, 0)
3207 || reg->value == CPENC (3, 5, C13, C0, 1)
3208 || reg->value == CPENC (3, 5, C14, C1, 0))
3209 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3210 return FALSE;
3211
3212 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3213 if ((reg->value == CPENC (3, 5, C14, C2, 0)
3214 || reg->value == CPENC (3, 5, C14, C2, 1)
3215 || reg->value == CPENC (3, 5, C14, C2, 2)
3216 || reg->value == CPENC (3, 5, C14, C3, 0)
3217 || reg->value == CPENC (3, 5, C14, C3, 1)
3218 || reg->value == CPENC (3, 5, C14, C3, 2))
3219 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3220 return FALSE;
3221
3222 /* ARMv8.2 features. */
3223
3224 /* ID_AA64MMFR2_EL1. */
3225 if (reg->value == CPENC (3, 0, C0, C7, 2)
3226 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3227 return FALSE;
3228
3229 /* PSTATE.UAO. */
3230 if (reg->value == CPEN_ (0, C2, 4)
3231 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3232 return FALSE;
3233
3234 /* RAS extension. */
3235
3236 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1, ERXADDR_EL1,
3237 ERXMISC0_EL1 and ERXMISC1_EL1. */
3238 if ((reg->value == CPENC (3, 0, C5, C3, 0)
3239 || reg->value == CPENC (3, 0, C5, C3, 1)
3240 || reg->value == CPENC (3, 0, C5, C3, 2)
3241 || reg->value == CPENC (3, 0, C5, C3, 3)
3242 || reg->value == CPENC (3, 0, C5, C4, 0)
3243 || reg->value == CPENC (3, 0, C5, C4, 1)
3244 || reg->value == CPENC (3, 0, C5, C4, 2)
3245 || reg->value == CPENC (3, 0, C5, C4, 3)
3246 || reg->value == CPENC (3, 0, C5, C5, 0)
3247 || reg->value == CPENC (3, 0, C5, C5, 1))
3248 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3249 return FALSE;
3250
3251 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3252 if ((reg->value == CPENC (3, 4, C5, C2, 3)
3253 || reg->value == CPENC (3, 0, C12, C1, 1)
3254 || reg->value == CPENC (3, 4, C12, C1, 1))
3255 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3256 return FALSE;
3257
3258 /* Statistical Profiling extension. */
3259 if ((reg->value == CPENC (3, 0, C9, C10, 0)
3260 || reg->value == CPENC (3, 0, C9, C10, 1)
3261 || reg->value == CPENC (3, 0, C9, C10, 3)
3262 || reg->value == CPENC (3, 0, C9, C10, 7)
3263 || reg->value == CPENC (3, 0, C9, C9, 0)
3264 || reg->value == CPENC (3, 0, C9, C9, 2)
3265 || reg->value == CPENC (3, 0, C9, C9, 3)
3266 || reg->value == CPENC (3, 0, C9, C9, 4)
3267 || reg->value == CPENC (3, 0, C9, C9, 5)
3268 || reg->value == CPENC (3, 0, C9, C9, 6)
3269 || reg->value == CPENC (3, 0, C9, C9, 7)
3270 || reg->value == CPENC (3, 4, C9, C9, 0)
3271 || reg->value == CPENC (3, 5, C9, C9, 0))
3272 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
3273 return FALSE;
3274
3275 return TRUE;
3276 }
3277
3278 const aarch64_sys_reg aarch64_pstatefields [] =
3279 {
3280 { "spsel", 0x05, 0 },
3281 { "daifset", 0x1e, 0 },
3282 { "daifclr", 0x1f, 0 },
3283 { "pan", 0x04, F_ARCHEXT },
3284 { "uao", 0x03, F_ARCHEXT },
3285 { 0, CPENC(0,0,0,0,0), 0 },
3286 };
3287
3288 bfd_boolean
3289 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
3290 const aarch64_sys_reg *reg)
3291 {
3292 if (!(reg->flags & F_ARCHEXT))
3293 return TRUE;
3294
3295 /* PAN. Values are from aarch64_pstatefields. */
3296 if (reg->value == 0x04
3297 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3298 return FALSE;
3299
3300 /* UAO. Values are from aarch64_pstatefields. */
3301 if (reg->value == 0x03
3302 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3303 return FALSE;
3304
3305 return TRUE;
3306 }
3307
3308 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
3309 {
3310 { "ialluis", CPENS(0,C7,C1,0), 0 },
3311 { "iallu", CPENS(0,C7,C5,0), 0 },
3312 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
3313 { 0, CPENS(0,0,0,0), 0 }
3314 };
3315
3316 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
3317 {
3318 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
3319 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
3320 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
3321 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
3322 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
3323 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
3324 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
3325 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
3326 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
3327 { 0, CPENS(0,0,0,0), 0 }
3328 };
3329
3330 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
3331 {
3332 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
3333 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
3334 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
3335 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
3336 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
3337 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
3338 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
3339 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
3340 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
3341 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
3342 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
3343 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
3344 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
3345 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
3346 { 0, CPENS(0,0,0,0), 0 }
3347 };
3348
3349 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
3350 {
3351 { "vmalle1", CPENS(0,C8,C7,0), 0 },
3352 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
3353 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
3354 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
3355 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
3356 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
3357 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
3358 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
3359 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
3360 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
3361 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
3362 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
3363 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
3364 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
3365 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
3366 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
3367 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
3368 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
3369 { "alle2", CPENS(4,C8,C7,0), 0 },
3370 { "alle2is", CPENS(4,C8,C3,0), 0 },
3371 { "alle1", CPENS(4,C8,C7,4), 0 },
3372 { "alle1is", CPENS(4,C8,C3,4), 0 },
3373 { "alle3", CPENS(6,C8,C7,0), 0 },
3374 { "alle3is", CPENS(6,C8,C3,0), 0 },
3375 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
3376 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
3377 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
3378 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
3379 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
3380 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
3381 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
3382 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
3383 { 0, CPENS(0,0,0,0), 0 }
3384 };
3385
3386 bfd_boolean
3387 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
3388 {
3389 return (sys_ins_reg->flags & F_HASXT) != 0;
3390 }
3391
3392 extern bfd_boolean
3393 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
3394 const aarch64_sys_ins_reg *reg)
3395 {
3396 if (!(reg->flags & F_ARCHEXT))
3397 return TRUE;
3398
3399 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
3400 if (reg->value == CPENS (3, C7, C12, 1)
3401 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3402 return FALSE;
3403
3404 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
3405 if ((reg->value == CPENS (0, C7, C9, 0)
3406 || reg->value == CPENS (0, C7, C9, 1))
3407 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3408 return FALSE;
3409
3410 return TRUE;
3411 }
3412
3413 #undef C0
3414 #undef C1
3415 #undef C2
3416 #undef C3
3417 #undef C4
3418 #undef C5
3419 #undef C6
3420 #undef C7
3421 #undef C8
3422 #undef C9
3423 #undef C10
3424 #undef C11
3425 #undef C12
3426 #undef C13
3427 #undef C14
3428 #undef C15
3429
3430 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
3431 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
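/* E.g. BITS (insn, 14, 10) extracts the five bits insn<14:10>, which is the
   Rt2 field tested in verify_ldpsw below.  */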
3432
3433 static bfd_boolean
3434 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
3435 const aarch64_insn insn)
3436 {
3437 int t = BITS (insn, 4, 0);
3438 int n = BITS (insn, 9, 5);
3439 int t2 = BITS (insn, 14, 10);
3440
3441 if (BIT (insn, 23))
3442 {
3443 /* Write back enabled. */
3444 if ((t == n || t2 == n) && n != 31)
3445 return FALSE;
3446 }
3447
3448 if (BIT (insn, 22))
3449 {
3450 /* Load */
3451 if (t == t2)
3452 return FALSE;
3453 }
3454
3455 return TRUE;
3456 }
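/* For example, this rejects "ldpsw x2, x2, [x0]" (Rt == Rt2 on a load) and
   "ldpsw x0, x1, [x0], #8" (the base register matches a transfer register
   while writeback is enabled).  */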
3457
3458 /* Include the opcode description table as well as the operand description
3459 table. */
3460 #define VERIFIER(x) verify_##x
3461 #include "aarch64-tbl.h"