1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30
31 #include "aarch64-opc.h"
32
33 #ifdef DEBUG_AARCH64
34 int debug_dump = FALSE;
35 #endif /* DEBUG_AARCH64 */
36
37 /* Helper functions to determine which operand is used to encode/decode
38 the size:Q fields for AdvSIMD instructions. */
39
40 static inline bfd_boolean
41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
42 {
43 return ((qualifier >= AARCH64_OPND_QLF_V_8B
44 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
45 : FALSE);
46 }
47
48 static inline bfd_boolean
49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
50 {
51 return ((qualifier >= AARCH64_OPND_QLF_S_B
52 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
53 : FALSE);
54 }
55
56 enum data_pattern
57 {
58 DP_UNKNOWN,
59 DP_VECTOR_3SAME,
60 DP_VECTOR_LONG,
61 DP_VECTOR_WIDE,
62 DP_VECTOR_ACROSS_LANES,
63 };
64
65 static const char significant_operand_index [] =
66 {
67 0, /* DP_UNKNOWN, by default using operand 0. */
68 0, /* DP_VECTOR_3SAME */
69 1, /* DP_VECTOR_LONG */
70 2, /* DP_VECTOR_WIDE */
71 1, /* DP_VECTOR_ACROSS_LANES */
72 };
73
74 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
75 the data pattern.
76 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
77 corresponds to one of a sequence of operands. */
78
79 static enum data_pattern
80 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
81 {
82 if (vector_qualifier_p (qualifiers[0]) == TRUE)
83 {
84 /* e.g. v.4s, v.4s, v.4s
85 or v.4h, v.4h, v.h[3]. */
86 if (qualifiers[0] == qualifiers[1]
87 && vector_qualifier_p (qualifiers[2]) == TRUE
88 && (aarch64_get_qualifier_esize (qualifiers[0])
89 == aarch64_get_qualifier_esize (qualifiers[1]))
90 && (aarch64_get_qualifier_esize (qualifiers[0])
91 == aarch64_get_qualifier_esize (qualifiers[2])))
92 return DP_VECTOR_3SAME;
93 /* e.g. v.8h, v.8b, v.8b.
94 or v.4s, v.4h, v.h[2].
95 or v.8h, v.16b. */
96 if (vector_qualifier_p (qualifiers[1]) == TRUE
97 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
98 && (aarch64_get_qualifier_esize (qualifiers[0])
99 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
100 return DP_VECTOR_LONG;
101 /* e.g. v.8h, v.8h, v.8b. */
102 if (qualifiers[0] == qualifiers[1]
103 && vector_qualifier_p (qualifiers[2]) == TRUE
104 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
105 && (aarch64_get_qualifier_esize (qualifiers[0])
106 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
107 && (aarch64_get_qualifier_esize (qualifiers[0])
108 == aarch64_get_qualifier_esize (qualifiers[1])))
109 return DP_VECTOR_WIDE;
110 }
111 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
112 {
113 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
114 if (vector_qualifier_p (qualifiers[1]) == TRUE
115 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
116 return DP_VECTOR_ACROSS_LANES;
117 }
118
119 return DP_UNKNOWN;
120 }
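/* For example, for SADDL <Vd>.4S, <Vn>.4H, <Vm>.4H the qualifier sequence is
   { 4S, 4H, 4H }: the destination element size (4 bytes) is twice that of the
   two sources (2 bytes), so the pattern is DP_VECTOR_LONG and, per the table
   above, operand 1 carries the size:Q encoding.  */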
121
122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123 the AdvSIMD instructions. */
124 /* N.B. it is possible to do some optimization that doesn't call
125 get_data_pattern each time we need to select an operand. We could
126 either cache the calculated result or statically generate the data;
127 however, it is not obvious that the optimization would bring significant
128 benefit. */
129
130 int
131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
132 {
133 return
134 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
135 }
136 \f
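/* Each entry below is { lsb, width }, i.e. the position of the least
   significant bit of the field within the instruction word and the number
   of bits it occupies; e.g. imm19 occupies bits [23:5].  */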
137 const aarch64_field fields[] =
138 {
139 { 0, 0 }, /* NIL. */
140 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
141 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
142 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
143 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
144 { 5, 19 }, /* imm19: e.g. in CBZ. */
145 { 5, 19 }, /* immhi: e.g. in ADRP. */
146 { 29, 2 }, /* immlo: e.g. in ADRP. */
147 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
148 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
149 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
150 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
151 { 0, 5 }, /* Rt: in load/store instructions. */
152 { 0, 5 }, /* Rd: in many integer instructions. */
153 { 5, 5 }, /* Rn: in many integer instructions. */
154 { 10, 5 }, /* Rt2: in load/store pair instructions. */
155 { 10, 5 }, /* Ra: in fp instructions. */
156 { 5, 3 }, /* op2: in the system instructions. */
157 { 8, 4 }, /* CRm: in the system instructions. */
158 { 12, 4 }, /* CRn: in the system instructions. */
159 { 16, 3 }, /* op1: in the system instructions. */
160 { 19, 2 }, /* op0: in the system instructions. */
161 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
162 { 12, 4 }, /* cond: condition flags as a source operand. */
163 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
164 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
165 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
166 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
167 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
168 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
169 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
170 { 12, 1 }, /* S: in load/store reg offset instructions. */
171 { 21, 2 }, /* hw: in move wide constant instructions. */
172 { 22, 2 }, /* opc: in load/store reg offset instructions. */
173 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
174 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
175 { 22, 2 }, /* type: floating point type field in fp data inst. */
176 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
177 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
178 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
179 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
180 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
181 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
182 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
183 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
184 { 5, 14 }, /* imm14: in test bit and branch instructions. */
185 { 5, 16 }, /* imm16: in exception instructions. */
186 { 0, 26 }, /* imm26: in unconditional branch instructions. */
187 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
188 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
189 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
190 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
191 { 22, 1 }, /* N: in logical (immediate) instructions. */
192 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
193 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
194 { 31, 1 }, /* sf: in integer data processing instructions. */
195 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
196 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
197 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
198 { 31, 1 }, /* b5: in the test bit and branch instructions. */
199 { 19, 5 }, /* b40: in the test bit and branch instructions. */
200 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
201 };
202
203 enum aarch64_operand_class
204 aarch64_get_operand_class (enum aarch64_opnd type)
205 {
206 return aarch64_operands[type].op_class;
207 }
208
209 const char *
210 aarch64_get_operand_name (enum aarch64_opnd type)
211 {
212 return aarch64_operands[type].name;
213 }
214
215 /* Get operand description string.
216 This is usually for the diagnosis purpose. */
217 const char *
218 aarch64_get_operand_desc (enum aarch64_opnd type)
219 {
220 return aarch64_operands[type].desc;
221 }
222
223 /* Table of all conditional affixes. */
224 const aarch64_cond aarch64_conds[16] =
225 {
226 {{"eq"}, 0x0},
227 {{"ne"}, 0x1},
228 {{"cs", "hs"}, 0x2},
229 {{"cc", "lo", "ul"}, 0x3},
230 {{"mi"}, 0x4},
231 {{"pl"}, 0x5},
232 {{"vs"}, 0x6},
233 {{"vc"}, 0x7},
234 {{"hi"}, 0x8},
235 {{"ls"}, 0x9},
236 {{"ge"}, 0xa},
237 {{"lt"}, 0xb},
238 {{"gt"}, 0xc},
239 {{"le"}, 0xd},
240 {{"al"}, 0xe},
241 {{"nv"}, 0xf},
242 };
243
244 const aarch64_cond *
245 get_cond_from_value (aarch64_insn value)
246 {
247 assert (value < 16);
248 return &aarch64_conds[(unsigned int) value];
249 }
250
251 const aarch64_cond *
252 get_inverted_cond (const aarch64_cond *cond)
253 {
254 return &aarch64_conds[cond->value ^ 0x1];
255 }
256
257 /* Table describing the operand extension/shifting operators; indexed by
258 enum aarch64_modifier_kind.
259
260 The value column provides the most common values for encoding modifiers,
261 which enables table-driven encoding/decoding for the modifiers. */
262 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
263 {
264 {"none", 0x0},
265 {"msl", 0x0},
266 {"ror", 0x3},
267 {"asr", 0x2},
268 {"lsr", 0x1},
269 {"lsl", 0x0},
270 {"uxtb", 0x0},
271 {"uxth", 0x1},
272 {"uxtw", 0x2},
273 {"uxtx", 0x3},
274 {"sxtb", 0x4},
275 {"sxth", 0x5},
276 {"sxtw", 0x6},
277 {"sxtx", 0x7},
278 {NULL, 0},
279 };
280
281 enum aarch64_modifier_kind
282 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
283 {
284 return desc - aarch64_operand_modifiers;
285 }
286
287 aarch64_insn
288 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
289 {
290 return aarch64_operand_modifiers[kind].value;
291 }
292
293 enum aarch64_modifier_kind
294 aarch64_get_operand_modifier_from_value (aarch64_insn value,
295 bfd_boolean extend_p)
296 {
297 if (extend_p == TRUE)
298 return AARCH64_MOD_UXTB + value;
299 else
300 return AARCH64_MOD_LSL - value;
301 }
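/* For example, aarch64_get_operand_modifier_from_value (0x2, TRUE) yields
   AARCH64_MOD_UXTW and aarch64_get_operand_modifier_from_value (0x3, FALSE)
   yields AARCH64_MOD_ROR, mirroring the value column of the table above.  */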
302
303 bfd_boolean
304 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
305 {
306 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
307 ? TRUE : FALSE;
308 }
309
310 static inline bfd_boolean
311 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
312 {
313 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
314 ? TRUE : FALSE;
315 }
316
317 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
318 {
319 { "#0x00", 0x0 },
320 { "oshld", 0x1 },
321 { "oshst", 0x2 },
322 { "osh", 0x3 },
323 { "#0x04", 0x4 },
324 { "nshld", 0x5 },
325 { "nshst", 0x6 },
326 { "nsh", 0x7 },
327 { "#0x08", 0x8 },
328 { "ishld", 0x9 },
329 { "ishst", 0xa },
330 { "ish", 0xb },
331 { "#0x0c", 0xc },
332 { "ld", 0xd },
333 { "st", 0xe },
334 { "sy", 0xf },
335 };
336
337 /* op -> op: load = 0, instruction = 1, store = 2
338 l -> level: 1-3
339 t -> temporal: temporal (retained) = 0, non-temporal (streaming) = 1 */
340 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
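/* For example, B(0, 1, 0) is 0x00 ("pldl1keep") and B(2, 3, 1) is 0x15
   ("pstl3strm"); the table index of each named entry equals its value.  */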
341 const struct aarch64_name_value_pair aarch64_prfops[32] =
342 {
343 { "pldl1keep", B(0, 1, 0) },
344 { "pldl1strm", B(0, 1, 1) },
345 { "pldl2keep", B(0, 2, 0) },
346 { "pldl2strm", B(0, 2, 1) },
347 { "pldl3keep", B(0, 3, 0) },
348 { "pldl3strm", B(0, 3, 1) },
349 { NULL, 0x06 },
350 { NULL, 0x07 },
351 { "plil1keep", B(1, 1, 0) },
352 { "plil1strm", B(1, 1, 1) },
353 { "plil2keep", B(1, 2, 0) },
354 { "plil2strm", B(1, 2, 1) },
355 { "plil3keep", B(1, 3, 0) },
356 { "plil3strm", B(1, 3, 1) },
357 { NULL, 0x0e },
358 { NULL, 0x0f },
359 { "pstl1keep", B(2, 1, 0) },
360 { "pstl1strm", B(2, 1, 1) },
361 { "pstl2keep", B(2, 2, 0) },
362 { "pstl2strm", B(2, 2, 1) },
363 { "pstl3keep", B(2, 3, 0) },
364 { "pstl3strm", B(2, 3, 1) },
365 { NULL, 0x16 },
366 { NULL, 0x17 },
367 { NULL, 0x18 },
368 { NULL, 0x19 },
369 { NULL, 0x1a },
370 { NULL, 0x1b },
371 { NULL, 0x1c },
372 { NULL, 0x1d },
373 { NULL, 0x1e },
374 { NULL, 0x1f },
375 };
376 #undef B
377 \f
378 /* Utilities on value constraint. */
379
380 static inline int
381 value_in_range_p (int64_t value, int low, int high)
382 {
383 return (value >= low && value <= high) ? 1 : 0;
384 }
385
386 static inline int
387 value_aligned_p (int64_t value, int align)
388 {
389 return ((value & (align - 1)) == 0) ? 1 : 0;
390 }
391
392 /* A signed value fits in a field. */
393 static inline int
394 value_fit_signed_field_p (int64_t value, unsigned width)
395 {
396 assert (width < 32);
397 if (width < sizeof (value) * 8)
398 {
399 int64_t lim = (int64_t)1 << (width - 1);
400 if (value >= -lim && value < lim)
401 return 1;
402 }
403 return 0;
404 }
405
406 /* An unsigned value fits in a field. */
407 static inline int
408 value_fit_unsigned_field_p (int64_t value, unsigned width)
409 {
410 assert (width < 32);
411 if (width < sizeof (value) * 8)
412 {
413 int64_t lim = (int64_t)1 << width;
414 if (value >= 0 && value < lim)
415 return 1;
416 }
417 return 0;
418 }
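/* For example, with WIDTH == 6 the signed range is [-32, 31] and the
   unsigned range is [0, 63]; value_fit_signed_field_p (-33, 6) returns 0
   while value_fit_unsigned_field_p (63, 6) returns 1.  */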
419
420 /* Return 1 if OPERAND is SP or WSP. */
421 int
422 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
423 {
424 return ((aarch64_get_operand_class (operand->type)
425 == AARCH64_OPND_CLASS_INT_REG)
426 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
427 && operand->reg.regno == 31);
428 }
429
430 /* Return 1 if OPERAND is XZR or WZR. */
431 int
432 aarch64_zero_register_p (const aarch64_opnd_info *operand)
433 {
434 return ((aarch64_get_operand_class (operand->type)
435 == AARCH64_OPND_CLASS_INT_REG)
436 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
437 && operand->reg.regno == 31);
438 }
439
440 /* Return true if the operand *OPERAND, which has the operand code
441 OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
442 qualified by the qualifier TARGET. */
443
444 static inline int
445 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
446 aarch64_opnd_qualifier_t target)
447 {
448 switch (operand->qualifier)
449 {
450 case AARCH64_OPND_QLF_W:
451 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
452 return 1;
453 break;
454 case AARCH64_OPND_QLF_X:
455 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
456 return 1;
457 break;
458 case AARCH64_OPND_QLF_WSP:
459 if (target == AARCH64_OPND_QLF_W
460 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
461 return 1;
462 break;
463 case AARCH64_OPND_QLF_SP:
464 if (target == AARCH64_OPND_QLF_X
465 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
466 return 1;
467 break;
468 default:
469 break;
470 }
471
472 return 0;
473 }
474
475 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
476 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
477
478 Return NIL if more than one expected qualifier is found. */
479
480 aarch64_opnd_qualifier_t
481 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
482 int idx,
483 const aarch64_opnd_qualifier_t known_qlf,
484 int known_idx)
485 {
486 int i, saved_i;
487
488 /* Special case.
489
490 When the known qualifier is NIL, we have to assume that there is only
491 one qualifier sequence in the *QSEQ_LIST and return the corresponding
492 qualifier directly. One scenario is that for instruction
493 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
494 which has only one possible valid qualifier sequence
495 NIL, S_D
496 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
497 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
498
499 Because the qualifier NIL has dual roles in the qualifier sequence:
500 it can mean no qualifier for the operand, or the qualifier sequence is
501 not in use (when all qualifiers in the sequence are NILs), we have to
502 handle this special case here. */
503 if (known_qlf == AARCH64_OPND_NIL)
504 {
505 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
506 return qseq_list[0][idx];
507 }
508
509 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
510 {
511 if (qseq_list[i][known_idx] == known_qlf)
512 {
513 if (saved_i != -1)
514 /* More than one sequence is found to have KNOWN_QLF at
515 KNOWN_IDX. */
516 return AARCH64_OPND_NIL;
517 saved_i = i;
518 }
519 }
520
521 return qseq_list[saved_i][idx];
522 }
523
524 enum operand_qualifier_kind
525 {
526 OQK_NIL,
527 OQK_OPD_VARIANT,
528 OQK_VALUE_IN_RANGE,
529 OQK_MISC,
530 };
531
532 /* Operand qualifier description. */
533 struct operand_qualifier_data
534 {
535 /* The usage of the three data fields depends on the qualifier kind. */
536 int data0;
537 int data1;
538 int data2;
539 /* Description. */
540 const char *desc;
541 /* Kind. */
542 enum operand_qualifier_kind kind;
543 };
544
545 /* Indexed by the operand qualifier enumerators. */
546 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
547 {
548 {0, 0, 0, "NIL", OQK_NIL},
549
550 /* Operand variant qualifiers.
551 First 3 fields:
552 element size, number of elements and common value for encoding. */
553
554 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
555 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
556 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
557 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
558
559 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
560 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
561 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
562 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
563 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
564
565 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
566 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
567 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
568 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
569 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
570 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
571 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
572 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
573 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
574
575 /* Qualifiers constraining the value range.
576 First 3 fields:
577 Lower bound, higher bound, unused. */
578
579 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
580 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
581 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
582 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
583 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
584 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
585
586 /* Qualifiers for miscellaneous purpose.
587 First 3 fields:
588 unused, unused and unused. */
589
590 {0, 0, 0, "lsl", OQK_MISC},
591 {0, 0, 0, "msl", OQK_MISC},
592 
593 {0, 0, 0, "retrieving", OQK_MISC},
594 };
595
596 static inline bfd_boolean
597 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
598 {
599 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
600 ? TRUE : FALSE;
601 }
602
603 static inline bfd_boolean
604 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
605 {
606 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
607 ? TRUE : FALSE;
608 }
609
610 const char*
611 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
612 {
613 return aarch64_opnd_qualifiers[qualifier].desc;
614 }
615
616 /* Given an operand qualifier, return the expected data element size
617 of a qualified operand. */
618 unsigned char
619 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
620 {
621 assert (operand_variant_qualifier_p (qualifier) == TRUE);
622 return aarch64_opnd_qualifiers[qualifier].data0;
623 }
624
625 unsigned char
626 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
627 {
628 assert (operand_variant_qualifier_p (qualifier) == TRUE);
629 return aarch64_opnd_qualifiers[qualifier].data1;
630 }
631
632 aarch64_insn
633 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
634 {
635 assert (operand_variant_qualifier_p (qualifier) == TRUE);
636 return aarch64_opnd_qualifiers[qualifier].data2;
637 }
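/* For example, for the "4s" arrangement (AARCH64_OPND_QLF_V_4S) the table
   above gives an element size of 4 bytes, 4 elements and the standard
   encoding value 0x5.  */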
638
639 static int
640 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
641 {
642 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
643 return aarch64_opnd_qualifiers[qualifier].data0;
644 }
645
646 static int
647 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
648 {
649 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
650 return aarch64_opnd_qualifiers[qualifier].data1;
651 }
652
653 #ifdef DEBUG_AARCH64
654 void
655 aarch64_verbose (const char *str, ...)
656 {
657 va_list ap;
658 va_start (ap, str);
659 printf ("#### ");
660 vprintf (str, ap);
661 printf ("\n");
662 va_end (ap);
663 }
664
665 static inline void
666 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
667 {
668 int i;
669 printf ("#### \t");
670 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
671 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
672 printf ("\n");
673 }
674
675 static void
676 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
677 const aarch64_opnd_qualifier_t *qualifier)
678 {
679 int i;
680 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
681
682 aarch64_verbose ("dump_match_qualifiers:");
683 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
684 curr[i] = opnd[i].qualifier;
685 dump_qualifier_sequence (curr);
686 aarch64_verbose ("against");
687 dump_qualifier_sequence (qualifier);
688 }
689 #endif /* DEBUG_AARCH64 */
690
691 /* TODO: improve this; we could have an extra field at run time to
692 store the number of operands rather than calculating it every time. */
693
694 int
695 aarch64_num_of_operands (const aarch64_opcode *opcode)
696 {
697 int i = 0;
698 const enum aarch64_opnd *opnds = opcode->operands;
699 while (opnds[i++] != AARCH64_OPND_NIL)
700 ;
701 --i;
702 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
703 return i;
704 }
705
706 /* Find the best-matched qualifier sequence in *QUALIFIERS_LIST for INST.
707 If one is found, fill the sequence in *RET and return 1; otherwise return 0.
708 
709 N.B. on entry, it is very likely that only some operands in *INST
710 have had their qualifiers established.
711
712 If STOP_AT is not -1, the function will only try to match
713 the qualifier sequence for operands before and including the operand
714 of index STOP_AT; and on success *RET will only be filled with the first
715 (STOP_AT+1) qualifiers.
716
717 A couple of examples of the matching algorithm:
718
719 X,W,NIL should match
720 X,W,NIL
721
722 NIL,NIL should match
723 X ,NIL
724
725 Apart from serving the main encoding routine, this can also be called
726 during or after the operand decoding. */
727
728 int
729 aarch64_find_best_match (const aarch64_inst *inst,
730 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
731 int stop_at, aarch64_opnd_qualifier_t *ret)
732 {
733 int found = 0;
734 int i, num_opnds;
735 const aarch64_opnd_qualifier_t *qualifiers;
736
737 num_opnds = aarch64_num_of_operands (inst->opcode);
738 if (num_opnds == 0)
739 {
740 DEBUG_TRACE ("SUCCEED: no operand");
741 return 1;
742 }
743
744 if (stop_at < 0 || stop_at >= num_opnds)
745 stop_at = num_opnds - 1;
746
747 /* For each pattern. */
748 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
749 {
750 int j;
751 qualifiers = *qualifiers_list;
752
753 /* Start as positive. */
754 found = 1;
755
756 DEBUG_TRACE ("%d", i);
757 #ifdef DEBUG_AARCH64
758 if (debug_dump)
759 dump_match_qualifiers (inst->operands, qualifiers);
760 #endif
761
762 /* Most opcodes have far fewer patterns in the list.
763 The first empty (all-NIL) qualifier sequence indicates the end of the list. */
764 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
765 {
766 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
767 if (i)
768 found = 0;
769 break;
770 }
771
772 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
773 {
774 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
775 {
776 /* Either the operand does not have qualifier, or the qualifier
777 for the operand needs to be deduced from the qualifier
778 sequence.
779 In the latter case, any constraint checking related with
780 the obtained qualifier should be done later in
781 operand_general_constraint_met_p. */
782 continue;
783 }
784 else if (*qualifiers != inst->operands[j].qualifier)
785 {
786 /* Unless the target qualifier can also qualify the operand
787 (which has already had a non-nil qualifier), non-equal
788 qualifiers are generally unmatched. */
789 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
790 continue;
791 else
792 {
793 found = 0;
794 break;
795 }
796 }
797 else
798 continue; /* Equal qualifiers are certainly matched. */
799 }
800
801 /* Qualifiers established. */
802 if (found == 1)
803 break;
804 }
805
806 if (found == 1)
807 {
808 /* Fill the result in *RET. */
809 int j;
810 qualifiers = *qualifiers_list;
811
812 DEBUG_TRACE ("complete qualifiers using list %d", i);
813 #ifdef DEBUG_AARCH64
814 if (debug_dump)
815 dump_qualifier_sequence (qualifiers);
816 #endif
817
818 for (j = 0; j <= stop_at; ++j, ++qualifiers)
819 ret[j] = *qualifiers;
820 for (; j < AARCH64_MAX_OPND_NUM; ++j)
821 ret[j] = AARCH64_OPND_QLF_NIL;
822
823 DEBUG_TRACE ("SUCCESS");
824 return 1;
825 }
826
827 DEBUG_TRACE ("FAIL");
828 return 0;
829 }
830
831 /* Operand qualifier matching and resolving.
832
833 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
834 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
835
836 If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
837 succeeds. */
838
839 static int
840 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
841 {
842 int i;
843 aarch64_opnd_qualifier_seq_t qualifiers;
844
845 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
846 qualifiers))
847 {
848 DEBUG_TRACE ("matching FAIL");
849 return 0;
850 }
851
852 /* Update the qualifiers. */
853 if (update_p == TRUE)
854 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
855 {
856 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
857 break;
858 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
859 "update %s with %s for operand %d",
860 aarch64_get_qualifier_name (inst->operands[i].qualifier),
861 aarch64_get_qualifier_name (qualifiers[i]), i);
862 inst->operands[i].qualifier = qualifiers[i];
863 }
864
865 DEBUG_TRACE ("matching SUCCESS");
866 return 1;
867 }
868
869 /* Return TRUE if VALUE is a wide constant that can be moved into a general
870 register by MOVZ.
871
872 IS32 indicates whether VALUE is a 32-bit immediate or not.
873 If SHIFT_AMOUNT is not NULL and TRUE is returned, the logical left shift
874 amount will be returned in *SHIFT_AMOUNT. */
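/* For example, 0x12340000 is such a wide constant (MOVZ with a left shift
   of 16), whereas 0x12345678 is not, since no single shifted 16-bit chunk
   can represent it.  */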
875
876 bfd_boolean
877 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
878 {
879 int amount;
880
881 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
882
883 if (is32)
884 {
885 /* Allow all zeros or all ones in top 32-bits, so that
886 32-bit constant expressions like ~0x80000000 are
887 permitted. */
888 uint64_t ext = value;
889 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
890 /* Immediate out of range. */
891 return FALSE;
892 value &= (int64_t) 0xffffffff;
893 }
894
895 /* first, try movz then movn */
896 amount = -1;
897 if ((value & ((int64_t) 0xffff << 0)) == value)
898 amount = 0;
899 else if ((value & ((int64_t) 0xffff << 16)) == value)
900 amount = 16;
901 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
902 amount = 32;
903 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
904 amount = 48;
905
906 if (amount == -1)
907 {
908 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
909 return FALSE;
910 }
911
912 if (shift_amount != NULL)
913 *shift_amount = amount;
914
915 DEBUG_TRACE ("exit TRUE with amount %d", amount);
916
917 return TRUE;
918 }
919
920 /* Build the accepted values for immediate logical SIMD instructions.
921
922 The standard encodings of the immediate value are:
923 N imms immr SIMD size R S
924 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
925 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
926 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
927 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
928 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
929 0 11110s 00000r 2 UInt(r) UInt(s)
930 where the all-ones value of S is reserved.
931
932 Let's call E the SIMD size.
933
934 The immediate value is: S+1 bits '1' rotated to the right by R.
935
936 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
937 (remember S != E - 1). */
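/* For example, with element size 32, S = 7 and R = 0 the element pattern is
   eight consecutive ones (0xff), giving the 64-bit bitmask immediate
   0x000000ff000000ff once replicated; it is encoded as N = 0, immr = 000000,
   imms = 000111.  */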
938
939 #define TOTAL_IMM_NB 5334
940
941 typedef struct
942 {
943 uint64_t imm;
944 aarch64_insn encoding;
945 } simd_imm_encoding;
946
947 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
948
949 static int
950 simd_imm_encoding_cmp(const void *i1, const void *i2)
951 {
952 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
953 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
954
955 if (imm1->imm < imm2->imm)
956 return -1;
957 if (imm1->imm > imm2->imm)
958 return +1;
959 return 0;
960 }
961
962 /* immediate bitfield standard encoding
963 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
964 1 ssssss rrrrrr 64 rrrrrr ssssss
965 0 0sssss 0rrrrr 32 rrrrr sssss
966 0 10ssss 00rrrr 16 rrrr ssss
967 0 110sss 000rrr 8 rrr sss
968 0 1110ss 0000rr 4 rr ss
969 0 11110s 00000r 2 r s */
970 static inline int
971 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
972 {
973 return (is64 << 12) | (r << 6) | s;
974 }
975
976 static void
977 build_immediate_table (void)
978 {
979 uint32_t log_e, e, s, r, s_mask;
980 uint64_t mask, imm;
981 int nb_imms;
982 int is64;
983
984 nb_imms = 0;
985 for (log_e = 1; log_e <= 6; log_e++)
986 {
987 /* Get element size. */
988 e = 1u << log_e;
989 if (log_e == 6)
990 {
991 is64 = 1;
992 mask = 0xffffffffffffffffull;
993 s_mask = 0;
994 }
995 else
996 {
997 is64 = 0;
998 mask = (1ull << e) - 1;
999 /* log_e s_mask
1000 1 ((1 << 4) - 1) << 2 = 111100
1001 2 ((1 << 3) - 1) << 3 = 111000
1002 3 ((1 << 2) - 1) << 4 = 110000
1003 4 ((1 << 1) - 1) << 5 = 100000
1004 5 ((1 << 0) - 1) << 6 = 000000 */
1005 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1006 }
1007 for (s = 0; s < e - 1; s++)
1008 for (r = 0; r < e; r++)
1009 {
1010 /* s+1 consecutive bits to 1 (s < 63) */
1011 imm = (1ull << (s + 1)) - 1;
1012 /* rotate right by r */
1013 if (r != 0)
1014 imm = (imm >> r) | ((imm << (e - r)) & mask);
1015 /* Replicate the constant depending on SIMD size; each case below deliberately falls through, doubling the pattern width. */
1016 switch (log_e)
1017 {
1018 case 1: imm = (imm << 2) | imm;
1019 case 2: imm = (imm << 4) | imm;
1020 case 3: imm = (imm << 8) | imm;
1021 case 4: imm = (imm << 16) | imm;
1022 case 5: imm = (imm << 32) | imm;
1023 case 6: break;
1024 default: abort ();
1025 }
1026 simd_immediates[nb_imms].imm = imm;
1027 simd_immediates[nb_imms].encoding =
1028 encode_immediate_bitfield(is64, s | s_mask, r);
1029 nb_imms++;
1030 }
1031 }
1032 assert (nb_imms == TOTAL_IMM_NB);
1033 qsort(simd_immediates, nb_imms,
1034 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1035 }
1036
1037 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1038 be accepted by logical (immediate) instructions
1039 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1040
1041 IS32 indicates whether or not VALUE is a 32-bit immediate.
1042 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1043 VALUE will be returned in *ENCODING. */
1044
1045 bfd_boolean
1046 aarch64_logical_immediate_p (uint64_t value, int is32, aarch64_insn *encoding)
1047 {
1048 simd_imm_encoding imm_enc;
1049 const simd_imm_encoding *imm_encoding;
1050 static bfd_boolean initialized = FALSE;
1051
1052 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1053 value, is32);
1054
1055 if (initialized == FALSE)
1056 {
1057 build_immediate_table ();
1058 initialized = TRUE;
1059 }
1060
1061 if (is32)
1062 {
1063 /* Allow all zeros or all ones in top 32-bits, so that
1064 constant expressions like ~1 are permitted. */
1065 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1066 return FALSE;
1067
1068 /* Replicate the 32 lower bits to the 32 upper bits. */
1069 value &= 0xffffffff;
1070 value |= value << 32;
1071 }
1072
1073 imm_enc.imm = value;
1074 imm_encoding = (const simd_imm_encoding *)
1075 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1076 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1077 if (imm_encoding == NULL)
1078 {
1079 DEBUG_TRACE ("exit with FALSE");
1080 return FALSE;
1081 }
1082 if (encoding != NULL)
1083 *encoding = imm_encoding->encoding;
1084 DEBUG_TRACE ("exit with TRUE");
1085 return TRUE;
1086 }
1087
1088 /* If 64-bit immediate IMM is in the format of
1089 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1090 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1091 of value "abcdefgh". Otherwise return -1. */
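/* For example, 0xff00ff0000ff00ff is shrunk to 0xa5 (binary 10100101),
   while 0x00000000000000f0 yields -1 because the byte 0xf0 is neither
   0x00 nor 0xff.  */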
1092 int
1093 aarch64_shrink_expanded_imm8 (uint64_t imm)
1094 {
1095 int i, ret;
1096 uint32_t byte;
1097
1098 ret = 0;
1099 for (i = 0; i < 8; i++)
1100 {
1101 byte = (imm >> (8 * i)) & 0xff;
1102 if (byte == 0xff)
1103 ret |= 1 << i;
1104 else if (byte != 0x00)
1105 return -1;
1106 }
1107 return ret;
1108 }
1109
1110 /* Utility inline functions for operand_general_constraint_met_p. */
1111
1112 static inline void
1113 set_error (aarch64_operand_error *mismatch_detail,
1114 enum aarch64_operand_error_kind kind, int idx,
1115 const char* error)
1116 {
1117 if (mismatch_detail == NULL)
1118 return;
1119 mismatch_detail->kind = kind;
1120 mismatch_detail->index = idx;
1121 mismatch_detail->error = error;
1122 }
1123
1124 static inline void
1125 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1126 int idx, int lower_bound, int upper_bound,
1127 const char* error)
1128 {
1129 if (mismatch_detail == NULL)
1130 return;
1131 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1132 mismatch_detail->data[0] = lower_bound;
1133 mismatch_detail->data[1] = upper_bound;
1134 }
1135
1136 static inline void
1137 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1138 int idx, int lower_bound, int upper_bound)
1139 {
1140 if (mismatch_detail == NULL)
1141 return;
1142 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1143 _("immediate value"));
1144 }
1145
1146 static inline void
1147 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1148 int idx, int lower_bound, int upper_bound)
1149 {
1150 if (mismatch_detail == NULL)
1151 return;
1152 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1153 _("immediate offset"));
1154 }
1155
1156 static inline void
1157 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1158 int idx, int lower_bound, int upper_bound)
1159 {
1160 if (mismatch_detail == NULL)
1161 return;
1162 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1163 _("register number"));
1164 }
1165
1166 static inline void
1167 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1168 int idx, int lower_bound, int upper_bound)
1169 {
1170 if (mismatch_detail == NULL)
1171 return;
1172 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1173 _("register element index"));
1174 }
1175
1176 static inline void
1177 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1178 int idx, int lower_bound, int upper_bound)
1179 {
1180 if (mismatch_detail == NULL)
1181 return;
1182 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1183 _("shift amount"));
1184 }
1185
1186 static inline void
1187 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1188 int alignment)
1189 {
1190 if (mismatch_detail == NULL)
1191 return;
1192 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1193 mismatch_detail->data[0] = alignment;
1194 }
1195
1196 static inline void
1197 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1198 int expected_num)
1199 {
1200 if (mismatch_detail == NULL)
1201 return;
1202 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1203 mismatch_detail->data[0] = expected_num;
1204 }
1205
1206 static inline void
1207 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1208 const char* error)
1209 {
1210 if (mismatch_detail == NULL)
1211 return;
1212 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1213 }
1214
1215 /* General constraint checking based on operand code.
1216
1217 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1218 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1219
1220 This function has to be called after the qualifiers for all operands
1221 have been resolved.
1222
1223 A mismatch error message is returned in *MISMATCH_DETAIL upon request,
1224 i.e. when MISMATCH_DETAIL is non-NULL. This avoids generating error
1225 messages during disassembly, where they are not wanted. We avoid the
1226 dynamic construction of error message strings
1227 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1228 use a combination of error code, static string and some integer data to
1229 represent an error. */
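/* For example, an out-of-range immediate is reported with the error kind
   AARCH64_OPDE_OUT_OF_RANGE, the static string "immediate value" and the
   permitted lower/upper bounds stored in data[0] and data[1]; the assembler
   can then compose the final diagnostic from these pieces.  */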
1230
1231 static int
1232 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1233 enum aarch64_opnd type,
1234 const aarch64_opcode *opcode,
1235 aarch64_operand_error *mismatch_detail)
1236 {
1237 unsigned num;
1238 unsigned char size;
1239 int64_t imm;
1240 const aarch64_opnd_info *opnd = opnds + idx;
1241 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1242
1243 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1244
1245 switch (aarch64_operands[type].op_class)
1246 {
1247 case AARCH64_OPND_CLASS_INT_REG:
1248 /* <Xt> may be optional in some IC and TLBI instructions. */
1249 if (type == AARCH64_OPND_Rt_SYS)
1250 {
1251 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1252 == AARCH64_OPND_CLASS_SYSTEM));
1253 if (opnds[1].present && !opnds[0].sysins_op->has_xt)
1254 {
1255 set_other_error (mismatch_detail, idx, _("extraneous register"));
1256 return 0;
1257 }
1258 if (!opnds[1].present && opnds[0].sysins_op->has_xt)
1259 {
1260 set_other_error (mismatch_detail, idx, _("missing register"));
1261 return 0;
1262 }
1263 }
1264 switch (qualifier)
1265 {
1266 case AARCH64_OPND_QLF_WSP:
1267 case AARCH64_OPND_QLF_SP:
1268 if (!aarch64_stack_pointer_p (opnd))
1269 {
1270 set_other_error (mismatch_detail, idx,
1271 _("stack pointer register expected"));
1272 return 0;
1273 }
1274 break;
1275 default:
1276 break;
1277 }
1278 break;
1279
1280 case AARCH64_OPND_CLASS_ADDRESS:
1281 /* Check writeback. */
1282 switch (opcode->iclass)
1283 {
1284 case ldst_pos:
1285 case ldst_unscaled:
1286 case ldstnapair_offs:
1287 case ldstpair_off:
1288 case ldst_unpriv:
1289 if (opnd->addr.writeback == 1)
1290 {
1291 set_other_error (mismatch_detail, idx,
1292 _("unexpected address writeback"));
1293 return 0;
1294 }
1295 break;
1296 case ldst_imm9:
1297 case ldstpair_indexed:
1298 case asisdlsep:
1299 case asisdlsop:
1300 if (opnd->addr.writeback == 0)
1301 {
1302 set_other_error (mismatch_detail, idx,
1303 _("address writeback expected"));
1304 return 0;
1305 }
1306 break;
1307 default:
1308 assert (opnd->addr.writeback == 0);
1309 break;
1310 }
1311 switch (type)
1312 {
1313 case AARCH64_OPND_ADDR_SIMM7:
1314 /* Scaled signed 7-bit immediate offset. */
1315 /* Get the size of the data element that is accessed, which may be
1316 different from that of the source register size,
1317 e.g. in strb/ldrb. */
1318 size = aarch64_get_qualifier_esize (opnd->qualifier);
1319 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1320 {
1321 set_offset_out_of_range_error (mismatch_detail, idx,
1322 -64 * size, 63 * size);
1323 return 0;
1324 }
1325 if (!value_aligned_p (opnd->addr.offset.imm, size))
1326 {
1327 set_unaligned_error (mismatch_detail, idx, size);
1328 return 0;
1329 }
1330 break;
1331 case AARCH64_OPND_ADDR_SIMM9:
1332 /* Unscaled signed 9-bit immediate offset. */
1333 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1334 {
1335 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1336 return 0;
1337 }
1338 break;
1339
1340 case AARCH64_OPND_ADDR_SIMM9_2:
1341 /* Unscaled signed 9-bit immediate offset, which has to be negative
1342 or unaligned. */
1343 size = aarch64_get_qualifier_esize (qualifier);
1344 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1345 && !value_aligned_p (opnd->addr.offset.imm, size))
1346 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1347 return 1;
1348 set_other_error (mismatch_detail, idx,
1349 _("negative or unaligned offset expected"));
1350 return 0;
1351
1352 case AARCH64_OPND_SIMD_ADDR_POST:
1353 /* AdvSIMD load/store multiple structures, post-index. */
1354 assert (idx == 1);
1355 if (opnd->addr.offset.is_reg)
1356 {
1357 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1358 return 1;
1359 else
1360 {
1361 set_other_error (mismatch_detail, idx,
1362 _("invalid register offset"));
1363 return 0;
1364 }
1365 }
1366 else
1367 {
1368 const aarch64_opnd_info *prev = &opnds[idx-1];
1369 unsigned num_bytes; /* total number of bytes transferred. */
1370 /* The opcode dependent area stores the number of elements in
1371 each structure to be loaded/stored. */
1372 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1373 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1374 /* Special handling of loading a single structure to all lanes. */
1375 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1376 * aarch64_get_qualifier_esize (prev->qualifier);
1377 else
1378 num_bytes = prev->reglist.num_regs
1379 * aarch64_get_qualifier_esize (prev->qualifier)
1380 * aarch64_get_qualifier_nelem (prev->qualifier);
1381 if ((int) num_bytes != opnd->addr.offset.imm)
1382 {
1383 set_other_error (mismatch_detail, idx,
1384 _("invalid post-increment amount"));
1385 return 0;
1386 }
1387 }
1388 break;
1389
1390 case AARCH64_OPND_ADDR_REGOFF:
1391 /* Get the size of the data element that is accessed, which may be
1392 different from that of the source register size,
1393 e.g. in strb/ldrb. */
1394 size = aarch64_get_qualifier_esize (opnd->qualifier);
1395 /* It is either no shift or shift by the binary logarithm of SIZE. */
1396 if (opnd->shifter.amount != 0
1397 && opnd->shifter.amount != (int)get_logsz (size))
1398 {
1399 set_other_error (mismatch_detail, idx,
1400 _("invalid shift amount"));
1401 return 0;
1402 }
1403 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1404 operators. */
1405 switch (opnd->shifter.kind)
1406 {
1407 case AARCH64_MOD_UXTW:
1408 case AARCH64_MOD_LSL:
1409 case AARCH64_MOD_SXTW:
1410 case AARCH64_MOD_SXTX: break;
1411 default:
1412 set_other_error (mismatch_detail, idx,
1413 _("invalid extend/shift operator"));
1414 return 0;
1415 }
1416 break;
1417
1418 case AARCH64_OPND_ADDR_UIMM12:
1419 imm = opnd->addr.offset.imm;
1420 /* Get the size of the data element that is accessed, which may be
1421 different from that of the source register size,
1422 e.g. in strb/ldrb. */
1423 size = aarch64_get_qualifier_esize (qualifier);
1424 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1425 {
1426 set_offset_out_of_range_error (mismatch_detail, idx,
1427 0, 4095 * size);
1428 return 0;
1429 }
1430 if (!value_aligned_p (opnd->addr.offset.imm, size))
1431 {
1432 set_unaligned_error (mismatch_detail, idx, size);
1433 return 0;
1434 }
1435 break;
1436
1437 case AARCH64_OPND_ADDR_PCREL14:
1438 case AARCH64_OPND_ADDR_PCREL19:
1439 case AARCH64_OPND_ADDR_PCREL21:
1440 case AARCH64_OPND_ADDR_PCREL26:
1441 imm = opnd->imm.value;
1442 if (operand_need_shift_by_two (get_operand_from_code (type)))
1443 {
1444 /* The offset value in a PC-relative branch instruction is always
1445 4-byte aligned and is encoded without the lowest 2 bits. */
1446 if (!value_aligned_p (imm, 4))
1447 {
1448 set_unaligned_error (mismatch_detail, idx, 4);
1449 return 0;
1450 }
1451 /* Right shift by 2 so that we can carry out the following check
1452 canonically. */
1453 imm >>= 2;
1454 }
1455 size = get_operand_fields_width (get_operand_from_code (type));
1456 if (!value_fit_signed_field_p (imm, size))
1457 {
1458 set_other_error (mismatch_detail, idx,
1459 _("immediate out of range"));
1460 return 0;
1461 }
1462 break;
1463
1464 default:
1465 break;
1466 }
1467 break;
1468
1469 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1470 /* The opcode dependent area stores the number of elements in
1471 each structure to be loaded/stored. */
1472 num = get_opcode_dependent_value (opcode);
1473 switch (type)
1474 {
1475 case AARCH64_OPND_LVt:
1476 assert (num >= 1 && num <= 4);
1477 /* Except for LD1/ST1, the number of registers should be equal to that
1478 of the structure elements. */
1479 if (num != 1 && opnd->reglist.num_regs != num)
1480 {
1481 set_reg_list_error (mismatch_detail, idx, num);
1482 return 0;
1483 }
1484 break;
1485 case AARCH64_OPND_LVt_AL:
1486 case AARCH64_OPND_LEt:
1487 assert (num >= 1 && num <= 4);
1488 /* The number of registers should be equal to that of the structure
1489 elements. */
1490 if (opnd->reglist.num_regs != num)
1491 {
1492 set_reg_list_error (mismatch_detail, idx, num);
1493 return 0;
1494 }
1495 break;
1496 default:
1497 break;
1498 }
1499 break;
1500
1501 case AARCH64_OPND_CLASS_IMMEDIATE:
1502 /* Constraint check on immediate operand. */
1503 imm = opnd->imm.value;
1504 /* E.g. imm_0_31 constrains value to be 0..31. */
1505 if (qualifier_value_in_range_constraint_p (qualifier)
1506 && !value_in_range_p (imm, get_lower_bound (qualifier),
1507 get_upper_bound (qualifier)))
1508 {
1509 set_imm_out_of_range_error (mismatch_detail, idx,
1510 get_lower_bound (qualifier),
1511 get_upper_bound (qualifier));
1512 return 0;
1513 }
1514
1515 switch (type)
1516 {
1517 case AARCH64_OPND_AIMM:
1518 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1519 {
1520 set_other_error (mismatch_detail, idx,
1521 _("invalid shift operator"));
1522 return 0;
1523 }
1524 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1525 {
1526 set_other_error (mismatch_detail, idx,
1527 _("shift amount expected to be 0 or 12"));
1528 return 0;
1529 }
1530 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1531 {
1532 set_other_error (mismatch_detail, idx,
1533 _("immediate out of range"));
1534 return 0;
1535 }
1536 break;
1537
1538 case AARCH64_OPND_HALF:
1539 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1540 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1541 {
1542 set_other_error (mismatch_detail, idx,
1543 _("invalid shift operator"));
1544 return 0;
1545 }
1546 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1547 if (!value_aligned_p (opnd->shifter.amount, 16))
1548 {
1549 set_other_error (mismatch_detail, idx,
1550 _("shift amount should be a multiple of 16"));
1551 return 0;
1552 }
1553 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1554 {
1555 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1556 0, size * 8 - 16);
1557 return 0;
1558 }
1559 if (opnd->imm.value < 0)
1560 {
1561 set_other_error (mismatch_detail, idx,
1562 _("negative immediate value not allowed"));
1563 return 0;
1564 }
1565 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1566 {
1567 set_other_error (mismatch_detail, idx,
1568 _("immediate out of range"));
1569 return 0;
1570 }
1571 break;
1572
1573 case AARCH64_OPND_IMM_MOV:
1574 {
1575 int is32 = aarch64_get_qualifier_esize (opnds[0].qualifier) == 4;
1576 imm = opnd->imm.value;
1577 assert (idx == 1);
1578 switch (opcode->op)
1579 {
1580 case OP_MOV_IMM_WIDEN:
1581 imm = ~imm;
1582 /* Fall through... */
1583 case OP_MOV_IMM_WIDE:
1584 if (!aarch64_wide_constant_p (imm, is32, NULL))
1585 {
1586 set_other_error (mismatch_detail, idx,
1587 _("immediate out of range"));
1588 return 0;
1589 }
1590 break;
1591 case OP_MOV_IMM_LOG:
1592 if (!aarch64_logical_immediate_p (imm, is32, NULL))
1593 {
1594 set_other_error (mismatch_detail, idx,
1595 _("immediate out of range"));
1596 return 0;
1597 }
1598 break;
1599 default:
1600 assert (0);
1601 return 0;
1602 }
1603 }
1604 break;
1605
1606 case AARCH64_OPND_NZCV:
1607 case AARCH64_OPND_CCMP_IMM:
1608 case AARCH64_OPND_EXCEPTION:
1609 case AARCH64_OPND_UIMM4:
1610 case AARCH64_OPND_UIMM7:
1611 case AARCH64_OPND_UIMM3_OP1:
1612 case AARCH64_OPND_UIMM3_OP2:
1613 size = get_operand_fields_width (get_operand_from_code (type));
1614 assert (size < 32);
1615 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1616 {
1617 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1618 (1 << size) - 1);
1619 return 0;
1620 }
1621 break;
1622
1623 case AARCH64_OPND_WIDTH:
1624 assert (idx == 3 && opnds[idx-1].type == AARCH64_OPND_IMM
1625 && opnds[0].type == AARCH64_OPND_Rd);
1626 size = get_upper_bound (qualifier);
1627 if (opnd->imm.value + opnds[idx-1].imm.value > size)
1628 /* lsb+width <= reg.size */
1629 {
1630 set_imm_out_of_range_error (mismatch_detail, idx, 1,
1631 size - opnds[idx-1].imm.value);
1632 return 0;
1633 }
1634 break;
1635
1636 case AARCH64_OPND_LIMM:
1637 {
1638 int is32 = opnds[0].qualifier == AARCH64_OPND_QLF_W;
1639 uint64_t uimm = opnd->imm.value;
1640 if (opcode->op == OP_BIC)
1641 uimm = ~uimm;
1642 if (aarch64_logical_immediate_p (uimm, is32, NULL) == FALSE)
1643 {
1644 set_other_error (mismatch_detail, idx,
1645 _("immediate out of range"));
1646 return 0;
1647 }
1648 }
1649 break;
1650
1651 case AARCH64_OPND_IMM0:
1652 case AARCH64_OPND_FPIMM0:
1653 if (opnd->imm.value != 0)
1654 {
1655 set_other_error (mismatch_detail, idx,
1656 _("immediate zero expected"));
1657 return 0;
1658 }
1659 break;
1660
1661 case AARCH64_OPND_SHLL_IMM:
1662 assert (idx == 2);
1663 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
1664 if (opnd->imm.value != size)
1665 {
1666 set_other_error (mismatch_detail, idx,
1667 _("invalid shift amount"));
1668 return 0;
1669 }
1670 break;
1671
1672 case AARCH64_OPND_IMM_VLSL:
1673 size = aarch64_get_qualifier_esize (qualifier);
1674 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
1675 {
1676 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1677 size * 8 - 1);
1678 return 0;
1679 }
1680 break;
1681
1682 case AARCH64_OPND_IMM_VLSR:
1683 size = aarch64_get_qualifier_esize (qualifier);
1684 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
1685 {
1686 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
1687 return 0;
1688 }
1689 break;
1690
1691 case AARCH64_OPND_SIMD_IMM:
1692 case AARCH64_OPND_SIMD_IMM_SFT:
1693 /* Qualifier check. */
1694 switch (qualifier)
1695 {
1696 case AARCH64_OPND_QLF_LSL:
1697 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1698 {
1699 set_other_error (mismatch_detail, idx,
1700 _("invalid shift operator"));
1701 return 0;
1702 }
1703 break;
1704 case AARCH64_OPND_QLF_MSL:
1705 if (opnd->shifter.kind != AARCH64_MOD_MSL)
1706 {
1707 set_other_error (mismatch_detail, idx,
1708 _("invalid shift operator"));
1709 return 0;
1710 }
1711 break;
1712 case AARCH64_OPND_QLF_NIL:
1713 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1714 {
1715 set_other_error (mismatch_detail, idx,
1716 _("shift is not permitted"));
1717 return 0;
1718 }
1719 break;
1720 default:
1721 assert (0);
1722 return 0;
1723 }
1724 /* Is the immediate valid? */
1725 assert (idx == 1);
1726 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
1727 {
1728 /* uimm8 or simm8 */
1729 if (!value_in_range_p (opnd->imm.value, -128, 255))
1730 {
1731 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
1732 return 0;
1733 }
1734 }
1735 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
1736 {
1737 /* uimm64 is not
1738 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1739 ffffffffgggggggghhhhhhhh'. */
1740 set_other_error (mismatch_detail, idx,
1741 _("invalid value for immediate"));
1742 return 0;
1743 }
1744 /* Is the shift amount valid? */
1745 switch (opnd->shifter.kind)
1746 {
1747 case AARCH64_MOD_LSL:
1748 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1749 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
1750 {
1751 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
1752 (size - 1) * 8);
1753 return 0;
1754 }
1755 if (!value_aligned_p (opnd->shifter.amount, 8))
1756 {
1757 set_unaligned_error (mismatch_detail, idx, 8);
1758 return 0;
1759 }
1760 break;
1761 case AARCH64_MOD_MSL:
1762 /* Only 8 and 16 are valid shift amounts. */
1763 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
1764 {
1765 set_other_error (mismatch_detail, idx,
1766 _("shift amount expected to be 0 or 16"));
1767 return 0;
1768 }
1769 break;
1770 default:
1771 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1772 {
1773 set_other_error (mismatch_detail, idx,
1774 _("invalid shift operator"));
1775 return 0;
1776 }
1777 break;
1778 }
1779 break;
1780
1781 case AARCH64_OPND_FPIMM:
1782 case AARCH64_OPND_SIMD_FPIMM:
1783 if (opnd->imm.is_fp == 0)
1784 {
1785 set_other_error (mismatch_detail, idx,
1786 _("floating-point immediate expected"));
1787 return 0;
1788 }
1789 /* The value is expected to be an 8-bit floating-point constant with
1790 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1791 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1792 instruction). */
1793 if (!value_in_range_p (opnd->imm.value, 0, 255))
1794 {
1795 set_other_error (mismatch_detail, idx,
1796 _("immediate out of range"));
1797 return 0;
1798 }
1799 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1800 {
1801 set_other_error (mismatch_detail, idx,
1802 _("invalid shift operator"));
1803 return 0;
1804 }
1805 break;
1806
1807 default:
1808 break;
1809 }
1810 break;
1811
1812 case AARCH64_OPND_CLASS_CP_REG:
1813 /* Cn or Cm: 4-bit opcode field named for historical reasons.
1814 Valid range: C0 - C15. */
1815 if (opnd->reg.regno > 15)
1816 {
1817 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1818 return 0;
1819 }
1820 break;
1821
1822 case AARCH64_OPND_CLASS_SYSTEM:
1823 switch (type)
1824 {
1825 case AARCH64_OPND_PSTATEFIELD:
1826 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
1827 /* MSR SPSel, #uimm4
1828 Uses uimm4 as a control value to select the stack pointer: if
1829 bit 0 is set it selects the current exception level's stack
1830 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
1831 Bits 1 to 3 of uimm4 are reserved and should be zero. */
1832 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
1833 {
1834 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
1835 return 0;
1836 }
1837 break;
1838 default:
1839 break;
1840 }
1841 break;
1842
1843 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
1844 /* Get the upper bound for the element index. */
1845 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1846 /* Index out-of-range. */
1847 if (!value_in_range_p (opnd->reglane.index, 0, num))
1848 {
1849 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1850 return 0;
1851 }
1852 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
1853 <Vm> is the vector register (V0-V31) or (V0-V15), whose
1854 number is encoded in "size:M:Rm":
1855 size <Vm>
1856 00 RESERVED
1857 01 0:Rm
1858 10 M:Rm
1859 11 RESERVED */
1860 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
1861 && !value_in_range_p (opnd->reglane.regno, 0, 15))
1862 {
1863 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1864 return 0;
1865 }
1866 break;
1867
1868 case AARCH64_OPND_CLASS_MODIFIED_REG:
1869 assert (idx == 1 || idx == 2);
1870 switch (type)
1871 {
1872 case AARCH64_OPND_Rm_EXT:
1873 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
1874 && opnd->shifter.kind != AARCH64_MOD_LSL)
1875 {
1876 set_other_error (mismatch_detail, idx,
1877 _("extend operator expected"));
1878 return 0;
1879 }
1880 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
1881 (i.e. SP), in which case it defaults to LSL. The LSL alias is
1882 only valid when "Rd" or "Rn" is '11111', and is preferred in that
1883 case. */
1884 if (!aarch64_stack_pointer_p (opnds + 0)
1885 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
1886 {
1887 if (!opnd->shifter.operator_present)
1888 {
1889 set_other_error (mismatch_detail, idx,
1890 _("missing extend operator"));
1891 return 0;
1892 }
1893 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
1894 {
1895 set_other_error (mismatch_detail, idx,
1896 _("'LSL' operator not allowed"));
1897 return 0;
1898 }
1899 }
1900 assert (opnd->shifter.operator_present /* Default to LSL. */
1901 || opnd->shifter.kind == AARCH64_MOD_LSL);
1902 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
1903 {
1904 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
1905 return 0;
1906 }
1907 /* In the 64-bit form, the final register operand is written as Wm
1908 for all but the (possibly omitted) UXTX/LSL and SXTX
1909 operators.
1910 N.B. GAS allows an X register to be used with any operator as a
1911 programming convenience. */
1912 if (qualifier == AARCH64_OPND_QLF_X
1913 && opnd->shifter.kind != AARCH64_MOD_LSL
1914 && opnd->shifter.kind != AARCH64_MOD_UXTX
1915 && opnd->shifter.kind != AARCH64_MOD_SXTX)
1916 {
1917 set_other_error (mismatch_detail, idx, _("W register expected"));
1918 return 0;
1919 }
1920 break;
1921
1922 case AARCH64_OPND_Rm_SFT:
1923 /* ROR is not available to the shifted register operand in
1924 arithmetic instructions. */
1925 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
1926 {
1927 set_other_error (mismatch_detail, idx,
1928 _("shift operator expected"));
1929 return 0;
1930 }
1931 if (opnd->shifter.kind == AARCH64_MOD_ROR
1932 && opcode->iclass != log_shift)
1933 {
1934 set_other_error (mismatch_detail, idx,
1935 _("'ROR' operator not allowed"));
1936 return 0;
1937 }
1938 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
1939 if (!value_in_range_p (opnd->shifter.amount, 0, num))
1940 {
1941 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
1942 return 0;
1943 }
1944 break;
1945
1946 default:
1947 break;
1948 }
1949 break;
1950
1951 default:
1952 break;
1953 }
1954
1955 return 1;
1956 }
1957
1958 /* Main entrypoint for the operand constraint checking.
1959
1960 Return 1 if operands of *INST meet the constraint applied by the operand
1961 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
1962 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
1963 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
1964 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
1965 error kind when it is notified that an instruction does not pass the check).
1966
1967 Un-determined operand qualifiers may get established during the process. */
1968
1969 int
1970 aarch64_match_operands_constraint (aarch64_inst *inst,
1971 aarch64_operand_error *mismatch_detail)
1972 {
1973 int i;
1974
1975 DEBUG_TRACE ("enter");
1976
1977 /* Match operands' qualifier.
1978 *INST has already had qualifiers established for some, if not all, of
1979 its operands; we need to find out whether these established
1980 qualifiers match one of the qualifier sequences in
1981 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
1982 the corresponding qualifier in such a sequence.
1983 Only basic operand constraint checking is done here; the more thorough
1984 constraint checking will be carried out by operand_general_constraint_met_p,
1985 which has to be called after this in order to get all of the operands'
1986 qualifiers established. */
1987 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
1988 {
1989 DEBUG_TRACE ("FAIL on operand qualifier matching");
1990 if (mismatch_detail)
1991 {
1992 /* Return an error type to indicate that it is a qualifier
1993 matching failure; we don't care about which operand as there
1994 is enough information in the opcode table to reproduce it. */
1995 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
1996 mismatch_detail->index = -1;
1997 mismatch_detail->error = NULL;
1998 }
1999 return 0;
2000 }
2001
2002 /* Match operands' constraint. */
2003 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2004 {
2005 enum aarch64_opnd type = inst->opcode->operands[i];
2006 if (type == AARCH64_OPND_NIL)
2007 break;
2008 if (inst->operands[i].skip)
2009 {
2010 DEBUG_TRACE ("skip the incomplete operand %d", i);
2011 continue;
2012 }
2013 if (operand_general_constraint_met_p (inst->operands, i, type,
2014 inst->opcode, mismatch_detail) == 0)
2015 {
2016 DEBUG_TRACE ("FAIL on operand %d", i);
2017 return 0;
2018 }
2019 }
2020
2021 DEBUG_TRACE ("PASS");
2022
2023 return 1;
2024 }
2025
2026 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2027 Also updates the TYPE of each INST->OPERANDS with the corresponding
2028 value of OPCODE->OPERANDS.
2029
2030 Note that some operand qualifiers may need to be manually cleared by
2031 the caller before it goes on to call aarch64_opcode_encode;
2032 doing so helps the qualifier matching facilities work
2033 properly. */
2034
2035 const aarch64_opcode*
2036 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2037 {
2038 int i;
2039 const aarch64_opcode *old = inst->opcode;
2040
2041 inst->opcode = opcode;
2042
2043 /* Update the operand types. */
2044 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2045 {
2046 inst->operands[i].type = opcode->operands[i];
2047 if (opcode->operands[i] == AARCH64_OPND_NIL)
2048 break;
2049 }
2050
2051 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2052
2053 return old;
2054 }
2055
2056 int
2057 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2058 {
2059 int i;
2060 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2061 if (operands[i] == operand)
2062 return i;
2063 else if (operands[i] == AARCH64_OPND_NIL)
2064 break;
2065 return -1;
2066 }
2067 \f
2068 /* [0][0] 32-bit integer regs with sp Wn
2069 [0][1] 64-bit integer regs with sp Xn sf=1
2070 [1][0] 32-bit integer regs with #0 Wn
2071 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2072 static const char *int_reg[2][2][32] = {
2073 #define R32 "w"
2074 #define R64 "x"
2075 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
2076 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2077 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2078 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", "wsp" },
2079 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
2080 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2081 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2082 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", "sp" } },
2083 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
2084 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2085 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2086 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
2087 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
2088 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2089 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2090 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
2091 #undef R64
2092 #undef R32
2093 };
2094
2095 /* Return the integer register name.
2096 If SP_REG_P is not 0, R31 is an SP reg; otherwise R31 is the zero reg. */
2097
2098 static inline const char *
2099 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2100 {
2101 const int has_zr = sp_reg_p ? 0 : 1;
2102 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2103 return int_reg[has_zr][is_64][regno];
2104 }
2105
2106 /* Like get_int_reg_name, but IS_64 is always 1. */
2107
2108 static inline const char *
2109 get_64bit_int_reg_name (int regno, int sp_reg_p)
2110 {
2111 const int has_zr = sp_reg_p ? 0 : 1;
2112 return int_reg[has_zr][1][regno];
2113 }
2114
2115 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2116
2117 typedef union
2118 {
2119 uint64_t i;
2120 double d;
2121 } double_conv_t;
2122
2123 typedef union
2124 {
2125 uint32_t i;
2126 float f;
2127 } single_conv_t;
2128
2129 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2130 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2131 (depending on the type of the instruction). IMM8 will be expanded to a
2132 single-precision floating-point value (IS_DP == 0) or a double-precision
2133 floating-point value (IS_DP == 1). The expanded value is returned. */
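/* For example, IMM8 == 0x70 expands to 0x3f800000 (1.0f) when IS_DP == 0
   and to 0x3ff0000000000000 (1.0) when IS_DP == 1.  */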
2134
2135 static uint64_t
2136 expand_fp_imm (int is_dp, uint32_t imm8)
2137 {
2138 uint64_t imm;
2139 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2140
2141 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2142 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2143 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2144 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2145 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
2146 if (is_dp)
2147 {
2148 imm = (imm8_7 << (63-32)) /* imm8<7> */
2149 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
2150 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2151 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2152 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2153 imm <<= 32;
2154 }
2155 else
2156 {
2157 imm = (imm8_7 << 31) /* imm8<7> */
2158 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2159 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2160 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2161 }
2162
2163 return imm;
2164 }
2165
2166 /* Produce the string representation of the register list operand *OPND
2167 in the buffer pointed to by BUF of size SIZE. */
2168 static void
2169 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd)
2170 {
2171 const int num_regs = opnd->reglist.num_regs;
2172 const int first_reg = opnd->reglist.first_regno;
2173 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2174 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2175 char tb[8]; /* Temporary buffer. */
2176
2177 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2178 assert (num_regs >= 1 && num_regs <= 4);
2179
2180 /* Prepare the index if any. */
2181 if (opnd->reglist.has_index)
2182 snprintf (tb, 8, "[%d]", opnd->reglist.index);
2183 else
2184 tb[0] = '\0';
2185
2186 /* The hyphenated form is preferred for disassembly if there are
2187 more than two registers in the list, and the register numbers
2188 are monotonically increasing in increments of one. */
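/* E.g. a four-register list starting at v4 with the 4S qualifier is
   printed as "{v4.4s-v7.4s}", while a two-register list is printed in
   full, e.g. "{v0.16b, v1.16b}".  */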
2189 if (num_regs > 2 && last_reg > first_reg)
2190 snprintf (buf, size, "{v%d.%s-v%d.%s}%s", first_reg, qlf_name,
2191 last_reg, qlf_name, tb);
2192 else
2193 {
2194 const int reg0 = first_reg;
2195 const int reg1 = (first_reg + 1) & 0x1f;
2196 const int reg2 = (first_reg + 2) & 0x1f;
2197 const int reg3 = (first_reg + 3) & 0x1f;
2198
2199 switch (num_regs)
2200 {
2201 case 1:
2202 snprintf (buf, size, "{v%d.%s}%s", reg0, qlf_name, tb);
2203 break;
2204 case 2:
2205 snprintf (buf, size, "{v%d.%s, v%d.%s}%s", reg0, qlf_name,
2206 reg1, qlf_name, tb);
2207 break;
2208 case 3:
2209 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0, qlf_name,
2210 reg1, qlf_name, reg2, qlf_name, tb);
2211 break;
2212 case 4:
2213 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
2214 reg0, qlf_name, reg1, qlf_name, reg2, qlf_name,
2215 reg3, qlf_name, tb);
2216 break;
2217 }
2218 }
2219 }
2220
2221 /* Produce the string representation of the register offset address operand
2222 *OPND in the buffer pointed to by BUF of size SIZE. */
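/* E.g. "[x0,w7,sxtw #2]", or "[x3,x4]" when the shift operator is LSL
   and the amount is omitted or zero.  */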
2223 static void
2224 print_register_offset_address (char *buf, size_t size,
2225 const aarch64_opnd_info *opnd)
2226 {
2227 const size_t tblen = 16;
2228 char tb[tblen]; /* Temporary buffer. */
2229 bfd_boolean lsl_p = FALSE; /* Is LSL shift operator? */
2230 bfd_boolean wm_p = FALSE; /* Should Rm be Wm? */
2231 bfd_boolean print_extend_p = TRUE;
2232 bfd_boolean print_amount_p = TRUE;
2233 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2234
2235 switch (opnd->shifter.kind)
2236 {
2237 case AARCH64_MOD_UXTW: wm_p = TRUE; break;
2238 case AARCH64_MOD_LSL : lsl_p = TRUE; break;
2239 case AARCH64_MOD_SXTW: wm_p = TRUE; break;
2240 case AARCH64_MOD_SXTX: break;
2241 default: assert (0);
2242 }
2243
2244 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2245 || !opnd->shifter.amount_present))
2246 {
2247 /* Don't print the shift/extend amount when the amount is zero and
2248 it is not the special case of an 8-bit load/store instruction. */
2249 print_amount_p = FALSE;
2250 /* Likewise, no need to print the shift operator LSL in such a
2251 situation. */
2252 if (lsl_p)
2253 print_extend_p = FALSE;
2254 }
2255
2256 /* Prepare for the extend/shift. */
2257 if (print_extend_p)
2258 {
2259 if (print_amount_p)
2260 snprintf (tb, tblen, ",%s #%d", shift_name, opnd->shifter.amount);
2261 else
2262 snprintf (tb, tblen, ",%s", shift_name);
2263 }
2264 else
2265 tb[0] = '\0';
2266
2267 snprintf (buf, size, "[%s,%c%d%s]",
2268 get_64bit_int_reg_name (opnd->addr.base_regno, 1),
2269 wm_p ? 'w' : 'x', opnd->addr.offset.regno, tb);
2270 }
2271
2272 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2273 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2274 PC, PCREL_P and ADDRESS are used to pass in and return information about
2275 the PC-relative address calculation, where the PC value is passed in
2276 PC. If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
2277 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2278 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2279
2280 The function serves both the disassembler and the assembler diagnostics
2281 issuer, which is the reason why it lives in this file. */
2282
2283 void
2284 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2285 const aarch64_opcode *opcode,
2286 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2287 bfd_vma *address)
2288 {
2289 int i;
2290 const char *name = NULL;
2291 const aarch64_opnd_info *opnd = opnds + idx;
2292 enum aarch64_modifier_kind kind;
2293 uint64_t addr;
2294
2295 buf[0] = '\0';
2296 if (pcrel_p)
2297 *pcrel_p = 0;
2298
2299 switch (opnd->type)
2300 {
2301 case AARCH64_OPND_Rd:
2302 case AARCH64_OPND_Rn:
2303 case AARCH64_OPND_Rm:
2304 case AARCH64_OPND_Rt:
2305 case AARCH64_OPND_Rt2:
2306 case AARCH64_OPND_Rs:
2307 case AARCH64_OPND_Ra:
2308 case AARCH64_OPND_Rt_SYS:
2309 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2310 the <ic_op>, therefore we use opnd->present to override the
2311 generic optional-ness information. */
2312 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2313 break;
2314 /* Omit the operand, e.g. RET. */
2315 if (optional_operand_p (opcode, idx)
2316 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2317 break;
2318 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2319 || opnd->qualifier == AARCH64_OPND_QLF_X);
2320 snprintf (buf, size, "%s",
2321 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2322 break;
2323
2324 case AARCH64_OPND_Rd_SP:
2325 case AARCH64_OPND_Rn_SP:
2326 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2327 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2328 || opnd->qualifier == AARCH64_OPND_QLF_X
2329 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2330 snprintf (buf, size, "%s",
2331 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2332 break;
2333
2334 case AARCH64_OPND_Rm_EXT:
2335 kind = opnd->shifter.kind;
2336 assert (idx == 1 || idx == 2);
2337 if ((aarch64_stack_pointer_p (opnds)
2338 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2339 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2340 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2341 && kind == AARCH64_MOD_UXTW)
2342 || (opnd->qualifier == AARCH64_OPND_QLF_X
2343 && kind == AARCH64_MOD_UXTX)))
2344 {
2345 /* 'LSL' is the preferred form in this case. */
2346 kind = AARCH64_MOD_LSL;
2347 if (opnd->shifter.amount == 0)
2348 {
2349 /* Shifter omitted. */
2350 snprintf (buf, size, "%s",
2351 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2352 break;
2353 }
2354 }
2355 if (opnd->shifter.amount)
2356 snprintf (buf, size, "%s, %s #%d",
2357 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2358 aarch64_operand_modifiers[kind].name,
2359 opnd->shifter.amount);
2360 else
2361 snprintf (buf, size, "%s, %s",
2362 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2363 aarch64_operand_modifiers[kind].name);
2364 break;
2365
2366 case AARCH64_OPND_Rm_SFT:
2367 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2368 || opnd->qualifier == AARCH64_OPND_QLF_X);
2369 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
2370 snprintf (buf, size, "%s",
2371 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2372 else
2373 snprintf (buf, size, "%s, %s #%d",
2374 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2375 aarch64_operand_modifiers[opnd->shifter.kind].name,
2376 opnd->shifter.amount);
2377 break;
2378
2379 case AARCH64_OPND_Fd:
2380 case AARCH64_OPND_Fn:
2381 case AARCH64_OPND_Fm:
2382 case AARCH64_OPND_Fa:
2383 case AARCH64_OPND_Ft:
2384 case AARCH64_OPND_Ft2:
2385 case AARCH64_OPND_Sd:
2386 case AARCH64_OPND_Sn:
2387 case AARCH64_OPND_Sm:
2388 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
2389 opnd->reg.regno);
2390 break;
2391
2392 case AARCH64_OPND_Vd:
2393 case AARCH64_OPND_Vn:
2394 case AARCH64_OPND_Vm:
2395 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
2396 aarch64_get_qualifier_name (opnd->qualifier));
2397 break;
2398
2399 case AARCH64_OPND_Ed:
2400 case AARCH64_OPND_En:
2401 case AARCH64_OPND_Em:
2402 snprintf (buf, size, "v%d.%s[%d]", opnd->reglane.regno,
2403 aarch64_get_qualifier_name (opnd->qualifier),
2404 opnd->reglane.index);
2405 break;
2406
2407 case AARCH64_OPND_VdD1:
2408 case AARCH64_OPND_VnD1:
2409 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
2410 break;
2411
2412 case AARCH64_OPND_LVn:
2413 case AARCH64_OPND_LVt:
2414 case AARCH64_OPND_LVt_AL:
2415 case AARCH64_OPND_LEt:
2416 print_register_list (buf, size, opnd);
2417 break;
2418
2419 case AARCH64_OPND_Cn:
2420 case AARCH64_OPND_Cm:
2421 snprintf (buf, size, "C%d", opnd->reg.regno);
2422 break;
2423
2424 case AARCH64_OPND_IDX:
2425 case AARCH64_OPND_IMM:
2426 case AARCH64_OPND_WIDTH:
2427 case AARCH64_OPND_UIMM3_OP1:
2428 case AARCH64_OPND_UIMM3_OP2:
2429 case AARCH64_OPND_BIT_NUM:
2430 case AARCH64_OPND_IMM_VLSL:
2431 case AARCH64_OPND_IMM_VLSR:
2432 case AARCH64_OPND_SHLL_IMM:
2433 case AARCH64_OPND_IMM0:
2434 case AARCH64_OPND_IMMR:
2435 case AARCH64_OPND_IMMS:
2436 case AARCH64_OPND_FBITS:
2437 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2438 break;
2439
2440 case AARCH64_OPND_IMM_MOV:
2441 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2442 {
2443 case 4: /* e.g. MOV Wd, #<imm32>. */
2444 {
2445 int imm32 = opnd->imm.value;
2446 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
2447 }
2448 break;
2449 case 8: /* e.g. MOV Xd, #<imm64>. */
2450 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
2451 opnd->imm.value, opnd->imm.value);
2452 break;
2453 default: assert (0);
2454 }
2455 break;
2456
2457 case AARCH64_OPND_FPIMM0:
2458 snprintf (buf, size, "#0.0");
2459 break;
2460
2461 case AARCH64_OPND_LIMM:
2462 case AARCH64_OPND_AIMM:
2463 case AARCH64_OPND_HALF:
2464 if (opnd->shifter.amount)
2465 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
2466 opnd->shifter.amount);
2467 else
2468 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2469 break;
2470
2471 case AARCH64_OPND_SIMD_IMM:
2472 case AARCH64_OPND_SIMD_IMM_SFT:
2473 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
2474 || opnd->shifter.kind == AARCH64_MOD_NONE)
2475 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2476 else
2477 snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
2478 aarch64_operand_modifiers[opnd->shifter.kind].name,
2479 opnd->shifter.amount);
2480 break;
2481
2482 case AARCH64_OPND_FPIMM:
2483 case AARCH64_OPND_SIMD_FPIMM:
2484 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2485 {
2486 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
2487 {
2488 single_conv_t c;
2489 c.i = expand_fp_imm (0, opnd->imm.value);
2490 snprintf (buf, size, "#%.18e", c.f);
2491 }
2492 break;
2493 case 8: /* e.g. FMOV <Dd>, #<imm>. */
2494 {
2495 double_conv_t c;
2496 c.i = expand_fp_imm (1, opnd->imm.value);
2497 snprintf (buf, size, "#%.18e", c.d);
2498 }
2499 break;
2500 default: assert (0);
2501 }
2502 break;
2503
2504 case AARCH64_OPND_CCMP_IMM:
2505 case AARCH64_OPND_NZCV:
2506 case AARCH64_OPND_EXCEPTION:
2507 case AARCH64_OPND_UIMM4:
2508 case AARCH64_OPND_UIMM7:
2509 if (optional_operand_p (opcode, idx) == TRUE
2510 && (opnd->imm.value ==
2511 (int64_t) get_optional_operand_default_value (opcode)))
2512 /* Omit the operand, e.g. DCPS1. */
2513 break;
2514 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
2515 break;
2516
2517 case AARCH64_OPND_COND:
2518 snprintf (buf, size, "%s", opnd->cond->names[0]);
2519 break;
2520
2521 case AARCH64_OPND_ADDR_ADRP:
2522 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
2523 + opnd->imm.value;
2524 if (pcrel_p)
2525 *pcrel_p = 1;
2526 if (address)
2527 *address = addr;
2528 /* This is not necessary during disassembly, as print_address_func
2529 in the disassemble_info will take care of the printing. But some
2530 other callers may still be interested in getting the string in *BUF,
2531 so here we do snprintf regardless. */
2532 snprintf (buf, size, "#0x%" PRIx64, addr);
2533 break;
2534
2535 case AARCH64_OPND_ADDR_PCREL14:
2536 case AARCH64_OPND_ADDR_PCREL19:
2537 case AARCH64_OPND_ADDR_PCREL21:
2538 case AARCH64_OPND_ADDR_PCREL26:
2539 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
2540 if (pcrel_p)
2541 *pcrel_p = 1;
2542 if (address)
2543 *address = addr;
2544 /* This is not necessary during disassembly, as print_address_func
2545 in the disassemble_info will take care of the printing. But some
2546 other callers may still be interested in getting the string in *BUF,
2547 so here we do snprintf regardless. */
2548 snprintf (buf, size, "#0x%" PRIx64, addr);
2549 break;
2550
2551 case AARCH64_OPND_ADDR_SIMPLE:
2552 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
2553 case AARCH64_OPND_SIMD_ADDR_POST:
2554 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2555 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
2556 {
2557 if (opnd->addr.offset.is_reg)
2558 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
2559 else
2560 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
2561 }
2562 else
2563 snprintf (buf, size, "[%s]", name);
2564 break;
2565
2566 case AARCH64_OPND_ADDR_REGOFF:
2567 print_register_offset_address (buf, size, opnd);
2568 break;
2569
2570 case AARCH64_OPND_ADDR_SIMM7:
2571 case AARCH64_OPND_ADDR_SIMM9:
2572 case AARCH64_OPND_ADDR_SIMM9_2:
2573 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2574 if (opnd->addr.writeback)
2575 {
2576 if (opnd->addr.preind)
2577 snprintf (buf, size, "[%s,#%d]!", name, opnd->addr.offset.imm);
2578 else
2579 snprintf (buf, size, "[%s],#%d", name, opnd->addr.offset.imm);
2580 }
2581 else
2582 {
2583 if (opnd->addr.offset.imm)
2584 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2585 else
2586 snprintf (buf, size, "[%s]", name);
2587 }
2588 break;
2589
2590 case AARCH64_OPND_ADDR_UIMM12:
2591 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2592 if (opnd->addr.offset.imm)
2593 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2594 else
2595 snprintf (buf, size, "[%s]", name);
2596 break;
2597
2598 case AARCH64_OPND_SYSREG:
2599 for (i = 0; aarch64_sys_regs[i].name; ++i)
2600 if (aarch64_sys_regs[i].value == opnd->sysreg)
2601 break;
2602 if (aarch64_sys_regs[i].name)
2603 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
2604 else
2605 {
2606 /* Implementation defined system register. */
2607 unsigned int value = opnd->sysreg;
2608 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
2609 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
2610 value & 0x7);
2611 }
2612 break;
2613
2614 case AARCH64_OPND_PSTATEFIELD:
2615 for (i = 0; aarch64_pstatefields[i].name; ++i)
2616 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2617 break;
2618 assert (aarch64_pstatefields[i].name);
2619 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
2620 break;
2621
2622 case AARCH64_OPND_SYSREG_AT:
2623 case AARCH64_OPND_SYSREG_DC:
2624 case AARCH64_OPND_SYSREG_IC:
2625 case AARCH64_OPND_SYSREG_TLBI:
2626 snprintf (buf, size, "%s", opnd->sysins_op->template);
2627 break;
2628
2629 case AARCH64_OPND_BARRIER:
2630 snprintf (buf, size, "%s", opnd->barrier->name);
2631 break;
2632
2633 case AARCH64_OPND_BARRIER_ISB:
2634 /* Operand can be omitted, e.g. in ISB. */
2635 if (! optional_operand_p (opcode, idx)
2636 || (opnd->barrier->value
2637 != get_optional_operand_default_value (opcode)))
2638 snprintf (buf, size, "#0x%x", opnd->barrier->value);
2639 break;
2640
2641 case AARCH64_OPND_PRFOP:
2642 if (opnd->prfop->name != NULL)
2643 snprintf (buf, size, "%s", opnd->prfop->name);
2644 else
2645 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
2646 break;
2647
2648 default:
2649 assert (0);
2650 }
2651 }
2652 \f
2653 #define CPENC(op0,op1,crn,crm,op2) \
2654 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
2655 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
2656 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
2657 /* for 3.9.10 System Instructions */
2658 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
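/* For example, the NZCV register (op0 3, op1 3, CRn C4, CRm C2, op2 0)
   is entered below as CPEN_(3,C2,0), and the IC IVAU operation as
   CPENS(3,C7,C5,1).  */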
2659
2660 #define C0 0
2661 #define C1 1
2662 #define C2 2
2663 #define C3 3
2664 #define C4 4
2665 #define C5 5
2666 #define C6 6
2667 #define C7 7
2668 #define C8 8
2669 #define C9 9
2670 #define C10 10
2671 #define C11 11
2672 #define C12 12
2673 #define C13 13
2674 #define C14 14
2675 #define C15 15
2676
2677 /* TODO: there are two more issues that need to be resolved:
2678 1. handle read-only and write-only system registers
2679 2. handle cpu-implementation-defined system registers. */
2680 const struct aarch64_name_value_pair aarch64_sys_regs [] =
2681 {
2682 { "spsr_el1", CPEN_(0,C0,0) }, /* = spsr_svc */
2683 { "elr_el1", CPEN_(0,C0,1) },
2684 { "sp_el0", CPEN_(0,C1,0) },
2685 { "spsel", CPEN_(0,C2,0) },
2686 { "daif", CPEN_(3,C2,1) },
2687 { "currentel", CPEN_(0,C2,2) }, /* RO */
2688 { "nzcv", CPEN_(3,C2,0) },
2689 { "fpcr", CPEN_(3,C4,0) },
2690 { "fpsr", CPEN_(3,C4,1) },
2691 { "dspsr_el0", CPEN_(3,C5,0) },
2692 { "dlr_el0", CPEN_(3,C5,1) },
2693 { "spsr_el2", CPEN_(4,C0,0) }, /* = spsr_hyp */
2694 { "elr_el2", CPEN_(4,C0,1) },
2695 { "sp_el1", CPEN_(4,C1,0) },
2696 { "spsr_irq", CPEN_(4,C3,0) },
2697 { "spsr_abt", CPEN_(4,C3,1) },
2698 { "spsr_und", CPEN_(4,C3,2) },
2699 { "spsr_fiq", CPEN_(4,C3,3) },
2700 { "spsr_el3", CPEN_(6,C0,0) },
2701 { "elr_el3", CPEN_(6,C0,1) },
2702 { "sp_el2", CPEN_(6,C1,0) },
2703 { "spsr_svc", CPEN_(0,C0,0) }, /* = spsr_el1 */
2704 { "spsr_hyp", CPEN_(4,C0,0) }, /* = spsr_el2 */
2705 { "midr_el1", CPENC(3,0,C0,C0,0) }, /* RO */
2706 { "ctr_el0", CPENC(3,3,C0,C0,1) }, /* RO */
2707 { "mpidr_el1", CPENC(3,0,C0,C0,5) }, /* RO */
2708 { "revidr_el1", CPENC(3,0,C0,C0,6) }, /* RO */
2709 { "aidr_el1", CPENC(3,1,C0,C0,7) }, /* RO */
2710 { "dczid_el0", CPENC(3,3,C0,C0,7) }, /* RO */
2711 { "id_dfr0_el1", CPENC(3,0,C0,C1,2) }, /* RO */
2712 { "id_pfr0_el1", CPENC(3,0,C0,C1,0) }, /* RO */
2713 { "id_pfr1_el1", CPENC(3,0,C0,C1,1) }, /* RO */
2714 { "id_afr0_el1", CPENC(3,0,C0,C1,3) }, /* RO */
2715 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4) }, /* RO */
2716 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5) }, /* RO */
2717 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6) }, /* RO */
2718 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7) }, /* RO */
2719 { "id_isar0_el1", CPENC(3,0,C0,C2,0) }, /* RO */
2720 { "id_isar1_el1", CPENC(3,0,C0,C2,1) }, /* RO */
2721 { "id_isar2_el1", CPENC(3,0,C0,C2,2) }, /* RO */
2722 { "id_isar3_el1", CPENC(3,0,C0,C2,3) }, /* RO */
2723 { "id_isar4_el1", CPENC(3,0,C0,C2,4) }, /* RO */
2724 { "id_isar5_el1", CPENC(3,0,C0,C2,5) }, /* RO */
2725 { "mvfr0_el1", CPENC(3,0,C0,C3,0) }, /* RO */
2726 { "mvfr1_el1", CPENC(3,0,C0,C3,1) }, /* RO */
2727 { "mvfr2_el1", CPENC(3,0,C0,C3,2) }, /* RO */
2728 { "ccsidr_el1", CPENC(3,1,C0,C0,0) }, /* RO */
2729 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0) }, /* RO */
2730 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1) }, /* RO */
2731 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0) }, /* RO */
2732 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1) }, /* RO */
2733 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0) }, /* RO */
2734 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1) }, /* RO */
2735 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0) }, /* RO */
2736 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1) }, /* RO */
2737 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4) }, /* RO */
2738 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5) }, /* RO */
2739 { "clidr_el1", CPENC(3,1,C0,C0,1) }, /* RO */
2740 { "csselr_el1", CPENC(3,2,C0,C0,0) }, /* RO */
2741 { "vpidr_el2", CPENC(3,4,C0,C0,0) },
2742 { "vmpidr_el2", CPENC(3,4,C0,C0,5) },
2743 { "sctlr_el1", CPENC(3,0,C1,C0,0) },
2744 { "sctlr_el2", CPENC(3,4,C1,C0,0) },
2745 { "sctlr_el3", CPENC(3,6,C1,C0,0) },
2746 { "actlr_el1", CPENC(3,0,C1,C0,1) },
2747 { "actlr_el2", CPENC(3,4,C1,C0,1) },
2748 { "actlr_el3", CPENC(3,6,C1,C0,1) },
2749 { "cpacr_el1", CPENC(3,0,C1,C0,2) },
2750 { "cptr_el2", CPENC(3,4,C1,C1,2) },
2751 { "cptr_el3", CPENC(3,6,C1,C1,2) },
2752 { "scr_el3", CPENC(3,6,C1,C1,0) },
2753 { "hcr_el2", CPENC(3,4,C1,C1,0) },
2754 { "mdcr_el2", CPENC(3,4,C1,C1,1) },
2755 { "mdcr_el3", CPENC(3,6,C1,C3,1) },
2756 { "hstr_el2", CPENC(3,4,C1,C1,3) },
2757 { "hacr_el2", CPENC(3,4,C1,C1,7) },
2758 { "ttbr0_el1", CPENC(3,0,C2,C0,0) },
2759 { "ttbr1_el1", CPENC(3,0,C2,C0,1) },
2760 { "ttbr0_el2", CPENC(3,4,C2,C0,0) },
2761 { "ttbr0_el3", CPENC(3,6,C2,C0,0) },
2762 { "vttbr_el2", CPENC(3,4,C2,C1,0) },
2763 { "tcr_el1", CPENC(3,0,C2,C0,2) },
2764 { "tcr_el2", CPENC(3,4,C2,C0,2) },
2765 { "tcr_el3", CPENC(3,6,C2,C0,2) },
2766 { "vtcr_el2", CPENC(3,4,C2,C1,2) },
2767 { "afsr0_el1", CPENC(3,0,C5,C1,0) },
2768 { "afsr1_el1", CPENC(3,0,C5,C1,1) },
2769 { "afsr0_el2", CPENC(3,4,C5,C1,0) },
2770 { "afsr1_el2", CPENC(3,4,C5,C1,1) },
2771 { "afsr0_el3", CPENC(3,6,C5,C1,0) },
2772 { "afsr1_el3", CPENC(3,6,C5,C1,1) },
2773 { "esr_el1", CPENC(3,0,C5,C2,0) },
2774 { "esr_el2", CPENC(3,4,C5,C2,0) },
2775 { "esr_el3", CPENC(3,6,C5,C2,0) },
2776 { "fpexc32_el2", CPENC(3,4,C5,C3,0) },
2777 { "far_el1", CPENC(3,0,C6,C0,0) },
2778 { "far_el2", CPENC(3,4,C6,C0,0) },
2779 { "far_el3", CPENC(3,6,C6,C0,0) },
2780 { "hpfar_el2", CPENC(3,4,C6,C0,4) },
2781 { "par_el1", CPENC(3,0,C7,C4,0) },
2782 { "mair_el1", CPENC(3,0,C10,C2,0) },
2783 { "mair_el2", CPENC(3,4,C10,C2,0) },
2784 { "mair_el3", CPENC(3,6,C10,C2,0) },
2785 { "amair_el1", CPENC(3,0,C10,C3,0) },
2786 { "amair_el2", CPENC(3,4,C10,C3,0) },
2787 { "amair_el3", CPENC(3,6,C10,C3,0) },
2788 { "vbar_el1", CPENC(3,0,C12,C0,0) },
2789 { "vbar_el2", CPENC(3,4,C12,C0,0) },
2790 { "vbar_el3", CPENC(3,6,C12,C0,0) },
2791 { "rvbar_el1", CPENC(3,0,C12,C0,1) }, /* RO */
2792 { "rvbar_el2", CPENC(3,4,C12,C0,1) }, /* RO */
2793 { "rvbar_el3", CPENC(3,6,C12,C0,1) }, /* RO */
2794 { "rmr_el1", CPENC(3,0,C12,C0,2) },
2795 { "rmr_el2", CPENC(3,4,C12,C0,2) },
2796 { "rmr_el3", CPENC(3,6,C12,C0,2) },
2797 { "isr_el1", CPENC(3,0,C12,C1,0) }, /* RO */
2798 { "contextidr_el1", CPENC(3,0,C13,C0,1) },
2799 { "tpidr_el0", CPENC(3,3,C13,C0,2) },
2800 { "tpidrro_el0", CPENC(3,3,C13,C0,3) }, /* RO */
2801 { "tpidr_el1", CPENC(3,0,C13,C0,4) },
2802 { "tpidr_el2", CPENC(3,4,C13,C0,2) },
2803 { "tpidr_el3", CPENC(3,6,C13,C0,2) },
2804 { "teecr32_el1", CPENC(2,2,C0, C0,0) }, /* See section 3.9.7.1 */
2805 { "cntfrq_el0", CPENC(3,3,C14,C0,0) }, /* RO */
2806 { "cntpct_el0", CPENC(3,3,C14,C0,1) }, /* RO */
2807 { "cntvct_el0", CPENC(3,3,C14,C0,2) }, /* RO */
2808 { "cntvoff_el2", CPENC(3,4,C14,C0,3) },
2809 { "cntkctl_el1", CPENC(3,0,C14,C1,0) },
2810 { "cnthctl_el2", CPENC(3,4,C14,C1,0) },
2811 { "cntp_tval_el0", CPENC(3,3,C14,C2,0) },
2812 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1) },
2813 { "cntp_cval_el0", CPENC(3,3,C14,C2,2) },
2814 { "cntv_tval_el0", CPENC(3,3,C14,C3,0) },
2815 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1) },
2816 { "cntv_cval_el0", CPENC(3,3,C14,C3,2) },
2817 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0) },
2818 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1) },
2819 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2) },
2820 { "cntps_tval_el1", CPENC(3,7,C14,C2,0) },
2821 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1) },
2822 { "cntps_cval_el1", CPENC(3,7,C14,C2,2) },
2823 { "dacr32_el2", CPENC(3,4,C3,C0,0) },
2824 { "ifsr32_el2", CPENC(3,4,C5,C0,1) },
2825 { "teehbr32_el1", CPENC(2,2,C1,C0,0) },
2826 { "sder32_el3", CPENC(3,6,C1,C1,1) },
2827 { "mdscr_el1", CPENC(2,0,C0, C2, 2) },
2828 { "mdccsr_el0", CPENC(2,3,C0, C1, 0) }, /* r */
2829 { "mdccint_el1", CPENC(2,0,C0, C2, 0) },
2830 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0) },
2831 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0) }, /* r */
2832 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0) }, /* w */
2833 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2) }, /* r */
2834 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2) }, /* w */
2835 { "oseccr_el1", CPENC(2,0,C0, C6, 2) },
2836 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0) },
2837 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4) },
2838 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4) },
2839 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4) },
2840 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4) },
2841 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4) },
2842 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4) },
2843 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4) },
2844 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4) },
2845 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4) },
2846 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4) },
2847 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4) },
2848 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4) },
2849 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4) },
2850 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4) },
2851 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4) },
2852 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4) },
2853 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5) },
2854 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5) },
2855 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5) },
2856 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5) },
2857 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5) },
2858 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5) },
2859 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5) },
2860 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5) },
2861 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5) },
2862 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5) },
2863 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5) },
2864 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5) },
2865 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5) },
2866 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5) },
2867 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5) },
2868 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5) },
2869 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6) },
2870 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6) },
2871 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6) },
2872 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6) },
2873 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6) },
2874 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6) },
2875 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6) },
2876 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6) },
2877 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6) },
2878 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6) },
2879 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6) },
2880 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6) },
2881 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6) },
2882 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6) },
2883 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6) },
2884 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6) },
2885 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7) },
2886 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7) },
2887 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7) },
2888 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7) },
2889 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7) },
2890 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7) },
2891 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7) },
2892 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7) },
2893 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7) },
2894 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7) },
2895 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7) },
2896 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7) },
2897 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7) },
2898 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7) },
2899 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7) },
2900 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7) },
2901 { "mdrar_el1", CPENC(2,0,C1, C0, 0) }, /* r */
2902 { "oslar_el1", CPENC(2,0,C1, C0, 4) }, /* w */
2903 { "oslsr_el1", CPENC(2,0,C1, C1, 4) }, /* r */
2904 { "osdlr_el1", CPENC(2,0,C1, C3, 4) },
2905 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4) },
2906 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6) },
2907 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6) },
2908 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6) }, /* r */
2909
2910 { "pmcr_el0", CPENC(3,3,C9,C12, 0) },
2911 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1) },
2912 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2) },
2913 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3) },
2914 { "pmswinc_el0", CPENC(3,3,C9,C12, 4) }, /* w */
2915 { "pmselr_el0", CPENC(3,3,C9,C12, 5) },
2916 { "pmceid0_el0", CPENC(3,3,C9,C12, 6) }, /* r */
2917 { "pmceid1_el0", CPENC(3,3,C9,C12, 7) }, /* r */
2918 { "pmccntr_el0", CPENC(3,3,C9,C13, 0) },
2919 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1) },
2920 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2) },
2921 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0) },
2922 { "pmintenset_el1", CPENC(3,0,C9,C14, 1) },
2923 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2) },
2924 { "pmovsset_el0", CPENC(3,3,C9,C14, 3) },
2925 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0) },
2926 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1) },
2927 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2) },
2928 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3) },
2929 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4) },
2930 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5) },
2931 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6) },
2932 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7) },
2933 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0) },
2934 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1) },
2935 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2) },
2936 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3) },
2937 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4) },
2938 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5) },
2939 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6) },
2940 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7) },
2941 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0) },
2942 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1) },
2943 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2) },
2944 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3) },
2945 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4) },
2946 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5) },
2947 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6) },
2948 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7) },
2949 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0) },
2950 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1) },
2951 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2) },
2952 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3) },
2953 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4) },
2954 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5) },
2955 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6) },
2956 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0) },
2957 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1) },
2958 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2) },
2959 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3) },
2960 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4) },
2961 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5) },
2962 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6) },
2963 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7) },
2964 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0) },
2965 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1) },
2966 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2) },
2967 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3) },
2968 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4) },
2969 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5) },
2970 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6) },
2971 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7) },
2972 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0) },
2973 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1) },
2974 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2) },
2975 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3) },
2976 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4) },
2977 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5) },
2978 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6) },
2979 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7) },
2980 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0) },
2981 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1) },
2982 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2) },
2983 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3) },
2984 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4) },
2985 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5) },
2986 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6) },
2987 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7) },
2988 { 0, CPENC(0,0,0,0,0) },
2989 };
2990
2991 const struct aarch64_name_value_pair aarch64_pstatefields [] =
2992 {
2993 { "spsel", 0x05 },
2994 { "daifset", 0x1e },
2995 { "daifclr", 0x1f },
2996 { 0, CPENC(0,0,0,0,0) },
2997 };
2998
2999 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
3000 {
3001 { "ialluis", CPENS(0,C7,C1,0), 0 },
3002 { "iallu", CPENS(0,C7,C5,0), 0 },
3003 { "ivau", CPENS(3,C7,C5,1), 1 },
3004 { 0, CPENS(0,0,0,0), 0 }
3005 };
3006
3007 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
3008 {
3009 { "zva", CPENS(3,C7,C4,1), 1 },
3010 { "ivac", CPENS(0,C7,C6,1), 1 },
3011 { "isw", CPENS(0,C7,C6,2), 1 },
3012 { "cvac", CPENS(3,C7,C10,1), 1 },
3013 { "csw", CPENS(0,C7,C10,2), 1 },
3014 { "cvau", CPENS(3,C7,C11,1), 1 },
3015 { "civac", CPENS(3,C7,C14,1), 1 },
3016 { "cisw", CPENS(0,C7,C14,2), 1 },
3017 { 0, CPENS(0,0,0,0), 0 }
3018 };
3019
3020 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
3021 {
3022 { "s1e1r", CPENS(0,C7,C8,0), 1 },
3023 { "s1e1w", CPENS(0,C7,C8,1), 1 },
3024 { "s1e0r", CPENS(0,C7,C8,2), 1 },
3025 { "s1e0w", CPENS(0,C7,C8,3), 1 },
3026 { "s12e1r", CPENS(4,C7,C8,4), 1 },
3027 { "s12e1w", CPENS(4,C7,C8,5), 1 },
3028 { "s12e0r", CPENS(4,C7,C8,6), 1 },
3029 { "s12e0w", CPENS(4,C7,C8,7), 1 },
3030 { "s1e2r", CPENS(4,C7,C8,0), 1 },
3031 { "s1e2w", CPENS(4,C7,C8,1), 1 },
3032 { "s1e3r", CPENS(6,C7,C8,0), 1 },
3033 { "s1e3w", CPENS(6,C7,C8,1), 1 },
3034 { 0, CPENS(0,0,0,0), 0 }
3035 };
3036
3037 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
3038 {
3039 { "vmalle1", CPENS(0,C8,C7,0), 0 },
3040 { "vae1", CPENS(0,C8,C7,1), 1 },
3041 { "aside1", CPENS(0,C8,C7,2), 1 },
3042 { "vaae1", CPENS(0,C8,C7,3), 1 },
3043 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
3044 { "vae1is", CPENS(0,C8,C3,1), 1 },
3045 { "aside1is", CPENS(0,C8,C3,2), 1 },
3046 { "vaae1is", CPENS(0,C8,C3,3), 1 },
3047 { "ipas2e1is", CPENS(4,C8,C0,1), 1 },
3048 { "ipas2le1is",CPENS(4,C8,C0,5), 1 },
3049 { "ipas2e1", CPENS(4,C8,C4,1), 1 },
3050 { "ipas2le1", CPENS(4,C8,C4,5), 1 },
3051 { "vae2", CPENS(4,C8,C7,1), 1 },
3052 { "vae2is", CPENS(4,C8,C3,1), 1 },
3053 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
3054 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
3055 { "vae3", CPENS(6,C8,C7,1), 1 },
3056 { "vae3is", CPENS(6,C8,C3,1), 1 },
3057 { "alle2", CPENS(4,C8,C7,0), 0 },
3058 { "alle2is", CPENS(4,C8,C3,0), 0 },
3059 { "alle1", CPENS(4,C8,C7,4), 0 },
3060 { "alle1is", CPENS(4,C8,C3,4), 0 },
3061 { "alle3", CPENS(6,C8,C7,0), 0 },
3062 { "alle3is", CPENS(6,C8,C3,0), 0 },
3063 { "vale1is", CPENS(0,C8,C3,5), 1 },
3064 { "vale2is", CPENS(4,C8,C3,5), 1 },
3065 { "vale3is", CPENS(6,C8,C3,5), 1 },
3066 { "vaale1is", CPENS(0,C8,C3,7), 1 },
3067 { "vale1", CPENS(0,C8,C7,5), 1 },
3068 { "vale2", CPENS(4,C8,C7,5), 1 },
3069 { "vale3", CPENS(6,C8,C7,5), 1 },
3070 { "vaale1", CPENS(0,C8,C7,7), 1 },
3071 { 0, CPENS(0,0,0,0), 0 }
3072 };
3073
3074 #undef C0
3075 #undef C1
3076 #undef C2
3077 #undef C3
3078 #undef C4
3079 #undef C5
3080 #undef C6
3081 #undef C7
3082 #undef C8
3083 #undef C9
3084 #undef C10
3085 #undef C11
3086 #undef C12
3087 #undef C13
3088 #undef C14
3089 #undef C15
3090
3091 /* Include the opcode description table as well as the operand description
3092 table. */
3093 #include "aarch64-tbl.h"