[AArch64][SVE 30/32] Add SVE instruction classes
1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "dis-asm.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define ERR_OK 0
30 #define ERR_UND -1
31 #define ERR_UNP -3
32 #define ERR_NYI -5
33
34 #define INSNLEN 4
35
36 /* Cached mapping symbol state. */
37 enum map_type
38 {
39 MAP_INSN,
40 MAP_DATA
41 };
42
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
46
47 /* Other options */
48 static int no_aliases = 0; /* If set, disassemble as the most general instruction. */
49 \f
50
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
61 {
62 no_aliases = 1;
63 return;
64 }
65
66 if (CONST_STRNEQ (option, "aliases"))
67 {
68 no_aliases = 0;
69 return;
70 }
71
72 #ifdef DEBUG_AARCH64
73 if (CONST_STRNEQ (option, "debug_dump"))
74 {
75 debug_dump = 1;
76 return;
77 }
78 #endif /* DEBUG_AARCH64 */
79
80 /* Invalid option. */
81 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
82 }
83
84 static void
85 parse_aarch64_dis_options (const char *options)
86 {
87 const char *option_end;
88
89 if (options == NULL)
90 return;
91
92 while (*options != '\0')
93 {
94 /* Skip empty options. */
95 if (*options == ',')
96 {
97 options++;
98 continue;
99 }
100
101 /* We know that *options is neither NUL nor a comma. */
102 option_end = options + 1;
103 while (*option_end != ',' && *option_end != '\0')
104 option_end++;
105
106 parse_aarch64_dis_option (options, option_end - options);
107
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
111 }
112 }
113 \f
114 /* Functions doing the instruction disassembling. */
115
116 /* The unnamed arguments consist of the number of fields and information about
117 these fields where the VALUE will be extracted from CODE and returned.
118 MASK can be zero or the base mask of the opcode.
119
120 N.B. the fields are required to be in such an order that the most significant
121 field for VALUE comes first, e.g. the <index> in
122 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123 is encoded in H:L:M; in such a case, the fields should be passed in
124 the order of H, L, M. */
125
126 static inline aarch64_insn
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
128 {
129 uint32_t num;
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
132 va_list va;
133
134 va_start (va, mask);
135 num = va_arg (va, uint32_t);
136 assert (num <= 5);
137 aarch64_insn value = 0x0;
138 while (num--)
139 {
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
144 }
145 return value;
146 }
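/* Illustrative example (editorial addition, not part of the original source):
   for an index that is encoded across H:L:M, a call such as

     index = extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);

   concatenates the three fields with H as the most significant part,
   matching the ordering requirement described above.  */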
147
148 /* Extract the value of all fields in SELF->fields from instruction CODE.
149 The least significant bit comes from the final field. */
150
151 static aarch64_insn
152 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
153 {
154 aarch64_insn value;
155 unsigned int i;
156 enum aarch64_field_kind kind;
157
158 value = 0;
159 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
160 {
161 kind = self->fields[i];
162 value <<= fields[kind].width;
163 value |= extract_field (kind, code, 0);
164 }
165 return value;
166 }
167
168 /* Sign-extend VALUE, treating bit I as the sign bit. */
169 static inline int32_t
170 sign_extend (aarch64_insn value, unsigned i)
171 {
172 uint32_t ret = value;
173
174 assert (i < 32);
175 if ((value >> i) & 0x1)
176 {
177 uint32_t val = (uint32_t)(-1) << i;
178 ret = ret | val;
179 }
180 return (int32_t) ret;
181 }
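/* Worked example (editorial illustration, not part of the original source):
   a 9-bit field holding 0x1f0 is binary 1 1111 0000, i.e. -16 in two's
   complement, so sign_extend (0x1f0, 8) sets bits 8..31 and returns
   (int32_t) 0xfffffff0, which is -16.  */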
182
183 /* N.B. the following inline helper functions create a dependency on the
184 order of operand qualifier enumerators. */
185
186 /* Given VALUE, return qualifier for a general purpose register. */
187 static inline enum aarch64_opnd_qualifier
188 get_greg_qualifier_from_value (aarch64_insn value)
189 {
190 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
191 assert (value <= 0x1
192 && aarch64_get_qualifier_standard_value (qualifier) == value);
193 return qualifier;
194 }
195
196 /* Given VALUE, return qualifier for a vector register. This does not support
197 decoding instructions that accept the 2H vector type. */
198
199 static inline enum aarch64_opnd_qualifier
200 get_vreg_qualifier_from_value (aarch64_insn value)
201 {
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
203
204 /* Instructions using vector type 2H should not call this function. Skip over
205 the 2H qualifier. */
206 if (qualifier >= AARCH64_OPND_QLF_V_2H)
207 qualifier += 1;
208
209 assert (value <= 0x8
210 && aarch64_get_qualifier_standard_value (qualifier) == value);
211 return qualifier;
212 }
213
214 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
215 static inline enum aarch64_opnd_qualifier
216 get_sreg_qualifier_from_value (aarch64_insn value)
217 {
218 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
219
220 assert (value <= 0x4
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
222 return qualifier;
223 }
224
225 /* The instruction in *INST is probably half way through its decoding and
226 our caller wants to know the expected qualifier for operand I. Return
227 such a qualifier if we can establish it; otherwise return
228 AARCH64_OPND_QLF_NIL. */
229
230 static aarch64_opnd_qualifier_t
231 get_expected_qualifier (const aarch64_inst *inst, int i)
232 {
233 aarch64_opnd_qualifier_seq_t qualifiers;
234 /* Should not be called if the qualifier is known. */
235 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
236 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
237 i, qualifiers))
238 return qualifiers[i];
239 else
240 return AARCH64_OPND_QLF_NIL;
241 }
242
243 /* Operand extractors. */
244
245 int
246 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
247 const aarch64_insn code,
248 const aarch64_inst *inst ATTRIBUTE_UNUSED)
249 {
250 info->reg.regno = extract_field (self->fields[0], code, 0);
251 return 1;
252 }
253
254 int
255 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
256 const aarch64_insn code ATTRIBUTE_UNUSED,
257 const aarch64_inst *inst ATTRIBUTE_UNUSED)
258 {
259 assert (info->idx == 1
260 || info->idx == 3);
261 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
262 return 1;
263 }
264
265 /* e.g. IC <ic_op>{, <Xt>}. */
266 int
267 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
268 const aarch64_insn code,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED)
270 {
271 info->reg.regno = extract_field (self->fields[0], code, 0);
272 assert (info->idx == 1
273 && (aarch64_get_operand_class (inst->operands[0].type)
274 == AARCH64_OPND_CLASS_SYSTEM));
275 /* This will make the constraint checking happy and more importantly will
276 help the disassembler determine whether this operand is optional or
277 not. */
278 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
279
280 return 1;
281 }
282
283 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
284 int
285 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
286 const aarch64_insn code,
287 const aarch64_inst *inst ATTRIBUTE_UNUSED)
288 {
289 /* regno */
290 info->reglane.regno = extract_field (self->fields[0], code,
291 inst->opcode->mask);
292
293 /* Index and/or type. */
294 if (inst->opcode->iclass == asisdone
295 || inst->opcode->iclass == asimdins)
296 {
297 if (info->type == AARCH64_OPND_En
298 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
299 {
300 unsigned shift;
301 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
302 assert (info->idx == 1); /* Vn */
303 aarch64_insn value = extract_field (FLD_imm4, code, 0);
304 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
305 info->qualifier = get_expected_qualifier (inst, info->idx);
306 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
307 info->reglane.index = value >> shift;
308 }
309 else
310 {
311 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
312 imm5<3:0> <V>
313 0000 RESERVED
314 xxx1 B
315 xx10 H
316 x100 S
317 1000 D */
318 int pos = -1;
319 aarch64_insn value = extract_field (FLD_imm5, code, 0);
320 while (++pos <= 3 && (value & 0x1) == 0)
321 value >>= 1;
322 if (pos > 3)
323 return 0;
324 info->qualifier = get_sreg_qualifier_from_value (pos);
325 info->reglane.index = (unsigned) (value >> 1);
326 }
327 }
328 else
329 {
330 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
331 or SQDMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>]. */
332
333 /* Need information in other operand(s) to help decoding. */
334 info->qualifier = get_expected_qualifier (inst, info->idx);
335 switch (info->qualifier)
336 {
337 case AARCH64_OPND_QLF_S_H:
338 /* h:l:m */
339 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
340 FLD_M);
341 info->reglane.regno &= 0xf;
342 break;
343 case AARCH64_OPND_QLF_S_S:
344 /* h:l */
345 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
346 break;
347 case AARCH64_OPND_QLF_S_D:
348 /* H */
349 info->reglane.index = extract_field (FLD_H, code, 0);
350 break;
351 default:
352 return 0;
353 }
354 }
355
356 return 1;
357 }
358
359 int
360 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
361 const aarch64_insn code,
362 const aarch64_inst *inst ATTRIBUTE_UNUSED)
363 {
364 /* R */
365 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
366 /* len */
367 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
368 return 1;
369 }
370
371 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
372 int
373 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
374 aarch64_opnd_info *info, const aarch64_insn code,
375 const aarch64_inst *inst)
376 {
377 aarch64_insn value;
378 /* Number of elements in each structure to be loaded/stored. */
379 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
380
381 struct
382 {
383 unsigned is_reserved;
384 unsigned num_regs;
385 unsigned num_elements;
386 } data [] =
387 { {0, 4, 4},
388 {1, 4, 4},
389 {0, 4, 1},
390 {0, 4, 2},
391 {0, 3, 3},
392 {1, 3, 3},
393 {0, 3, 1},
394 {0, 1, 1},
395 {0, 2, 2},
396 {1, 2, 2},
397 {0, 2, 1},
398 };
399
400 /* Rt */
401 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
402 /* opcode */
403 value = extract_field (FLD_opcode, code, 0);
404 if (expected_num != data[value].num_elements || data[value].is_reserved)
405 return 0;
406 info->reglist.num_regs = data[value].num_regs;
407
408 return 1;
409 }
410
411 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
412 lanes instructions. */
413 int
414 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
415 aarch64_opnd_info *info, const aarch64_insn code,
416 const aarch64_inst *inst)
417 {
418 aarch64_insn value;
419
420 /* Rt */
421 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
422 /* S */
423 value = extract_field (FLD_S, code, 0);
424
425 /* Number of registers is equal to the number of elements in
426 each structure to be loaded/stored. */
427 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
428 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
429
430 /* Except when it is LD1R. */
431 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
432 info->reglist.num_regs = 2;
433
434 return 1;
435 }
436
437 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
438 load/store single element instructions. */
439 int
440 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
441 aarch64_opnd_info *info, const aarch64_insn code,
442 const aarch64_inst *inst ATTRIBUTE_UNUSED)
443 {
444 aarch64_field field = {0, 0};
445 aarch64_insn QSsize; /* fields Q:S:size. */
446 aarch64_insn opcodeh2; /* opcode<2:1> */
447
448 /* Rt */
449 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
450
451 /* Decode the index, opcode<2:1> and size. */
452 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
453 opcodeh2 = extract_field_2 (&field, code, 0);
454 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
455 switch (opcodeh2)
456 {
457 case 0x0:
458 info->qualifier = AARCH64_OPND_QLF_S_B;
459 /* Index encoded in "Q:S:size". */
460 info->reglist.index = QSsize;
461 break;
462 case 0x1:
463 if (QSsize & 0x1)
464 /* UND. */
465 return 0;
466 info->qualifier = AARCH64_OPND_QLF_S_H;
467 /* Index encoded in "Q:S:size<1>". */
468 info->reglist.index = QSsize >> 1;
469 break;
470 case 0x2:
471 if ((QSsize >> 1) & 0x1)
472 /* UND. */
473 return 0;
474 if ((QSsize & 0x1) == 0)
475 {
476 info->qualifier = AARCH64_OPND_QLF_S_S;
477 /* Index encoded in "Q:S". */
478 info->reglist.index = QSsize >> 2;
479 }
480 else
481 {
482 if (extract_field (FLD_S, code, 0))
483 /* UND */
484 return 0;
485 info->qualifier = AARCH64_OPND_QLF_S_D;
486 /* Index encoded in "Q". */
487 info->reglist.index = QSsize >> 3;
488 }
489 break;
490 default:
491 return 0;
492 }
493
494 info->reglist.has_index = 1;
495 info->reglist.num_regs = 0;
496 /* Number of registers is equal to the number of elements in
497 each structure to be loaded/stored. */
498 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
499 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
500
501 return 1;
502 }
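/* Worked example (editorial illustration, not part of the original source):
   with opcode<2:1> == 1 (H element) and Q:S:size == 0b1010, size<0> is clear
   so the encoding is valid and the lane index is Q:S:size<1> == 0b101 == 5,
   i.e. element Vt.H[5].  */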
503
504 /* Decode fields immh:immb and/or Q for e.g.
505 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
506 or SSHR <V><d>, <V><n>, #<shift>. */
507
508 int
509 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
510 aarch64_opnd_info *info, const aarch64_insn code,
511 const aarch64_inst *inst)
512 {
513 int pos;
514 aarch64_insn Q, imm, immh;
515 enum aarch64_insn_class iclass = inst->opcode->iclass;
516
517 immh = extract_field (FLD_immh, code, 0);
518 if (immh == 0)
519 return 0;
520 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
521 pos = 4;
522 /* Get highest set bit in immh. */
523 while (--pos >= 0 && (immh & 0x8) == 0)
524 immh <<= 1;
525
526 assert ((iclass == asimdshf || iclass == asisdshf)
527 && (info->type == AARCH64_OPND_IMM_VLSR
528 || info->type == AARCH64_OPND_IMM_VLSL));
529
530 if (iclass == asimdshf)
531 {
532 Q = extract_field (FLD_Q, code, 0);
533 /* immh Q <T>
534 0000 x SEE AdvSIMD modified immediate
535 0001 0 8B
536 0001 1 16B
537 001x 0 4H
538 001x 1 8H
539 01xx 0 2S
540 01xx 1 4S
541 1xxx 0 RESERVED
542 1xxx 1 2D */
543 info->qualifier =
544 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
545 }
546 else
547 info->qualifier = get_sreg_qualifier_from_value (pos);
548
549 if (info->type == AARCH64_OPND_IMM_VLSR)
550 /* immh <shift>
551 0000 SEE AdvSIMD modified immediate
552 0001 (16-UInt(immh:immb))
553 001x (32-UInt(immh:immb))
554 01xx (64-UInt(immh:immb))
555 1xxx (128-UInt(immh:immb)) */
556 info->imm.value = (16 << pos) - imm;
557 else
558 /* immh:immb
559 immh <shift>
560 0000 SEE AdvSIMD modified immediate
561 0001 (UInt(immh:immb)-8)
562 001x (UInt(immh:immb)-16)
563 01xx (UInt(immh:immb)-32)
564 1xxx (UInt(immh:immb)-64) */
565 info->imm.value = imm - (8 << pos);
566
567 return 1;
568 }
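/* Worked example (editorial illustration, not part of the original source):
   for a vector SSHR with immh:immb == 0b0010101 and Q == 0, the highest set
   bit of immh gives pos == 1, so the arrangement is 4H and the right-shift
   amount is (16 << 1) - 21 == 11.  */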
569
570 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
571 int
572 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
573 aarch64_opnd_info *info, const aarch64_insn code,
574 const aarch64_inst *inst ATTRIBUTE_UNUSED)
575 {
576 int64_t imm;
577 aarch64_insn val;
578 val = extract_field (FLD_size, code, 0);
579 switch (val)
580 {
581 case 0: imm = 8; break;
582 case 1: imm = 16; break;
583 case 2: imm = 32; break;
584 default: return 0;
585 }
586 info->imm.value = imm;
587 return 1;
588 }
589
590 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
591 The value in the field(s) will be extracted as an unsigned immediate value. */
592 int
593 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
594 const aarch64_insn code,
595 const aarch64_inst *inst ATTRIBUTE_UNUSED)
596 {
597 int64_t imm;
598
599 imm = extract_all_fields (self, code);
600
601 if (operand_need_sign_extension (self))
602 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
603
604 if (operand_need_shift_by_two (self))
605 imm <<= 2;
606
607 if (info->type == AARCH64_OPND_ADDR_ADRP)
608 imm <<= 12;
609
610 info->imm.value = imm;
611 return 1;
612 }
613
614 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
615 int
616 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
617 const aarch64_insn code,
618 const aarch64_inst *inst ATTRIBUTE_UNUSED)
619 {
620 aarch64_ext_imm (self, info, code, inst);
621 info->shifter.kind = AARCH64_MOD_LSL;
622 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
623 return 1;
624 }
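/* Worked example (editorial illustration, not part of the original source):
   for MOVZ <Xd>, #<imm16>, LSL #32 the hw field holds 2, so the decoded
   shifter amount is 2 << 4 == 32.  */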
625
626 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
627 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
628 int
629 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
630 aarch64_opnd_info *info,
631 const aarch64_insn code,
632 const aarch64_inst *inst ATTRIBUTE_UNUSED)
633 {
634 uint64_t imm;
635 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
636 aarch64_field field = {0, 0};
637
638 assert (info->idx == 1);
639
640 if (info->type == AARCH64_OPND_SIMD_FPIMM)
641 info->imm.is_fp = 1;
642
643 /* a:b:c:d:e:f:g:h */
644 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
645 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
646 {
647 /* Either MOVI <Dd>, #<imm>
648 or MOVI <Vd>.2D, #<imm>.
649 <imm> is a 64-bit immediate
650 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
651 encoded in "a:b:c:d:e:f:g:h". */
652 int i;
653 unsigned abcdefgh = imm;
654 for (imm = 0ull, i = 0; i < 8; i++)
655 if (((abcdefgh >> i) & 0x1) != 0)
656 imm |= 0xffull << (8 * i);
657 }
658 info->imm.value = imm;
659
660 /* cmode */
661 info->qualifier = get_expected_qualifier (inst, info->idx);
662 switch (info->qualifier)
663 {
664 case AARCH64_OPND_QLF_NIL:
665 /* no shift */
666 info->shifter.kind = AARCH64_MOD_NONE;
667 return 1;
668 case AARCH64_OPND_QLF_LSL:
669 /* shift zeros */
670 info->shifter.kind = AARCH64_MOD_LSL;
671 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
672 {
673 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
674 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
675 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
676 default: assert (0); return 0;
677 }
678 /* 00: 0; 01: 8; 10:16; 11:24. */
679 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
680 break;
681 case AARCH64_OPND_QLF_MSL:
682 /* shift ones */
683 info->shifter.kind = AARCH64_MOD_MSL;
684 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
685 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
686 break;
687 default:
688 assert (0);
689 return 0;
690 }
691
692 return 1;
693 }
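/* Worked example (editorial illustration, not part of the original source):
   for MOVI <Vd>.2D the 8-bit "a:b:c:d:e:f:g:h" value 0b10000001 is expanded
   byte-wise, giving the 64-bit immediate 0xff000000000000ff.  */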
694
695 /* Decode an 8-bit floating-point immediate. */
696 int
697 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
698 const aarch64_insn code,
699 const aarch64_inst *inst ATTRIBUTE_UNUSED)
700 {
701 info->imm.value = extract_all_fields (self, code);
702 info->imm.is_fp = 1;
703 return 1;
704 }
705
706 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
707 int
708 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
709 aarch64_opnd_info *info, const aarch64_insn code,
710 const aarch64_inst *inst ATTRIBUTE_UNUSED)
711 {
712 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
713 return 1;
714 }
715
716 /* Decode arithmetic immediate for e.g.
717 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
718 int
719 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
720 aarch64_opnd_info *info, const aarch64_insn code,
721 const aarch64_inst *inst ATTRIBUTE_UNUSED)
722 {
723 aarch64_insn value;
724
725 info->shifter.kind = AARCH64_MOD_LSL;
726 /* shift */
727 value = extract_field (FLD_shift, code, 0);
728 if (value >= 2)
729 return 0;
730 info->shifter.amount = value ? 12 : 0;
731 /* imm12 (unsigned) */
732 info->imm.value = extract_field (FLD_imm12, code, 0);
733
734 return 1;
735 }
736
737 /* Return true if VALUE is a valid logical immediate encoding, storing the
738 decoded value in *RESULT if so. ESIZE is the number of bytes in the
739 decoded immediate. */
740 static int
741 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
742 {
743 uint64_t imm, mask;
744 uint32_t N, R, S;
745 unsigned simd_size;
746
747 /* value is N:immr:imms. */
748 S = value & 0x3f;
749 R = (value >> 6) & 0x3f;
750 N = (value >> 12) & 0x1;
751
752 /* The immediate value consists of S+1 bits set to 1, left-rotated by
753 SIMDsize - R (in other words, right-rotated by R), then replicated. */
754 if (N != 0)
755 {
756 simd_size = 64;
757 mask = 0xffffffffffffffffull;
758 }
759 else
760 {
761 switch (S)
762 {
763 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
764 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
765 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
766 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
767 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
768 default: return 0;
769 }
770 mask = (1ull << simd_size) - 1;
771 /* Top bits are IGNORED. */
772 R &= simd_size - 1;
773 }
774
775 if (simd_size > esize * 8)
776 return 0;
777
778 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
779 if (S == simd_size - 1)
780 return 0;
781 /* S+1 consecutive bits to 1. */
782 /* NOTE: S can't be 63 due to detection above. */
783 imm = (1ull << (S + 1)) - 1;
784 /* Rotate to the left by simd_size - R. */
785 if (R != 0)
786 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
787 /* Replicate the value according to SIMD size. */
788 switch (simd_size)
789 {
790 case 2: imm = (imm << 2) | imm;
791 case 4: imm = (imm << 4) | imm;
792 case 8: imm = (imm << 8) | imm;
793 case 16: imm = (imm << 16) | imm;
794 case 32: imm = (imm << 32) | imm;
795 case 64: break;
796 default: assert (0); return 0;
797 }
798
799 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
800
801 return 1;
802 }
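/* Worked example (editorial illustration, not part of the original source):
   for a 32-bit operand (ESIZE == 4) with N == 0, immr == 0 and
   imms == 0b000111, we get S == 7, R == 0 and simd_size == 32, so
   imm == (1 << 8) - 1 == 0xff; the replication step copies the pattern into
   the upper 32 bits, which the final mask to ESIZE bytes discards, and
   *RESULT is set to 0xff.  */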
803
804 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
805 int
806 aarch64_ext_limm (const aarch64_operand *self,
807 aarch64_opnd_info *info, const aarch64_insn code,
808 const aarch64_inst *inst)
809 {
810 uint32_t esize;
811 aarch64_insn value;
812
813 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
814 self->fields[2]);
815 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
816 return decode_limm (esize, value, &info->imm.value);
817 }
818
819 /* Decode a logical immediate for the BIC alias of AND (etc.). */
820 int
821 aarch64_ext_inv_limm (const aarch64_operand *self,
822 aarch64_opnd_info *info, const aarch64_insn code,
823 const aarch64_inst *inst)
824 {
825 if (!aarch64_ext_limm (self, info, code, inst))
826 return 0;
827 info->imm.value = ~info->imm.value;
828 return 1;
829 }
830
831 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
832 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
833 int
834 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
835 aarch64_opnd_info *info,
836 const aarch64_insn code, const aarch64_inst *inst)
837 {
838 aarch64_insn value;
839
840 /* Rt */
841 info->reg.regno = extract_field (FLD_Rt, code, 0);
842
843 /* size */
844 value = extract_field (FLD_ldst_size, code, 0);
845 if (inst->opcode->iclass == ldstpair_indexed
846 || inst->opcode->iclass == ldstnapair_offs
847 || inst->opcode->iclass == ldstpair_off
848 || inst->opcode->iclass == loadlit)
849 {
850 enum aarch64_opnd_qualifier qualifier;
851 switch (value)
852 {
853 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
854 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
855 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
856 default: return 0;
857 }
858 info->qualifier = qualifier;
859 }
860 else
861 {
862 /* opc1:size */
863 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
864 if (value > 0x4)
865 return 0;
866 info->qualifier = get_sreg_qualifier_from_value (value);
867 }
868
869 return 1;
870 }
871
872 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
873 int
874 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
875 aarch64_opnd_info *info,
876 aarch64_insn code,
877 const aarch64_inst *inst ATTRIBUTE_UNUSED)
878 {
879 /* Rn */
880 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
881 return 1;
882 }
883
884 /* Decode the address operand for e.g.
885 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
886 int
887 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
888 aarch64_opnd_info *info,
889 aarch64_insn code, const aarch64_inst *inst)
890 {
891 aarch64_insn S, value;
892
893 /* Rn */
894 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
895 /* Rm */
896 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
897 /* option */
898 value = extract_field (FLD_option, code, 0);
899 info->shifter.kind =
900 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
901 /* Fix-up the shifter kind; although the table-driven approach is
902 efficient, it is slightly inflexible, thus needing this fix-up. */
903 if (info->shifter.kind == AARCH64_MOD_UXTX)
904 info->shifter.kind = AARCH64_MOD_LSL;
905 /* S */
906 S = extract_field (FLD_S, code, 0);
907 if (S == 0)
908 {
909 info->shifter.amount = 0;
910 info->shifter.amount_present = 0;
911 }
912 else
913 {
914 int size;
915 /* Need information in other operand(s) to help achieve the decoding
916 from 'S' field. */
917 info->qualifier = get_expected_qualifier (inst, info->idx);
918 /* Get the size of the data element that is accessed, which may be
919 different from that of the source register size, e.g. in strb/ldrb. */
920 size = aarch64_get_qualifier_esize (info->qualifier);
921 info->shifter.amount = get_logsz (size);
922 info->shifter.amount_present = 1;
923 }
924
925 return 1;
926 }
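/* Worked example (editorial illustration, not part of the original source):
   for LDR <Xt>, [<Xn|SP>, <Xm>, LSL #3] the option field decodes to UXTX,
   which is rewritten as LSL, and S == 1 combined with the 8-byte element
   size gives a shift amount of get_logsz (8) == 3.  */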
927
928 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
929 int
930 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
931 aarch64_insn code, const aarch64_inst *inst)
932 {
933 aarch64_insn imm;
934 info->qualifier = get_expected_qualifier (inst, info->idx);
935
936 /* Rn */
937 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
938 /* simm (imm9 or imm7) */
939 imm = extract_field (self->fields[0], code, 0);
940 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
941 if (self->fields[0] == FLD_imm7)
942 /* scaled immediate in ld/st pair instructions. */
943 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
944 /* qualifier */
945 if (inst->opcode->iclass == ldst_unscaled
946 || inst->opcode->iclass == ldstnapair_offs
947 || inst->opcode->iclass == ldstpair_off
948 || inst->opcode->iclass == ldst_unpriv)
949 info->addr.writeback = 0;
950 else
951 {
952 /* pre/post- index */
953 info->addr.writeback = 1;
954 if (extract_field (self->fields[1], code, 0) == 1)
955 info->addr.preind = 1;
956 else
957 info->addr.postind = 1;
958 }
959
960 return 1;
961 }
962
963 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
964 int
965 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
966 aarch64_insn code,
967 const aarch64_inst *inst ATTRIBUTE_UNUSED)
968 {
969 int shift;
970 info->qualifier = get_expected_qualifier (inst, info->idx);
971 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
972 /* Rn */
973 info->addr.base_regno = extract_field (self->fields[0], code, 0);
974 /* uimm12 */
975 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
976 return 1;
977 }
978
979 /* Decode the address operand for e.g.
980 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
981 int
982 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
983 aarch64_opnd_info *info,
984 aarch64_insn code, const aarch64_inst *inst)
985 {
986 /* The opcode dependent area stores the number of elements in
987 each structure to be loaded/stored. */
988 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
989
990 /* Rn */
991 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
992 /* Rm | #<amount> */
993 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
994 if (info->addr.offset.regno == 31)
995 {
996 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
997 /* Special handling of loading a single structure to all lanes. */
998 info->addr.offset.imm = (is_ld1r ? 1
999 : inst->operands[0].reglist.num_regs)
1000 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1001 else
1002 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1003 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1004 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1005 }
1006 else
1007 info->addr.offset.is_reg = 1;
1008 info->addr.writeback = 1;
1009
1010 return 1;
1011 }
1012
1013 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1014 int
1015 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1016 aarch64_opnd_info *info,
1017 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1018 {
1019 aarch64_insn value;
1020 /* cond */
1021 value = extract_field (FLD_cond, code, 0);
1022 info->cond = get_cond_from_value (value);
1023 return 1;
1024 }
1025
1026 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1027 int
1028 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1029 aarch64_opnd_info *info,
1030 aarch64_insn code,
1031 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1032 {
1033 /* op0:op1:CRn:CRm:op2 */
1034 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1035 FLD_CRm, FLD_op2);
1036 return 1;
1037 }
1038
1039 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1040 int
1041 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1042 aarch64_opnd_info *info, aarch64_insn code,
1043 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1044 {
1045 int i;
1046 /* op1:op2 */
1047 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1048 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1049 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1050 return 1;
1051 /* Reserved value in <pstatefield>. */
1052 return 0;
1053 }
1054
1055 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1056 int
1057 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1058 aarch64_opnd_info *info,
1059 aarch64_insn code,
1060 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1061 {
1062 int i;
1063 aarch64_insn value;
1064 const aarch64_sys_ins_reg *sysins_ops;
1065 /* op0:op1:CRn:CRm:op2 */
1066 value = extract_fields (code, 0, 5,
1067 FLD_op0, FLD_op1, FLD_CRn,
1068 FLD_CRm, FLD_op2);
1069
1070 switch (info->type)
1071 {
1072 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1073 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1074 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1075 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1076 default: assert (0); return 0;
1077 }
1078
1079 for (i = 0; sysins_ops[i].name != NULL; ++i)
1080 if (sysins_ops[i].value == value)
1081 {
1082 info->sysins_op = sysins_ops + i;
1083 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1084 info->sysins_op->name,
1085 (unsigned)info->sysins_op->value,
1086 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1087 return 1;
1088 }
1089
1090 return 0;
1091 }
1092
1093 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1094
1095 int
1096 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1097 aarch64_opnd_info *info,
1098 aarch64_insn code,
1099 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1100 {
1101 /* CRm */
1102 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1103 return 1;
1104 }
1105
1106 /* Decode the prefetch operation option operand for e.g.
1107 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1108
1109 int
1110 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1111 aarch64_opnd_info *info,
1112 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1113 {
1114 /* prfop in Rt */
1115 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1116 return 1;
1117 }
1118
1119 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1120 to the matching name/value pair in aarch64_hint_options. */
1121
1122 int
1123 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1124 aarch64_opnd_info *info,
1125 aarch64_insn code,
1126 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1127 {
1128 /* CRm:op2. */
1129 unsigned hint_number;
1130 int i;
1131
1132 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1133
1134 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1135 {
1136 if (hint_number == aarch64_hint_options[i].value)
1137 {
1138 info->hint_option = &(aarch64_hint_options[i]);
1139 return 1;
1140 }
1141 }
1142
1143 return 0;
1144 }
1145
1146 /* Decode the extended register operand for e.g.
1147 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1148 int
1149 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1150 aarch64_opnd_info *info,
1151 aarch64_insn code,
1152 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1153 {
1154 aarch64_insn value;
1155
1156 /* Rm */
1157 info->reg.regno = extract_field (FLD_Rm, code, 0);
1158 /* option */
1159 value = extract_field (FLD_option, code, 0);
1160 info->shifter.kind =
1161 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1162 /* imm3 */
1163 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1164
1165 /* This makes the constraint checking happy. */
1166 info->shifter.operator_present = 1;
1167
1168 /* Assume inst->operands[0].qualifier has been resolved. */
1169 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1170 info->qualifier = AARCH64_OPND_QLF_W;
1171 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1172 && (info->shifter.kind == AARCH64_MOD_UXTX
1173 || info->shifter.kind == AARCH64_MOD_SXTX))
1174 info->qualifier = AARCH64_OPND_QLF_X;
1175
1176 return 1;
1177 }
1178
1179 /* Decode the shifted register operand for e.g.
1180 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1181 int
1182 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1183 aarch64_opnd_info *info,
1184 aarch64_insn code,
1185 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1186 {
1187 aarch64_insn value;
1188
1189 /* Rm */
1190 info->reg.regno = extract_field (FLD_Rm, code, 0);
1191 /* shift */
1192 value = extract_field (FLD_shift, code, 0);
1193 info->shifter.kind =
1194 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1195 if (info->shifter.kind == AARCH64_MOD_ROR
1196 && inst->opcode->iclass != log_shift)
1197 /* ROR is not available for the shifted register operand in arithmetic
1198 instructions. */
1199 return 0;
1200 /* imm6 */
1201 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1202
1203 /* This makes the constraint checking happy. */
1204 info->shifter.operator_present = 1;
1205
1206 return 1;
1207 }
1208
1209 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1210 where <offset> is given by the OFFSET parameter and where <factor> is
1211 1 plus SELF's operand-dependent value. fields[0] specifies the field
1212 that holds <base>. */
1213 static int
1214 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1215 aarch64_opnd_info *info, aarch64_insn code,
1216 int64_t offset)
1217 {
1218 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1219 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1220 info->addr.offset.is_reg = FALSE;
1221 info->addr.writeback = FALSE;
1222 info->addr.preind = TRUE;
1223 if (offset != 0)
1224 info->shifter.kind = AARCH64_MOD_MUL_VL;
1225 info->shifter.amount = 1;
1226 info->shifter.operator_present = (info->addr.offset.imm != 0);
1227 info->shifter.amount_present = FALSE;
1228 return 1;
1229 }
1230
1231 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1232 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1233 SELF's operand-dependent value. fields[0] specifies the field that
1234 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1235 int
1236 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1237 aarch64_opnd_info *info, aarch64_insn code,
1238 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1239 {
1240 int offset;
1241
1242 offset = extract_field (FLD_SVE_imm4, code, 0);
1243 offset = ((offset + 8) & 15) - 8;
1244 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1245 }
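/* Editorial note (not part of the original source): the ((x + 8) & 15) - 8
   form above sign-extends the 4-bit field without shifting, e.g. a raw
   value of 0b1111 becomes -1 while 0b0111 stays 7; the 6-bit and 9-bit
   variants below use the same trick.  */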
1246
1247 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1248 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1249 SELF's operand-dependent value. fields[0] specifies the field that
1250 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1251 int
1252 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1253 aarch64_opnd_info *info, aarch64_insn code,
1254 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1255 {
1256 int offset;
1257
1258 offset = extract_field (FLD_SVE_imm6, code, 0);
1259 offset = (((offset + 32) & 63) - 32);
1260 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1261 }
1262
1263 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1264 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1265 SELF's operand-dependent value. fields[0] specifies the field that
1266 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1267 and imm3 fields, with imm3 being the less-significant part. */
1268 int
1269 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1270 aarch64_opnd_info *info,
1271 aarch64_insn code,
1272 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1273 {
1274 int offset;
1275
1276 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1277 offset = (((offset + 256) & 511) - 256);
1278 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1279 }
1280
1281 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1282 is given by the OFFSET parameter and where <shift> is SELF's operand-
1283 dependent value. fields[0] specifies the base register field <base>. */
1284 static int
1285 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1286 aarch64_opnd_info *info, aarch64_insn code,
1287 int64_t offset)
1288 {
1289 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1290 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1291 info->addr.offset.is_reg = FALSE;
1292 info->addr.writeback = FALSE;
1293 info->addr.preind = TRUE;
1294 info->shifter.operator_present = FALSE;
1295 info->shifter.amount_present = FALSE;
1296 return 1;
1297 }
1298
1299 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1300 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1301 value. fields[0] specifies the base register field. */
1302 int
1303 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1304 aarch64_opnd_info *info, aarch64_insn code,
1305 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1306 {
1307 int offset = extract_field (FLD_SVE_imm6, code, 0);
1308 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1309 }
1310
1311 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1312 is SELF's operand-dependent value. fields[0] specifies the base
1313 register field and fields[1] specifies the offset register field. */
1314 int
1315 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1316 aarch64_opnd_info *info, aarch64_insn code,
1317 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1318 {
1319 int index;
1320
1321 index = extract_field (self->fields[1], code, 0);
1322 if (index == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1323 return 0;
1324
1325 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1326 info->addr.offset.regno = index;
1327 info->addr.offset.is_reg = TRUE;
1328 info->addr.writeback = FALSE;
1329 info->addr.preind = TRUE;
1330 info->shifter.kind = AARCH64_MOD_LSL;
1331 info->shifter.amount = get_operand_specific_data (self);
1332 info->shifter.operator_present = (info->shifter.amount != 0);
1333 info->shifter.amount_present = (info->shifter.amount != 0);
1334 return 1;
1335 }
1336
1337 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1338 <shift> is SELF's operand-dependent value. fields[0] specifies the
1339 base register field, fields[1] specifies the offset register field and
1340 fields[2] is a single-bit field that selects SXTW over UXTW. */
1341 int
1342 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1343 aarch64_opnd_info *info, aarch64_insn code,
1344 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1345 {
1346 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1347 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1348 info->addr.offset.is_reg = TRUE;
1349 info->addr.writeback = FALSE;
1350 info->addr.preind = TRUE;
1351 if (extract_field (self->fields[2], code, 0))
1352 info->shifter.kind = AARCH64_MOD_SXTW;
1353 else
1354 info->shifter.kind = AARCH64_MOD_UXTW;
1355 info->shifter.amount = get_operand_specific_data (self);
1356 info->shifter.operator_present = TRUE;
1357 info->shifter.amount_present = (info->shifter.amount != 0);
1358 return 1;
1359 }
1360
1361 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1362 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1363 fields[0] specifies the base register field. */
1364 int
1365 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1366 aarch64_opnd_info *info, aarch64_insn code,
1367 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1368 {
1369 int offset = extract_field (FLD_imm5, code, 0);
1370 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1371 }
1372
1373 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1374 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1375 number. fields[0] specifies the base register field and fields[1]
1376 specifies the offset register field. */
1377 static int
1378 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1379 aarch64_insn code, enum aarch64_modifier_kind kind)
1380 {
1381 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1382 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1383 info->addr.offset.is_reg = TRUE;
1384 info->addr.writeback = FALSE;
1385 info->addr.preind = TRUE;
1386 info->shifter.kind = kind;
1387 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1388 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1389 || info->shifter.amount != 0);
1390 info->shifter.amount_present = (info->shifter.amount != 0);
1391 return 1;
1392 }
1393
1394 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1395 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1396 field and fields[1] specifies the offset register field. */
1397 int
1398 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1399 aarch64_opnd_info *info, aarch64_insn code,
1400 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1401 {
1402 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1403 }
1404
1405 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1406 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1407 field and fields[1] specifies the offset register field. */
1408 int
1409 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1410 aarch64_opnd_info *info, aarch64_insn code,
1411 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1412 {
1413 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1414 }
1415
1416 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1417 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1418 field and fields[1] specifies the offset register field. */
1419 int
1420 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1421 aarch64_opnd_info *info, aarch64_insn code,
1422 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1423 {
1424 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1425 }
1426
1427 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1428 has the raw field value and that the low 8 bits decode to VALUE. */
1429 static int
1430 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1431 {
1432 info->shifter.kind = AARCH64_MOD_LSL;
1433 info->shifter.amount = 0;
1434 if (info->imm.value & 0x100)
1435 {
1436 if (value == 0)
1437 /* Decode 0x100 as #0, LSL #8. */
1438 info->shifter.amount = 8;
1439 else
1440 value *= 256;
1441 }
1442 info->shifter.operator_present = (info->shifter.amount != 0);
1443 info->shifter.amount_present = (info->shifter.amount != 0);
1444 info->imm.value = value;
1445 return 1;
1446 }
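/* Worked example (editorial illustration, not part of the original source):
   a raw 9-bit field of 0x105 has bit 8 set and a non-zero low byte, so it
   is decoded as the plain immediate 5 * 256 == 1280; only the raw value
   0x100 is kept as "#0, LSL #8" to distinguish it from a plain #0.  */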
1447
1448 /* Decode an SVE ADD/SUB immediate. */
1449 int
1450 aarch64_ext_sve_aimm (const aarch64_operand *self,
1451 aarch64_opnd_info *info, const aarch64_insn code,
1452 const aarch64_inst *inst)
1453 {
1454 return (aarch64_ext_imm (self, info, code, inst)
1455 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1456 }
1457
1458 /* Decode an SVE CPY/DUP immediate. */
1459 int
1460 aarch64_ext_sve_asimm (const aarch64_operand *self,
1461 aarch64_opnd_info *info, const aarch64_insn code,
1462 const aarch64_inst *inst)
1463 {
1464 return (aarch64_ext_imm (self, info, code, inst)
1465 && decode_sve_aimm (info, (int8_t) info->imm.value));
1466 }
1467
1468 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1469 The fields array specifies which field to use. */
1470 int
1471 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1472 aarch64_opnd_info *info, aarch64_insn code,
1473 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1474 {
1475 if (extract_field (self->fields[0], code, 0))
1476 info->imm.value = 0x3f800000;
1477 else
1478 info->imm.value = 0x3f000000;
1479 info->imm.is_fp = TRUE;
1480 return 1;
1481 }
1482
1483 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1484 The fields array specifies which field to use. */
1485 int
1486 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1487 aarch64_opnd_info *info, aarch64_insn code,
1488 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1489 {
1490 if (extract_field (self->fields[0], code, 0))
1491 info->imm.value = 0x40000000;
1492 else
1493 info->imm.value = 0x3f000000;
1494 info->imm.is_fp = TRUE;
1495 return 1;
1496 }
1497
1498 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1499 The fields array specifies which field to use. */
1500 int
1501 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1502 aarch64_opnd_info *info, aarch64_insn code,
1503 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1504 {
1505 if (extract_field (self->fields[0], code, 0))
1506 info->imm.value = 0x3f800000;
1507 else
1508 info->imm.value = 0x0;
1509 info->imm.is_fp = TRUE;
1510 return 1;
1511 }
1512
1513 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1514 array specifies which field to use for Zn. MM is encoded in the
1515 concatenation of imm5 and SVE_tszh, with imm5 being the less
1516 significant part. */
1517 int
1518 aarch64_ext_sve_index (const aarch64_operand *self,
1519 aarch64_opnd_info *info, aarch64_insn code,
1520 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1521 {
1522 int val;
1523
1524 info->reglane.regno = extract_field (self->fields[0], code, 0);
1525 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1526 if ((val & 15) == 0)
1527 return 0;
1528 while ((val & 1) == 0)
1529 val /= 2;
1530 info->reglane.index = val / 2;
1531 return 1;
1532 }
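/* Worked example (editorial illustration, not part of the original source):
   a tszh:imm5 value of 0b0001010 has its lowest set bit at position 1
   (marking the element size); the loop above shifts that bit away and the
   remaining bits give reglane.index == 2, e.g. Zn.H[2].  */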
1533
1534 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1535 int
1536 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1537 aarch64_opnd_info *info, const aarch64_insn code,
1538 const aarch64_inst *inst)
1539 {
1540 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1541 return (aarch64_ext_limm (self, info, code, inst)
1542 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1543 }
1544
1545 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1546 to use for Zn. The opcode-dependent value specifies the number
1547 of registers in the list. */
1548 int
1549 aarch64_ext_sve_reglist (const aarch64_operand *self,
1550 aarch64_opnd_info *info, aarch64_insn code,
1551 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1552 {
1553 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1554 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1555 return 1;
1556 }
1557
1558 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1559 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1560 field. */
1561 int
1562 aarch64_ext_sve_scale (const aarch64_operand *self,
1563 aarch64_opnd_info *info, aarch64_insn code,
1564 const aarch64_inst *inst)
1565 {
1566 int val;
1567
1568 if (!aarch64_ext_imm (self, info, code, inst))
1569 return 0;
1570 val = extract_field (FLD_SVE_imm4, code, 0);
1571 info->shifter.kind = AARCH64_MOD_MUL;
1572 info->shifter.amount = val + 1;
1573 info->shifter.operator_present = (val != 0);
1574 info->shifter.amount_present = (val != 0);
1575 return 1;
1576 }
1577
1578 /* Return the top set bit in VALUE, which is expected to be relatively
1579 small. */
1580 static uint64_t
1581 get_top_bit (uint64_t value)
1582 {
1583 while ((value & -value) != value)
1584 value -= value & -value;
1585 return value;
1586 }
1587
1588 /* Decode an SVE shift-left immediate. */
1589 int
1590 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1591 aarch64_opnd_info *info, const aarch64_insn code,
1592 const aarch64_inst *inst)
1593 {
1594 if (!aarch64_ext_imm (self, info, code, inst)
1595 || info->imm.value == 0)
1596 return 0;
1597
1598 info->imm.value -= get_top_bit (info->imm.value);
1599 return 1;
1600 }
1601
1602 /* Decode an SVE shift-right immediate. */
1603 int
1604 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1605 aarch64_opnd_info *info, const aarch64_insn code,
1606 const aarch64_inst *inst)
1607 {
1608 if (!aarch64_ext_imm (self, info, code, inst)
1609 || info->imm.value == 0)
1610 return 0;
1611
1612 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1613 return 1;
1614 }
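/* Worked example (editorial illustration, not part of the original source):
   if the encoded immediate field value is 22 (binary 10110), get_top_bit
   returns 16, so the shift-left form above yields 22 - 16 == 6 and the
   shift-right form yields 2 * 16 - 22 == 10.  */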
1615 \f
1616 /* Bitfields that are commonly used to encode certain operands' information
1617 may be partially used as part of the base opcode in some instructions.
1618 For example, bit 1 of the field 'size' in
1619 FCVTXN <Vb><d>, <Va><n>
1620 is actually part of the base opcode, while only size<0> is available
1621 for encoding the register type. Another example is the AdvSIMD
1622 instruction ORR (register), in which the field 'size' is also used for
1623 the base opcode, leaving only the field 'Q' available to encode the
1624 vector register arrangement specifier '8B' or '16B'.
1625
1626 This function tries to deduce the qualifier from the value of partially
1627 constrained field(s). Given the VALUE of such a field or fields, the
1628 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1629 operand encoding), the function returns the matching qualifier or
1630 AARCH64_OPND_QLF_NIL if nothing matches.
1631
1632 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1633 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1634 may end with AARCH64_OPND_QLF_NIL. */
1635
1636 static enum aarch64_opnd_qualifier
1637 get_qualifier_from_partial_encoding (aarch64_insn value,
1638 const enum aarch64_opnd_qualifier* \
1639 candidates,
1640 aarch64_insn mask)
1641 {
1642 int i;
1643 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1644 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1645 {
1646 aarch64_insn standard_value;
1647 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1648 break;
1649 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1650 if ((standard_value & mask) == (value & mask))
1651 return candidates[i];
1652 }
1653 return AARCH64_OPND_QLF_NIL;
1654 }
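/* Worked example (editorial illustration, not part of the original source):
   for ORR (vector, register) the 'size' bits belong to the base opcode, so
   MASK exposes only Q.  With CANDIDATES {8B, 16B}, a VALUE whose Q bit is
   set matches the standard value of the 16B qualifier, which is then
   returned.  */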
1655
1656 /* Given a list of qualifier sequences, return all possible valid qualifiers
1657 for operand IDX in QUALIFIERS.
1658 Assume QUALIFIERS is an array whose length is large enough. */
1659
1660 static void
1661 get_operand_possible_qualifiers (int idx,
1662 const aarch64_opnd_qualifier_seq_t *list,
1663 enum aarch64_opnd_qualifier *qualifiers)
1664 {
1665 int i;
1666 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1667 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1668 break;
1669 }
1670
1671 /* Decode the size and Q fields for e.g. SHADD.
1672 We tag one operand with the qualifier according to the code;
1673 whether the qualifier is valid for this opcode or not is the
1674 duty of the semantic checking. */
1675
1676 static int
1677 decode_sizeq (aarch64_inst *inst)
1678 {
1679 int idx;
1680 enum aarch64_opnd_qualifier qualifier;
1681 aarch64_insn code;
1682 aarch64_insn value, mask;
1683 enum aarch64_field_kind fld_sz;
1684 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1685
1686 if (inst->opcode->iclass == asisdlse
1687 || inst->opcode->iclass == asisdlsep
1688 || inst->opcode->iclass == asisdlso
1689 || inst->opcode->iclass == asisdlsop)
1690 fld_sz = FLD_vldst_size;
1691 else
1692 fld_sz = FLD_size;
1693
1694 code = inst->value;
1695 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1696 /* Work out which bits of the fields Q and size are actually
1697 available for operand encoding. Opcodes like FMAXNM and FMLA have
1698 size[1] unavailable. */
1699 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1700
1701 /* The index of the operand that we are going to tag with a qualifier, and
1702 the qualifier itself, are deduced from the value of the size and Q fields
1703 and the possible valid qualifier lists. */
1704 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1705 DEBUG_TRACE ("key idx: %d", idx);
1706
1707 /* For most of the related instructions, size:Q is fully available for
1708 operand encoding. */
1709 if (mask == 0x7)
1710 {
1711 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1712 return 1;
1713 }
1714
1715 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1716 candidates);
1717 #ifdef DEBUG_AARCH64
1718 if (debug_dump)
1719 {
1720 int i;
1721 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1722 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1723 DEBUG_TRACE ("qualifier %d: %s", i,
1724 aarch64_get_qualifier_name(candidates[i]));
1725 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1726 }
1727 #endif /* DEBUG_AARCH64 */
1728
1729 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1730
1731 if (qualifier == AARCH64_OPND_QLF_NIL)
1732 return 0;
1733
1734 inst->operands[idx].qualifier = qualifier;
1735 return 1;
1736 }
1737
1738 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1739 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1740
1741 static int
1742 decode_asimd_fcvt (aarch64_inst *inst)
1743 {
1744 aarch64_field field = {0, 0};
1745 aarch64_insn value;
1746 enum aarch64_opnd_qualifier qualifier;
1747
1748 gen_sub_field (FLD_size, 0, 1, &field);
1749 value = extract_field_2 (&field, inst->value, 0);
1750 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1751 : AARCH64_OPND_QLF_V_2D;
1752 switch (inst->opcode->op)
1753 {
1754 case OP_FCVTN:
1755 case OP_FCVTN2:
1756 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1757 inst->operands[1].qualifier = qualifier;
1758 break;
1759 case OP_FCVTL:
1760 case OP_FCVTL2:
1761 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1762 inst->operands[0].qualifier = qualifier;
1763 break;
1764 default:
1765 assert (0);
1766 return 0;
1767 }
1768
1769 return 1;
1770 }
1771
1772 /* Decode size[0], i.e. bit 22, for
1773 e.g. FCVTXN <Vb><d>, <Va><n>. */
1774
1775 static int
1776 decode_asisd_fcvtxn (aarch64_inst *inst)
1777 {
1778 aarch64_field field = {0, 0};
1779 gen_sub_field (FLD_size, 0, 1, &field);
1780 if (!extract_field_2 (&field, inst->value, 0))
1781 return 0;
1782 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1783 return 1;
1784 }
1785
1786 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1787 static int
1788 decode_fcvt (aarch64_inst *inst)
1789 {
1790 enum aarch64_opnd_qualifier qualifier;
1791 aarch64_insn value;
1792 const aarch64_field field = {15, 2};
1793
1794 /* opc dstsize */
1795 value = extract_field_2 (&field, inst->value, 0);
1796 switch (value)
1797 {
1798 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1799 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1800 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1801 default: return 0;
1802 }
1803 inst->operands[0].qualifier = qualifier;
1804
1805 return 1;
1806 }
1807
1808 /* Do miscellaneous decodings that are not common enough to be driven by
1809 flags. */
1810
1811 static int
1812 do_misc_decoding (aarch64_inst *inst)
1813 {
1814 switch (inst->opcode->op)
1815 {
1816 case OP_FCVT:
1817 return decode_fcvt (inst);
1818 case OP_FCVTN:
1819 case OP_FCVTN2:
1820 case OP_FCVTL:
1821 case OP_FCVTL2:
1822 return decode_asimd_fcvt (inst);
1823 case OP_FCVTXN_S:
1824 return decode_asisd_fcvtxn (inst);
1825 default:
1826 return 0;
1827 }
1828 }
1829
1830 /* Opcodes that have fields shared by multiple operands are usually flagged
1831 with flags. In this function, we detect such flags, decode the related
1832 field(s) and store the information in one of the related operands. The
1833 chosen operand is not arbitrary; it must be one of the operands that can
1834 accommodate all the information that has been decoded. */
1835
1836 static int
1837 do_special_decoding (aarch64_inst *inst)
1838 {
1839 int idx;
1840 aarch64_insn value;
1841 /* Condition for truly conditionally executed instructions, e.g. b.cond. */
1842 if (inst->opcode->flags & F_COND)
1843 {
1844 value = extract_field (FLD_cond2, inst->value, 0);
1845 inst->cond = get_cond_from_value (value);
1846 }
1847 /* 'sf' field. */
1848 if (inst->opcode->flags & F_SF)
1849 {
1850 idx = select_operand_for_sf_field_coding (inst->opcode);
1851 value = extract_field (FLD_sf, inst->value, 0);
1852 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1853 if ((inst->opcode->flags & F_N)
1854 && extract_field (FLD_N, inst->value, 0) != value)
1855 return 0;
1856 }
1857 /* 'lse_sz' field. */
1858 if (inst->opcode->flags & F_LSE_SZ)
1859 {
1860 idx = select_operand_for_sf_field_coding (inst->opcode);
1861 value = extract_field (FLD_lse_sz, inst->value, 0);
1862 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1863 }
1864 /* size:Q fields. */
1865 if (inst->opcode->flags & F_SIZEQ)
1866 return decode_sizeq (inst);
1867
1868 if (inst->opcode->flags & F_FPTYPE)
1869 {
1870 idx = select_operand_for_fptype_field_coding (inst->opcode);
1871 value = extract_field (FLD_type, inst->value, 0);
1872 switch (value)
1873 {
1874 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
1875 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
1876 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
1877 default: return 0;
1878 }
1879 }
1880
1881 if (inst->opcode->flags & F_SSIZE)
1882 {
1883 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have size[1] as part
1884 of the base opcode. */
1885 aarch64_insn mask;
1886 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1887 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1888 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
1889 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
1890 /* For most related instructions, the 'size' field is fully available for
1891 operand encoding. */
1892 if (mask == 0x3)
1893 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
1894 else
1895 {
1896 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1897 candidates);
1898 inst->operands[idx].qualifier
1899 = get_qualifier_from_partial_encoding (value, candidates, mask);
1900 }
1901 }
1902
1903 if (inst->opcode->flags & F_T)
1904 {
1905 /* Number of consecutive '0's on the right side of imm5<3:0>. */
1906 int num = 0;
1907 unsigned val, Q;
1908 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1909 == AARCH64_OPND_CLASS_SIMD_REG);
1910 /* imm5<3:0> q <t>
1911 0000 x reserved
1912 xxx1 0 8b
1913 xxx1 1 16b
1914 xx10 0 4h
1915 xx10 1 8h
1916 x100 0 2s
1917 x100 1 4s
1918 1000 0 reserved
1919 1000 1 2d */
1920 val = extract_field (FLD_imm5, inst->value, 0);
1921 while ((val & 0x1) == 0 && ++num <= 3)
1922 val >>= 1;
1923 if (num > 3)
1924 return 0;
1925 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
1926 inst->operands[0].qualifier =
1927 get_vreg_qualifier_from_value ((num << 1) | Q);
1928 }
1929
1930 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1931 {
1932 /* Use the Rt operand to encode the size in the case of e.g.
1933 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1934 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1935 if (idx == -1)
1936 {
1937 /* Otherwise use the result operand, which has to be an integer
1938 register. */
1939 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1940 == AARCH64_OPND_CLASS_INT_REG);
1941 idx = 0;
1942 }
1943 assert (idx == 0 || idx == 1);
1944 value = extract_field (FLD_Q, inst->value, 0);
1945 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1946 }
1947
1948 if (inst->opcode->flags & F_LDS_SIZE)
1949 {
1950 aarch64_field field = {0, 0};
1951 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1952 == AARCH64_OPND_CLASS_INT_REG);
1953 gen_sub_field (FLD_opc, 0, 1, &field);
1954 value = extract_field_2 (&field, inst->value, 0);
1955 inst->operands[0].qualifier
1956 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
1957 }
1958
1959 /* Miscellaneous decoding; done as the last step. */
1960 if (inst->opcode->flags & F_MISC)
1961 return do_misc_decoding (inst);
1962
1963 return 1;
1964 }
1965
1966 /* Converters converting a real opcode instruction to its alias form. */
1967
1968 /* ROR <Wd>, <Ws>, #<shift>
1969 is equivalent to:
1970 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
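/* For example, the encoding for EXTR W0, W1, W1, #3, where both source
   registers are the same, is disassembled as ROR W0, W1, #3.  */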
1971 static int
1972 convert_extr_to_ror (aarch64_inst *inst)
1973 {
1974 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1975 {
1976 copy_operand_info (inst, 2, 3);
1977 inst->operands[3].type = AARCH64_OPND_NIL;
1978 return 1;
1979 }
1980 return 0;
1981 }
1982
1983 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1984 is equivalent to:
1985 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1986 static int
1987 convert_shll_to_xtl (aarch64_inst *inst)
1988 {
1989 if (inst->operands[2].imm.value == 0)
1990 {
1991 inst->operands[2].type = AARCH64_OPND_NIL;
1992 return 1;
1993 }
1994 return 0;
1995 }
1996
1997 /* Convert
1998 UBFM <Xd>, <Xn>, #<shift>, #63.
1999 to
2000 LSR <Xd>, <Xn>, #<shift>. */
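/* For example, UBFM X0, X1, #4, #63 is disassembled as LSR X0, X1, #4;
   the corresponding SBFM form gives ASR. For 32-bit operands the fixed
   <imms> value is #31 instead of #63.  */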
2001 static int
2002 convert_bfm_to_sr (aarch64_inst *inst)
2003 {
2004 int64_t imms, val;
2005
2006 imms = inst->operands[3].imm.value;
2007 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2008 if (imms == val)
2009 {
2010 inst->operands[3].type = AARCH64_OPND_NIL;
2011 return 1;
2012 }
2013
2014 return 0;
2015 }
2016
2017 /* Convert MOV to ORR. */
2018 static int
2019 convert_orr_to_mov (aarch64_inst *inst)
2020 {
2021 /* MOV <Vd>.<T>, <Vn>.<T>
2022 is equivalent to:
2023 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2024 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2025 {
2026 inst->operands[2].type = AARCH64_OPND_NIL;
2027 return 1;
2028 }
2029 return 0;
2030 }
2031
2032 /* When <imms> >= <immr>, the instruction written:
2033 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2034 is equivalent to:
2035 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
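/* For example, SBFM X0, X1, #4, #11 satisfies <imms> >= <immr> and is
   disassembled as SBFX X0, X1, #4, #8 (lsb = 4, width = 11 + 1 - 4 = 8).  */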
2036
2037 static int
2038 convert_bfm_to_bfx (aarch64_inst *inst)
2039 {
2040 int64_t immr, imms;
2041
2042 immr = inst->operands[2].imm.value;
2043 imms = inst->operands[3].imm.value;
2044 if (imms >= immr)
2045 {
2046 int64_t lsb = immr;
2047 inst->operands[2].imm.value = lsb;
2048 inst->operands[3].imm.value = imms + 1 - lsb;
2049 /* The two opcodes have different qualifiers for
2050 the immediate operands; reset to help the checking. */
2051 reset_operand_qualifier (inst, 2);
2052 reset_operand_qualifier (inst, 3);
2053 return 1;
2054 }
2055
2056 return 0;
2057 }
2058
2059 /* When <imms> < <immr>, the instruction written:
2060 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2061 is equivalent to:
2062 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
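/* For example, SBFM X0, X1, #56, #15 satisfies <imms> < <immr> and is
   disassembled as SBFIZ X0, X1, #8, #16 (lsb = (64 - 56) & 0x3f = 8,
   width = 15 + 1 = 16).  */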
2063
2064 static int
2065 convert_bfm_to_bfi (aarch64_inst *inst)
2066 {
2067 int64_t immr, imms, val;
2068
2069 immr = inst->operands[2].imm.value;
2070 imms = inst->operands[3].imm.value;
2071 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2072 if (imms < immr)
2073 {
2074 inst->operands[2].imm.value = (val - immr) & (val - 1);
2075 inst->operands[3].imm.value = imms + 1;
2076 /* The two opcodes have different qualifiers for
2077 the immediate operands; reset to help the checking. */
2078 reset_operand_qualifier (inst, 2);
2079 reset_operand_qualifier (inst, 3);
2080 return 1;
2081 }
2082
2083 return 0;
2084 }
2085
2086 /* The instruction written:
2087 BFC <Xd>, #<lsb>, #<width>
2088 is equivalent to:
2089 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
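/* For example, BFM X0, XZR, #56, #15 (with XZR as the source register) is
   disassembled as BFC X0, #8, #16, i.e. clearing a 16-bit field starting
   at bit 8.  */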
2090
2091 static int
2092 convert_bfm_to_bfc (aarch64_inst *inst)
2093 {
2094 int64_t immr, imms, val;
2095
2096 /* Should have been assured by the base opcode value. */
2097 assert (inst->operands[1].reg.regno == 0x1f);
2098
2099 immr = inst->operands[2].imm.value;
2100 imms = inst->operands[3].imm.value;
2101 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2102 if (imms < immr)
2103 {
2104 /* Drop XZR from the second operand. */
2105 copy_operand_info (inst, 1, 2);
2106 copy_operand_info (inst, 2, 3);
2107 inst->operands[3].type = AARCH64_OPND_NIL;
2108
2109 /* Recalculate the immediates. */
2110 inst->operands[1].imm.value = (val - immr) & (val - 1);
2111 inst->operands[2].imm.value = imms + 1;
2112
2113 /* The two opcodes have different qualifiers for the operands; reset to
2114 help the checking. */
2115 reset_operand_qualifier (inst, 1);
2116 reset_operand_qualifier (inst, 2);
2117 reset_operand_qualifier (inst, 3);
2118
2119 return 1;
2120 }
2121
2122 return 0;
2123 }
2124
2125 /* The instruction written:
2126 LSL <Xd>, <Xn>, #<shift>
2127 is equivalent to:
2128 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
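/* For example, UBFM X0, X1, #60, #59 satisfies <immr> == <imms> + 1 and is
   disassembled as LSL X0, X1, #4 (shift = 63 - 59 = 4).  */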
2129
2130 static int
2131 convert_ubfm_to_lsl (aarch64_inst *inst)
2132 {
2133 int64_t immr = inst->operands[2].imm.value;
2134 int64_t imms = inst->operands[3].imm.value;
2135 int64_t val
2136 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2137
2138 if ((immr == 0 && imms == val) || immr == imms + 1)
2139 {
2140 inst->operands[3].type = AARCH64_OPND_NIL;
2141 inst->operands[2].imm.value = val - imms;
2142 return 1;
2143 }
2144
2145 return 0;
2146 }
2147
2148 /* CINC <Wd>, <Wn>, <cond>
2149 is equivalent to:
2150 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2151 where <cond> is not AL or NV. */
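/* For example, CSINC W0, W1, W1, EQ, where the two source registers are
   the same, is disassembled as CINC W0, W1, NE.  */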
2152
2153 static int
2154 convert_from_csel (aarch64_inst *inst)
2155 {
2156 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2157 && (inst->operands[3].cond->value & 0xe) != 0xe)
2158 {
2159 copy_operand_info (inst, 2, 3);
2160 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2161 inst->operands[3].type = AARCH64_OPND_NIL;
2162 return 1;
2163 }
2164 return 0;
2165 }
2166
2167 /* CSET <Wd>, <cond>
2168 is equivalent to:
2169 CSINC <Wd>, WZR, WZR, invert(<cond>)
2170 where <cond> is not AL or NV. */
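/* For example, CSINC W0, WZR, WZR, EQ is disassembled as CSET W0, NE.
   CSETM is handled the same way, with CSINV as the real opcode.  */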
2171
2172 static int
2173 convert_csinc_to_cset (aarch64_inst *inst)
2174 {
2175 if (inst->operands[1].reg.regno == 0x1f
2176 && inst->operands[2].reg.regno == 0x1f
2177 && (inst->operands[3].cond->value & 0xe) != 0xe)
2178 {
2179 copy_operand_info (inst, 1, 3);
2180 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2181 inst->operands[3].type = AARCH64_OPND_NIL;
2182 inst->operands[2].type = AARCH64_OPND_NIL;
2183 return 1;
2184 }
2185 return 0;
2186 }
2187
2188 /* MOV <Wd>, #<imm>
2189 is equivalent to:
2190 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2191
2192 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2193 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2194 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2195 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2196 machine-instruction mnemonic must be used. */
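/* For example, MOVZ X0, #0x1234, LSL #16 is disassembled as
   MOV X0, #0x12340000; the shifted 16-bit immediate is folded into the
   single immediate printed by the MOV alias.  */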
2197
2198 static int
2199 convert_movewide_to_mov (aarch64_inst *inst)
2200 {
2201 uint64_t value = inst->operands[1].imm.value;
2202 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2203 if (value == 0 && inst->operands[1].shifter.amount != 0)
2204 return 0;
2205 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2206 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2207 value <<= inst->operands[1].shifter.amount;
2208 /* As this is an alias converter, INST->OPCODE is necessarily the opcode
2209 of the real (non-alias) instruction. */
2210 if (inst->opcode->op == OP_MOVN)
2211 {
2212 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2213 value = ~value;
2214 /* A MOVN has an immediate that could be encoded by MOVZ. */
2215 if (aarch64_wide_constant_p (value, is32, NULL) == TRUE)
2216 return 0;
2217 }
2218 inst->operands[1].imm.value = value;
2219 inst->operands[1].shifter.amount = 0;
2220 return 1;
2221 }
2222
2223 /* MOV <Wd>, #<imm>
2224 is equivalent to:
2225 ORR <Wd>, WZR, #<imm>.
2226
2227 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2228 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2229 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2230 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2231 machine-instruction mnemonic must be used. */
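/* For example, ORR X0, XZR, #0x5555555555555555 is disassembled as
   MOV X0, #0x5555555555555555, because that bitmask immediate cannot be
   produced by a single MOVZ or MOVN.  */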
2232
2233 static int
2234 convert_movebitmask_to_mov (aarch64_inst *inst)
2235 {
2236 int is32;
2237 uint64_t value;
2238
2239 /* Should have been assured by the base opcode value. */
2240 assert (inst->operands[1].reg.regno == 0x1f);
2241 copy_operand_info (inst, 1, 2);
2242 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2243 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2244 value = inst->operands[1].imm.value;
2245 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2246 instruction. */
2247 if (inst->operands[0].reg.regno != 0x1f
2248 && (aarch64_wide_constant_p (value, is32, NULL) == TRUE
2249 || aarch64_wide_constant_p (~value, is32, NULL) == TRUE))
2250 return 0;
2251
2252 inst->operands[2].type = AARCH64_OPND_NIL;
2253 return 1;
2254 }
2255
2256 /* Some alias opcodes are disassembled by being converted from their real form.
2257 N.B. INST->OPCODE is the real opcode rather than the alias. */
2258
2259 static int
2260 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2261 {
2262 switch (alias->op)
2263 {
2264 case OP_ASR_IMM:
2265 case OP_LSR_IMM:
2266 return convert_bfm_to_sr (inst);
2267 case OP_LSL_IMM:
2268 return convert_ubfm_to_lsl (inst);
2269 case OP_CINC:
2270 case OP_CINV:
2271 case OP_CNEG:
2272 return convert_from_csel (inst);
2273 case OP_CSET:
2274 case OP_CSETM:
2275 return convert_csinc_to_cset (inst);
2276 case OP_UBFX:
2277 case OP_BFXIL:
2278 case OP_SBFX:
2279 return convert_bfm_to_bfx (inst);
2280 case OP_SBFIZ:
2281 case OP_BFI:
2282 case OP_UBFIZ:
2283 return convert_bfm_to_bfi (inst);
2284 case OP_BFC:
2285 return convert_bfm_to_bfc (inst);
2286 case OP_MOV_V:
2287 return convert_orr_to_mov (inst);
2288 case OP_MOV_IMM_WIDE:
2289 case OP_MOV_IMM_WIDEN:
2290 return convert_movewide_to_mov (inst);
2291 case OP_MOV_IMM_LOG:
2292 return convert_movebitmask_to_mov (inst);
2293 case OP_ROR_IMM:
2294 return convert_extr_to_ror (inst);
2295 case OP_SXTL:
2296 case OP_SXTL2:
2297 case OP_UXTL:
2298 case OP_UXTL2:
2299 return convert_shll_to_xtl (inst);
2300 default:
2301 return 0;
2302 }
2303 }
2304
2305 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2306 aarch64_inst *, int);
2307
2308 /* Given the instruction information in *INST, check if the instruction has
2309 any alias form that can be used to represent *INST. If the answer is yes,
2310 update *INST to be in the form of the determined alias. */
2311
2312 /* In the opcode description table, the following flags are used in opcode
2313 entries to help establish the relations between the real and alias opcodes:
2314
2315 F_ALIAS: opcode is an alias
2316 F_HAS_ALIAS: opcode has alias(es)
2317 F_P1
2318 F_P2
2319 F_P3: Disassembly preference priority 1-3 (the larger the number,
2320 the higher the priority). If none is specified, the priority
2321 defaults to 0, i.e. the lowest priority.
2322
2323 Although the relation between the machine and the alias instructions is not
2324 explicitly described, it can be easily determined from the base opcode
2325 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2326 description entries:
2327
2328 The mask of an alias opcode must be equal to or a super-set (i.e. more
2329 constrained) of that of the aliased opcode; so is the base opcode value.
2330
2331 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2332 && (opcode->mask & real->mask) == real->mask
2333 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2334 then OPCODE is an alias of, and only of, the REAL instruction
2335
2336 The alias relationship is kept flat to keep the related algorithms
2337 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2338
2339 During disassembly, the decoding decision tree (in
2340 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2341 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2342 not specified), the disassembler will check whether any alias instruction
2343 exists for this real instruction. If there is, the disassembler will try
2344 to disassemble the 32-bit binary again using the alias's rule, or try to
2345 convert the IR to the form of the alias. In the case of multiple
2346 aliases, the aliases are tried one by one from the highest priority
2347 (currently the flag F_P3) to the lowest priority (no priority flag), and
2348 the first one that succeeds is adopted.
2349
2350 You may ask why there is a need to convert the IR from one form to
2351 another when handling certain aliases. On the one hand, it avoids
2352 adding more operand code to handle unusual encoding/decoding; on the
2353 other hand, during disassembly, the conversion is an effective way to
2354 check the conditions of an alias (as an alias may be adopted only if
2355 certain conditions are met).
2356
2357 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2358 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2359 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
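/* As a concrete example of this scheme, the immediate forms of LSR, LSL,
   UBFX and UBFIZ are all aliases of UBFM; the converters above check
   whether the UBFM operands satisfy the conditions of each particular
   alias before it is adopted.  */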
2360
2361 static void
2362 determine_disassembling_preference (struct aarch64_inst *inst)
2363 {
2364 const aarch64_opcode *opcode;
2365 const aarch64_opcode *alias;
2366
2367 opcode = inst->opcode;
2368
2369 /* This opcode does not have an alias, so use itself. */
2370 if (opcode_has_alias (opcode) == FALSE)
2371 return;
2372
2373 alias = aarch64_find_alias_opcode (opcode);
2374 assert (alias);
2375
2376 #ifdef DEBUG_AARCH64
2377 if (debug_dump)
2378 {
2379 const aarch64_opcode *tmp = alias;
2380 printf ("#### LIST ordered: ");
2381 while (tmp)
2382 {
2383 printf ("%s, ", tmp->name);
2384 tmp = aarch64_find_next_alias_opcode (tmp);
2385 }
2386 printf ("\n");
2387 }
2388 #endif /* DEBUG_AARCH64 */
2389
2390 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2391 {
2392 DEBUG_TRACE ("try %s", alias->name);
2393 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2394
2395 /* An alias can be a pseudo opcode which will never be used in the
2396 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2397 aliasing AND. */
2398 if (pseudo_opcode_p (alias))
2399 {
2400 DEBUG_TRACE ("skip pseudo %s", alias->name);
2401 continue;
2402 }
2403
2404 if ((inst->value & alias->mask) != alias->opcode)
2405 {
2406 DEBUG_TRACE ("skip %s as base opcode does not match", alias->name);
2407 continue;
2408 }
2409 /* No need to do any complicated transformation on operands, if the alias
2410 opcode does not have any operand. */
2411 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2412 {
2413 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2414 aarch64_replace_opcode (inst, alias);
2415 return;
2416 }
2417 if (alias->flags & F_CONV)
2418 {
2419 aarch64_inst copy;
2420 memcpy (&copy, inst, sizeof (aarch64_inst));
2421 /* ALIAS is the preference as long as the instruction can be
2422 successfully converted to the form of ALIAS. */
2423 if (convert_to_alias (&copy, alias) == 1)
2424 {
2425 aarch64_replace_opcode (&copy, alias);
2426 assert (aarch64_match_operands_constraint (&copy, NULL));
2427 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2428 memcpy (inst, &copy, sizeof (aarch64_inst));
2429 return;
2430 }
2431 }
2432 else
2433 {
2434 /* Directly decode the alias opcode. */
2435 aarch64_inst temp;
2436 memset (&temp, '\0', sizeof (aarch64_inst));
2437 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
2438 {
2439 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2440 memcpy (inst, &temp, sizeof (aarch64_inst));
2441 return;
2442 }
2443 }
2444 }
2445 }
2446
2447 /* Some instructions (including all SVE ones) use the instruction class
2448 to describe how a qualifiers_list index is represented in the instruction
2449 encoding. If INST is such an instruction, decode the appropriate fields
2450 and fill in the operand qualifiers accordingly. Return true if no
2451 problems are found. */
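/* For example, for the sve_size_hsd class handled below, a size field value
   of 1, 2 or 3 selects entry 0, 1 or 2 of the opcode's qualifiers_list
   (typically the .H, .S and .D forms), while 0 is rejected as invalid.  */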
2452
2453 static bfd_boolean
2454 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2455 {
2456 int i, variant;
2457
2458 variant = 0;
2459 switch (inst->opcode->iclass)
2460 {
2461 case sve_cpy:
2462 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2463 break;
2464
2465 case sve_index:
2466 i = extract_field (FLD_SVE_tsz, inst->value, 0);
2467 if (i == 0)
2468 return FALSE;
2469 while ((i & 1) == 0)
2470 {
2471 i >>= 1;
2472 variant += 1;
2473 }
2474 break;
2475
2476 case sve_limm:
2477 /* Pick the smallest applicable element size. */
2478 if ((inst->value & 0x20600) == 0x600)
2479 variant = 0;
2480 else if ((inst->value & 0x20400) == 0x400)
2481 variant = 1;
2482 else if ((inst->value & 0x20000) == 0)
2483 variant = 2;
2484 else
2485 variant = 3;
2486 break;
2487
2488 case sve_misc:
2489 /* sve_misc instructions have only a single variant. */
2490 break;
2491
2492 case sve_movprfx:
2493 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2494 break;
2495
2496 case sve_pred_zm:
2497 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2498 break;
2499
2500 case sve_shift_pred:
2501 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2502 sve_shift:
2503 if (i == 0)
2504 return FALSE;
2505 while (i != 1)
2506 {
2507 i >>= 1;
2508 variant += 1;
2509 }
2510 break;
2511
2512 case sve_shift_unpred:
2513 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2514 goto sve_shift;
2515
2516 case sve_size_bhs:
2517 variant = extract_field (FLD_size, inst->value, 0);
2518 if (variant >= 3)
2519 return FALSE;
2520 break;
2521
2522 case sve_size_bhsd:
2523 variant = extract_field (FLD_size, inst->value, 0);
2524 break;
2525
2526 case sve_size_hsd:
2527 i = extract_field (FLD_size, inst->value, 0);
2528 if (i < 1)
2529 return FALSE;
2530 variant = i - 1;
2531 break;
2532
2533 case sve_size_sd:
2534 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2535 break;
2536
2537 default:
2538 /* No mapping between instruction class and qualifiers. */
2539 return TRUE;
2540 }
2541
2542 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2543 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2544 return TRUE;
2545 }
2546 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2547 fails, which means that CODE is not an instruction of OPCODE; otherwise
2548 return 1.
2549
2550 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2551 determined and used to disassemble CODE; this is done just before the
2552 return. */
2553
2554 static int
2555 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2556 aarch64_inst *inst, int noaliases_p)
2557 {
2558 int i;
2559
2560 DEBUG_TRACE ("enter with %s", opcode->name);
2561
2562 assert (opcode && inst);
2563
2564 /* Check the base opcode. */
2565 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2566 {
2567 DEBUG_TRACE ("base opcode match FAIL");
2568 goto decode_fail;
2569 }
2570
2571 /* Clear inst. */
2572 memset (inst, '\0', sizeof (aarch64_inst));
2573
2574 inst->opcode = opcode;
2575 inst->value = code;
2576
2577 /* Assign operand codes and indexes. */
2578 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2579 {
2580 if (opcode->operands[i] == AARCH64_OPND_NIL)
2581 break;
2582 inst->operands[i].type = opcode->operands[i];
2583 inst->operands[i].idx = i;
2584 }
2585
2586 /* Call the opcode decoder indicated by flags. */
2587 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2588 {
2589 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2590 goto decode_fail;
2591 }
2592
2593 /* Possibly use the instruction class to determine the correct
2594 qualifier. */
2595 if (!aarch64_decode_variant_using_iclass (inst))
2596 {
2597 DEBUG_TRACE ("iclass-based decoder FAIL");
2598 goto decode_fail;
2599 }
2600
2601 /* Call operand decoders. */
2602 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2603 {
2604 const aarch64_operand *opnd;
2605 enum aarch64_opnd type;
2606
2607 type = opcode->operands[i];
2608 if (type == AARCH64_OPND_NIL)
2609 break;
2610 opnd = &aarch64_operands[type];
2611 if (operand_has_extractor (opnd)
2612 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
2613 {
2614 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2615 goto decode_fail;
2616 }
2617 }
2618
2619 /* If the opcode has a verifier, then check it now. */
2620 if (opcode->verifier && ! opcode->verifier (opcode, code))
2621 {
2622 DEBUG_TRACE ("operand verifier FAIL");
2623 goto decode_fail;
2624 }
2625
2626 /* Match the qualifiers. */
2627 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2628 {
2629 /* Arriving here, the CODE has been determined as a valid instruction
2630 of OPCODE and *INST has been filled with information of this OPCODE
2631 instruction. Before the return, check if the instruction has any
2632 alias and should be disassembled in the form of its alias instead.
2633 If the answer is yes, *INST will be updated. */
2634 if (!noaliases_p)
2635 determine_disassembling_preference (inst);
2636 DEBUG_TRACE ("SUCCESS");
2637 return 1;
2638 }
2639 else
2640 {
2641 DEBUG_TRACE ("constraint matching FAIL");
2642 }
2643
2644 decode_fail:
2645 return 0;
2646 }
2647 \f
2648 /* This does some user-friendly fix-up to *INST. It currently focuses on
2649 adjusting the qualifiers to help the printed instruction be
2650 recognized/understood more easily. */
2651
2652 static void
2653 user_friendly_fixup (aarch64_inst *inst)
2654 {
2655 switch (inst->opcode->iclass)
2656 {
2657 case testbranch:
2658 /* TBNZ Xn|Wn, #uimm6, label
2659 Test and Branch Not Zero: conditionally jumps to label if bit number
2660 uimm6 in register Xn is not zero. The bit number implies the width of
2661 the register, which may be written and should be disassembled as Wn if
2662 uimm6 is less than 32. Limited to a branch offset range of +/- 32KiB.
2663 */
2664 if (inst->operands[1].imm.value < 32)
2665 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2666 break;
2667 default: break;
2668 }
2669 }
2670
2671 /* Decode INSN and fill in *INST with the instruction information. An alias
2672 opcode may be filled in *INST if NOALIASES_P is FALSE. Return zero on
2673 success. */
2674
2675 int
2676 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2677 bfd_boolean noaliases_p)
2678 {
2679 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2680
2681 #ifdef DEBUG_AARCH64
2682 if (debug_dump)
2683 {
2684 const aarch64_opcode *tmp = opcode;
2685 printf ("\n");
2686 DEBUG_TRACE ("opcode lookup:");
2687 while (tmp != NULL)
2688 {
2689 aarch64_verbose (" %s", tmp->name);
2690 tmp = aarch64_find_next_opcode (tmp);
2691 }
2692 }
2693 #endif /* DEBUG_AARCH64 */
2694
2695 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2696 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2697 opcode field and value; the difference is that one of them has an
2698 extra field as part of the opcode, while that field is used for operand
2699 encoding in the other opcode(s) ('immh' in the case of the example). */
2700 while (opcode != NULL)
2701 {
2702 /* But only one opcode can be decoded successfully, as the
2703 decoding routine will check the constraints carefully. */
2704 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p) == 1)
2705 return ERR_OK;
2706 opcode = aarch64_find_next_opcode (opcode);
2707 }
2708
2709 return ERR_UND;
2710 }
2711
2712 /* Print operands. */
2713
2714 static void
2715 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2716 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2717 {
2718 int i, pcrel_p, num_printed;
2719 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2720 {
2721 char str[128];
2722 /* We rely primarily on the operand info in the opcode entry, but we also
2723 look into inst->operands to support the disassembling of the optional
2724 operand.
2725 The two operand codes should be the same in all cases, apart from
2726 when the operand can be optional. */
2727 if (opcode->operands[i] == AARCH64_OPND_NIL
2728 || opnds[i].type == AARCH64_OPND_NIL)
2729 break;
2730
2731 /* Generate the operand string in STR. */
2732 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
2733 &info->target);
2734
2735 /* Print the delimiter (taking account of omitted operand(s)). */
2736 if (str[0] != '\0')
2737 (*info->fprintf_func) (info->stream, "%s",
2738 num_printed++ == 0 ? "\t" : ", ");
2739
2740 /* Print the operand. */
2741 if (pcrel_p)
2742 (*info->print_address_func) (info->target, info);
2743 else
2744 (*info->fprintf_func) (info->stream, "%s", str);
2745 }
2746 }
2747
2748 /* Print the instruction mnemonic name. */
2749
2750 static void
2751 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2752 {
2753 if (inst->opcode->flags & F_COND)
2754 {
2755 /* For instructions that are truly conditionally executed, e.g. b.cond,
2756 prepare the full mnemonic name with the corresponding condition
2757 suffix. */
2758 char name[8], *ptr;
2759 size_t len;
2760
2761 ptr = strchr (inst->opcode->name, '.');
2762 assert (ptr && inst->cond);
2763 len = ptr - inst->opcode->name;
2764 assert (len < 8);
2765 strncpy (name, inst->opcode->name, len);
2766 name [len] = '\0';
2767 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2768 }
2769 else
2770 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2771 }
2772
2773 /* Print the instruction according to *INST. */
2774
2775 static void
2776 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2777 struct disassemble_info *info)
2778 {
2779 print_mnemonic_name (inst, info);
2780 print_operands (pc, inst->opcode, inst->operands, info);
2781 }
2782
2783 /* Entry-point of the instruction disassembler and printer. */
2784
2785 static void
2786 print_insn_aarch64_word (bfd_vma pc,
2787 uint32_t word,
2788 struct disassemble_info *info)
2789 {
2790 static const char *err_msg[6] =
2791 {
2792 [ERR_OK] = "_",
2793 [-ERR_UND] = "undefined",
2794 [-ERR_UNP] = "unpredictable",
2795 [-ERR_NYI] = "NYI"
2796 };
2797
2798 int ret;
2799 aarch64_inst inst;
2800
2801 info->insn_info_valid = 1;
2802 info->branch_delay_insns = 0;
2803 info->data_size = 0;
2804 info->target = 0;
2805 info->target2 = 0;
2806
2807 if (info->flags & INSN_HAS_RELOC)
2808 /* If the instruction has a reloc associated with it, then
2809 the offset field in the instruction will actually be the
2810 addend for the reloc. (If we are using REL type relocs).
2811 In such cases, we can ignore the pc when computing
2812 addresses, since the addend is not currently pc-relative. */
2813 pc = 0;
2814
2815 ret = aarch64_decode_insn (word, &inst, no_aliases);
2816
2817 if (((word >> 21) & 0x3ff) == 1)
2818 {
2819 /* RESERVED for ALES. */
2820 assert (ret != ERR_OK);
2821 ret = ERR_NYI;
2822 }
2823
2824 switch (ret)
2825 {
2826 case ERR_UND:
2827 case ERR_UNP:
2828 case ERR_NYI:
2829 /* Handle undefined instructions. */
2830 info->insn_type = dis_noninsn;
2831 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
2832 word, err_msg[-ret]);
2833 break;
2834 case ERR_OK:
2835 user_friendly_fixup (&inst);
2836 print_aarch64_insn (pc, &inst, info);
2837 break;
2838 default:
2839 abort ();
2840 }
2841 }
2842
2843 /* Disallow mapping symbols ($x, $d etc) from
2844 being displayed in symbol relative addresses. */
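/* Mapping symbols are named "$x" (code) or "$d" (data), optionally followed
   by '.' and an arbitrary suffix; the check below rejects both forms so that
   they are not used in symbol-relative addresses.  */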
2845
2846 bfd_boolean
2847 aarch64_symbol_is_valid (asymbol * sym,
2848 struct disassemble_info * info ATTRIBUTE_UNUSED)
2849 {
2850 const char * name;
2851
2852 if (sym == NULL)
2853 return FALSE;
2854
2855 name = bfd_asymbol_name (sym);
2856
2857 return name
2858 && (name[0] != '$'
2859 || (name[1] != 'x' && name[1] != 'd')
2860 || (name[2] != '\0' && name[2] != '.'));
2861 }
2862
2863 /* Print data bytes on INFO->STREAM. */
2864
2865 static void
2866 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
2867 uint32_t word,
2868 struct disassemble_info *info)
2869 {
2870 switch (info->bytes_per_chunk)
2871 {
2872 case 1:
2873 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
2874 break;
2875 case 2:
2876 info->fprintf_func (info->stream, ".short\t0x%04x", word);
2877 break;
2878 case 4:
2879 info->fprintf_func (info->stream, ".word\t0x%08x", word);
2880 break;
2881 default:
2882 abort ();
2883 }
2884 }
2885
2886 /* Try to infer the code or data type from a symbol.
2887 Returns nonzero if *MAP_TYPE was set. */
2888
2889 static int
2890 get_sym_code_type (struct disassemble_info *info, int n,
2891 enum map_type *map_type)
2892 {
2893 elf_symbol_type *es;
2894 unsigned int type;
2895 const char *name;
2896
2897 es = *(elf_symbol_type **)(info->symtab + n);
2898 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
2899
2900 /* If the symbol has function type then use that. */
2901 if (type == STT_FUNC)
2902 {
2903 *map_type = MAP_INSN;
2904 return TRUE;
2905 }
2906
2907 /* Check for mapping symbols. */
2908 name = bfd_asymbol_name(info->symtab[n]);
2909 if (name[0] == '$'
2910 && (name[1] == 'x' || name[1] == 'd')
2911 && (name[2] == '\0' || name[2] == '.'))
2912 {
2913 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
2914 return TRUE;
2915 }
2916
2917 return FALSE;
2918 }
2919
2920 /* Entry-point of the AArch64 disassembler. */
2921
2922 int
2923 print_insn_aarch64 (bfd_vma pc,
2924 struct disassemble_info *info)
2925 {
2926 bfd_byte buffer[INSNLEN];
2927 int status;
2928 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
2929 bfd_boolean found = FALSE;
2930 unsigned int size = 4;
2931 unsigned long data;
2932
2933 if (info->disassembler_options)
2934 {
2935 set_default_aarch64_dis_options (info);
2936
2937 parse_aarch64_dis_options (info->disassembler_options);
2938
2939 /* To avoid repeated parsing of these options, we remove them here. */
2940 info->disassembler_options = NULL;
2941 }
2942
2943 /* AArch64 instructions are always little-endian. */
2944 info->endian_code = BFD_ENDIAN_LITTLE;
2945
2946 /* First check the full symtab for a mapping symbol, even if there
2947 are no usable non-mapping symbols for this address. */
2948 if (info->symtab_size != 0
2949 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
2950 {
2951 enum map_type type = MAP_INSN;
2952 int last_sym = -1;
2953 bfd_vma addr;
2954 int n;
2955
2956 if (pc <= last_mapping_addr)
2957 last_mapping_sym = -1;
2958
2959 /* Start scanning at the start of the function, or wherever
2960 we finished last time. */
2961 n = info->symtab_pos + 1;
2962 if (n < last_mapping_sym)
2963 n = last_mapping_sym;
2964
2965 /* Scan up to the location being disassembled. */
2966 for (; n < info->symtab_size; n++)
2967 {
2968 addr = bfd_asymbol_value (info->symtab[n]);
2969 if (addr > pc)
2970 break;
2971 if ((info->section == NULL
2972 || info->section == info->symtab[n]->section)
2973 && get_sym_code_type (info, n, &type))
2974 {
2975 last_sym = n;
2976 found = TRUE;
2977 }
2978 }
2979
2980 if (!found)
2981 {
2982 n = info->symtab_pos;
2983 if (n < last_mapping_sym)
2984 n = last_mapping_sym;
2985
2986 /* No mapping symbol found at this address. Look backwards
2987 for a preceding one. */
2988 for (; n >= 0; n--)
2989 {
2990 if (get_sym_code_type (info, n, &type))
2991 {
2992 last_sym = n;
2993 found = TRUE;
2994 break;
2995 }
2996 }
2997 }
2998
2999 last_mapping_sym = last_sym;
3000 last_type = type;
3001
3002 /* Look a little bit ahead to see if we should print out
3003 less than four bytes of data. If there's a symbol,
3004 mapping or otherwise, after two bytes then don't
3005 print more. */
3006 if (last_type == MAP_DATA)
3007 {
3008 size = 4 - (pc & 3);
3009 for (n = last_sym + 1; n < info->symtab_size; n++)
3010 {
3011 addr = bfd_asymbol_value (info->symtab[n]);
3012 if (addr > pc)
3013 {
3014 if (addr - pc < size)
3015 size = addr - pc;
3016 break;
3017 }
3018 }
3019 /* If the next symbol is after three bytes, we need to
3020 print only part of the data, so that we can use either
3021 .byte or .short. */
3022 if (size == 3)
3023 size = (pc & 1) ? 1 : 2;
3024 }
3025 }
3026
3027 if (last_type == MAP_DATA)
3028 {
3029 /* size was set above. */
3030 info->bytes_per_chunk = size;
3031 info->display_endian = info->endian;
3032 printer = print_insn_data;
3033 }
3034 else
3035 {
3036 info->bytes_per_chunk = size = INSNLEN;
3037 info->display_endian = info->endian_code;
3038 printer = print_insn_aarch64_word;
3039 }
3040
3041 status = (*info->read_memory_func) (pc, buffer, size, info);
3042 if (status != 0)
3043 {
3044 (*info->memory_error_func) (status, pc, info);
3045 return -1;
3046 }
3047
3048 data = bfd_get_bits (buffer, size * 8,
3049 info->display_endian == BFD_ENDIAN_BIG);
3050
3051 (*printer) (pc, data, info);
3052
3053 return size;
3054 }
3055 \f
3056 void
3057 print_aarch64_disassembler_options (FILE *stream)
3058 {
3059 fprintf (stream, _("\n\
3060 The following AARCH64 specific disassembler options are supported for use\n\
3061 with the -M switch (multiple options should be separated by commas):\n"));
3062
3063 fprintf (stream, _("\n\
3064 no-aliases Don't print instruction aliases.\n"));
3065
3066 fprintf (stream, _("\n\
3067 aliases Do print instruction aliases.\n"));
3068
3069 #ifdef DEBUG_AARCH64
3070 fprintf (stream, _("\n\
3071 debug_dump Temp switch for debug trace.\n"));
3072 #endif /* DEBUG_AARCH64 */
3073
3074 fprintf (stream, _("\n"));
3075 }