[AArch64] Add ARMv8.3 FCMLA and FCADD instructions
1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "dis-asm.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define ERR_OK 0
30 #define ERR_UND -1
31 #define ERR_UNP -3
32 #define ERR_NYI -5
33
34 #define INSNLEN 4
35
36 /* Cached mapping symbol state. */
37 enum map_type
38 {
39 MAP_INSN,
40 MAP_DATA
41 };
42
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
46
47 /* Other options */
48 static int no_aliases = 0; /* If set, disassemble as the most general instruction. */
49 \f
50
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
61 {
62 no_aliases = 1;
63 return;
64 }
65
66 if (CONST_STRNEQ (option, "aliases"))
67 {
68 no_aliases = 0;
69 return;
70 }
71
72 #ifdef DEBUG_AARCH64
73 if (CONST_STRNEQ (option, "debug_dump"))
74 {
75 debug_dump = 1;
76 return;
77 }
78 #endif /* DEBUG_AARCH64 */
79
80 /* Invalid option. */
81 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
82 }
83
84 static void
85 parse_aarch64_dis_options (const char *options)
86 {
87 const char *option_end;
88
89 if (options == NULL)
90 return;
91
92 while (*options != '\0')
93 {
94 /* Skip empty options. */
95 if (*options == ',')
96 {
97 options++;
98 continue;
99 }
100
101 /* We know that *options is neither NUL nor a comma. */
102 option_end = options + 1;
103 while (*option_end != ',' && *option_end != '\0')
104 option_end++;
105
106 parse_aarch64_dis_option (options, option_end - options);
107
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
111 }
112 }
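/* A comma-separated option string such as "no-aliases" typically reaches this
   parser from the disassembler driver, e.g. (assuming a GNU binutils objdump
   built with AArch64 support):

     objdump -d -M no-aliases foo.o

   which makes parse_aarch64_dis_options () see "no-aliases" and set
   no_aliases above, so instructions are printed in their most general form
   rather than as aliases.  */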
113 \f
114 /* Functions doing the instruction disassembling. */
115
116 /* The unnamed arguments consist of the number of fields and information about
117 these fields, from which the VALUE will be extracted from CODE and returned.
118 MASK can be zero or the base mask of the opcode.
119 
120 N.B. the fields are required to be in such an order that the most significant
121 field for VALUE comes first, e.g. the <index> in
122 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123 is encoded in H:L:M; in that case the fields should be passed in
124 the order H, L, M. */
125
126 aarch64_insn
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
128 {
129 uint32_t num;
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
132 va_list va;
133
134 va_start (va, mask);
135 num = va_arg (va, uint32_t);
136 assert (num <= 5);
137 aarch64_insn value = 0x0;
138 while (num--)
139 {
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
144 }
145 return value;
146 }
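/* A minimal usage sketch: to recover a lane index encoded in H:L:M (most
   significant field first, as the comment above requires), a caller would
   write something like

     index = extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);

   which is exactly how aarch64_ext_reglane () below extracts the <index>
   of the S_H by-element forms.  */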
147
148 /* Extract the value of all fields in SELF->fields from instruction CODE.
149 The least significant bit comes from the final field. */
150
151 static aarch64_insn
152 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
153 {
154 aarch64_insn value;
155 unsigned int i;
156 enum aarch64_field_kind kind;
157
158 value = 0;
159 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
160 {
161 kind = self->fields[i];
162 value <<= fields[kind].width;
163 value |= extract_field (kind, code, 0);
164 }
165 return value;
166 }
167
168 /* Sign-extend bit I of VALUE. */
169 static inline int32_t
170 sign_extend (aarch64_insn value, unsigned i)
171 {
172 uint32_t ret = value;
173
174 assert (i < 32);
175 if ((value >> i) & 0x1)
176 {
177 uint32_t val = (uint32_t)(-1) << i;
178 ret = ret | val;
179 }
180 return (int32_t) ret;
181 }
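/* Worked example: for a 9-bit signed field such as imm9 (used by
   aarch64_ext_addr_simm below with I == width - 1 == 8), bit 8 is the
   sign bit, so

     sign_extend (0x1f8, 8) == -8

   because bit 8 of 0x1f8 is set and every bit above it gets filled in.  */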
182
183 /* N.B. the following inline helper functions create a dependency on the
184 order of operand qualifier enumerators. */
185
186 /* Given VALUE, return qualifier for a general purpose register. */
187 static inline enum aarch64_opnd_qualifier
188 get_greg_qualifier_from_value (aarch64_insn value)
189 {
190 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
191 assert (value <= 0x1
192 && aarch64_get_qualifier_standard_value (qualifier) == value);
193 return qualifier;
194 }
195
196 /* Given VALUE, return qualifier for a vector register. This does not support
197 decoding instructions that accept the 2H vector type. */
198
199 static inline enum aarch64_opnd_qualifier
200 get_vreg_qualifier_from_value (aarch64_insn value)
201 {
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
203
204 /* Instructions using vector type 2H should not call this function. Skip over
205 the 2H qualifier. */
206 if (qualifier >= AARCH64_OPND_QLF_V_2H)
207 qualifier += 1;
208
209 assert (value <= 0x8
210 && aarch64_get_qualifier_standard_value (qualifier) == value);
211 return qualifier;
212 }
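/* Worked example: value 3 first lands on AARCH64_OPND_QLF_V_8B + 3; since
   that is at or beyond the 2H qualifier it is bumped by one, giving
   AARCH64_OPND_QLF_V_8H, whose standard value is indeed 3 (e.g. size:Q ==
   01:1 for SHADD <Vd>.8H).  */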
213
214 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
215 static inline enum aarch64_opnd_qualifier
216 get_sreg_qualifier_from_value (aarch64_insn value)
217 {
218 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
219
220 assert (value <= 0x4
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
222 return qualifier;
223 }
224
225 /* Given the instruction in *INST, which is probably half way through the
226 decoding, work out the qualifier that our caller expects for operand
227 I. Return such a qualifier if we can establish it; otherwise return
228 AARCH64_OPND_QLF_NIL. */
229
230 static aarch64_opnd_qualifier_t
231 get_expected_qualifier (const aarch64_inst *inst, int i)
232 {
233 aarch64_opnd_qualifier_seq_t qualifiers;
234 /* Should not be called if the qualifier is known. */
235 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
236 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
237 i, qualifiers))
238 return qualifiers[i];
239 else
240 return AARCH64_OPND_QLF_NIL;
241 }
242
243 /* Operand extractors. */
244
245 int
246 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
247 const aarch64_insn code,
248 const aarch64_inst *inst ATTRIBUTE_UNUSED)
249 {
250 info->reg.regno = extract_field (self->fields[0], code, 0);
251 return 1;
252 }
253
254 int
255 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
256 const aarch64_insn code ATTRIBUTE_UNUSED,
257 const aarch64_inst *inst ATTRIBUTE_UNUSED)
258 {
259 assert (info->idx == 1
260 || info->idx == 3);
261 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
262 return 1;
263 }
264
265 /* e.g. IC <ic_op>{, <Xt>}. */
266 int
267 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
268 const aarch64_insn code,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED)
270 {
271 info->reg.regno = extract_field (self->fields[0], code, 0);
272 assert (info->idx == 1
273 && (aarch64_get_operand_class (inst->operands[0].type)
274 == AARCH64_OPND_CLASS_SYSTEM));
275 /* This will make the constraint checking happy and more importantly will
276 help the disassembler determine whether this operand is optional or
277 not. */
278 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
279
280 return 1;
281 }
282
283 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
284 int
285 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
286 const aarch64_insn code,
287 const aarch64_inst *inst ATTRIBUTE_UNUSED)
288 {
289 /* regno */
290 info->reglane.regno = extract_field (self->fields[0], code,
291 inst->opcode->mask);
292
293 /* Index and/or type. */
294 if (inst->opcode->iclass == asisdone
295 || inst->opcode->iclass == asimdins)
296 {
297 if (info->type == AARCH64_OPND_En
298 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
299 {
300 unsigned shift;
301 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
302 assert (info->idx == 1); /* Vn */
303 aarch64_insn value = extract_field (FLD_imm4, code, 0);
304 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
305 info->qualifier = get_expected_qualifier (inst, info->idx);
306 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
307 info->reglane.index = value >> shift;
308 }
309 else
310 {
311 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
312 imm5<3:0> <V>
313 0000 RESERVED
314 xxx1 B
315 xx10 H
316 x100 S
317 1000 D */
318 int pos = -1;
319 aarch64_insn value = extract_field (FLD_imm5, code, 0);
320 while (++pos <= 3 && (value & 0x1) == 0)
321 value >>= 1;
322 if (pos > 3)
323 return 0;
324 info->qualifier = get_sreg_qualifier_from_value (pos);
325 info->reglane.index = (unsigned) (value >> 1);
326 }
327 }
328 else
329 {
330 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
331 or FCMLA <Vd>.<T>, <Vn>.<T>, <Vm>.<Ts>[<index>], #<rotate>. */
332
333 /* Need information in other operand(s) to help decoding. */
334 info->qualifier = get_expected_qualifier (inst, info->idx);
335 switch (info->qualifier)
336 {
337 case AARCH64_OPND_QLF_S_H:
338 /* h:l:m */
339 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
340 FLD_M);
341 info->reglane.regno &= 0xf;
342 break;
343 case AARCH64_OPND_QLF_S_S:
344 /* h:l */
345 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
346 break;
347 case AARCH64_OPND_QLF_S_D:
348 /* H */
349 info->reglane.index = extract_field (FLD_H, code, 0);
350 break;
351 default:
352 return 0;
353 }
354
355 if (inst->opcode->op == OP_FCMLA_ELEM)
356 {
357 /* Complex operand takes two elements. */
358 if (info->reglane.index & 1)
359 return 0;
360 info->reglane.index /= 2;
361 }
362 }
363
364 return 1;
365 }
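/* Note on the OP_FCMLA_ELEM case above: a complex value occupies a pair of
   vector elements, so the lane number recovered from the H:L:M / H:L / H
   fields must be even and is halved before being printed; e.g. a raw index
   of 2 is disassembled as element [1], while an odd raw index makes the
   extractor reject the encoding.  */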
366
367 int
368 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
369 const aarch64_insn code,
370 const aarch64_inst *inst ATTRIBUTE_UNUSED)
371 {
372 /* R */
373 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
374 /* len */
375 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
376 return 1;
377 }
378
379 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
380 int
381 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
382 aarch64_opnd_info *info, const aarch64_insn code,
383 const aarch64_inst *inst)
384 {
385 aarch64_insn value;
386 /* Number of elements in each structure to be loaded/stored. */
387 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
388
389 struct
390 {
391 unsigned is_reserved;
392 unsigned num_regs;
393 unsigned num_elements;
394 } data [] =
395 { {0, 4, 4},
396 {1, 4, 4},
397 {0, 4, 1},
398 {0, 4, 2},
399 {0, 3, 3},
400 {1, 3, 3},
401 {0, 3, 1},
402 {0, 1, 1},
403 {0, 2, 2},
404 {1, 2, 2},
405 {0, 2, 1},
406 };
407
408 /* Rt */
409 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
410 /* opcode */
411 value = extract_field (FLD_opcode, code, 0);
412 if (expected_num != data[value].num_elements || data[value].is_reserved)
413 return 0;
414 info->reglist.num_regs = data[value].num_regs;
415
416 return 1;
417 }
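/* Worked example: for LD4 the opcode-dependent value (expected_num) is 4,
   so only opcode field value 0b0000 (row {0, 4, 4}: four registers, four
   elements per structure) is accepted; 0b0010 (row {0, 4, 1}) instead
   belongs to LD1/ST1 with a four-register list.  A row whose is_reserved
   flag is set, or whose num_elements disagrees with the opcode, is
   rejected as undefined.  */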
418
419 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
420 lanes instructions. */
421 int
422 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
423 aarch64_opnd_info *info, const aarch64_insn code,
424 const aarch64_inst *inst)
425 {
426 aarch64_insn value;
427
428 /* Rt */
429 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
430 /* S */
431 value = extract_field (FLD_S, code, 0);
432
433 /* Number of registers is equal to the number of elements in
434 each structure to be loaded/stored. */
435 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
436 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
437
438 /* Except when it is LD1R. */
439 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
440 info->reglist.num_regs = 2;
441
442 return 1;
443 }
444
445 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
446 load/store single element instructions. */
447 int
448 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
449 aarch64_opnd_info *info, const aarch64_insn code,
450 const aarch64_inst *inst ATTRIBUTE_UNUSED)
451 {
452 aarch64_field field = {0, 0};
453 aarch64_insn QSsize; /* fields Q:S:size. */
454 aarch64_insn opcodeh2; /* opcode<2:1> */
455
456 /* Rt */
457 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
458
459 /* Decode the index, opcode<2:1> and size. */
460 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
461 opcodeh2 = extract_field_2 (&field, code, 0);
462 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
463 switch (opcodeh2)
464 {
465 case 0x0:
466 info->qualifier = AARCH64_OPND_QLF_S_B;
467 /* Index encoded in "Q:S:size". */
468 info->reglist.index = QSsize;
469 break;
470 case 0x1:
471 if (QSsize & 0x1)
472 /* UND. */
473 return 0;
474 info->qualifier = AARCH64_OPND_QLF_S_H;
475 /* Index encoded in "Q:S:size<1>". */
476 info->reglist.index = QSsize >> 1;
477 break;
478 case 0x2:
479 if ((QSsize >> 1) & 0x1)
480 /* UND. */
481 return 0;
482 if ((QSsize & 0x1) == 0)
483 {
484 info->qualifier = AARCH64_OPND_QLF_S_S;
485 /* Index encoded in "Q:S". */
486 info->reglist.index = QSsize >> 2;
487 }
488 else
489 {
490 if (extract_field (FLD_S, code, 0))
491 /* UND */
492 return 0;
493 info->qualifier = AARCH64_OPND_QLF_S_D;
494 /* Index encoded in "Q". */
495 info->reglist.index = QSsize >> 3;
496 }
497 break;
498 default:
499 return 0;
500 }
501
502 info->reglist.has_index = 1;
503 info->reglist.num_regs = 0;
504 /* Number of registers is equal to the number of elements in
505 each structure to be loaded/stored. */
506 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
507 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
508
509 return 1;
510 }
511
512 /* Decode fields immh:immb and/or Q for e.g.
513 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
514 or SSHR <V><d>, <V><n>, #<shift>. */
515
516 int
517 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
518 aarch64_opnd_info *info, const aarch64_insn code,
519 const aarch64_inst *inst)
520 {
521 int pos;
522 aarch64_insn Q, imm, immh;
523 enum aarch64_insn_class iclass = inst->opcode->iclass;
524
525 immh = extract_field (FLD_immh, code, 0);
526 if (immh == 0)
527 return 0;
528 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
529 pos = 4;
530 /* Get highest set bit in immh. */
531 while (--pos >= 0 && (immh & 0x8) == 0)
532 immh <<= 1;
533
534 assert ((iclass == asimdshf || iclass == asisdshf)
535 && (info->type == AARCH64_OPND_IMM_VLSR
536 || info->type == AARCH64_OPND_IMM_VLSL));
537
538 if (iclass == asimdshf)
539 {
540 Q = extract_field (FLD_Q, code, 0);
541 /* immh Q <T>
542 0000 x SEE AdvSIMD modified immediate
543 0001 0 8B
544 0001 1 16B
545 001x 0 4H
546 001x 1 8H
547 01xx 0 2S
548 01xx 1 4S
549 1xxx 0 RESERVED
550 1xxx 1 2D */
551 info->qualifier =
552 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
553 }
554 else
555 info->qualifier = get_sreg_qualifier_from_value (pos);
556
557 if (info->type == AARCH64_OPND_IMM_VLSR)
558 /* immh <shift>
559 0000 SEE AdvSIMD modified immediate
560 0001 (16-UInt(immh:immb))
561 001x (32-UInt(immh:immb))
562 01xx (64-UInt(immh:immb))
563 1xxx (128-UInt(immh:immb)) */
564 info->imm.value = (16 << pos) - imm;
565 else
566 /* immh:immb
567 immh <shift>
568 0000 SEE AdvSIMD modified immediate
569 0001 (UInt(immh:immb)-8)
570 001x (UInt(immh:immb)-16)
571 01xx (UInt(immh:immb)-32)
572 1xxx (UInt(immh:immb)-64) */
573 info->imm.value = imm - (8 << pos);
574
575 return 1;
576 }
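/* Worked example (vector right shift): immh:immb == 0010:110 gives
   imm == 22 and leaves pos == 1 (the 001x group), so with Q == 1 the
   qualifier becomes 8H and the printed shift is 32 - 22 == 10, i.e.
   SSHR <Vd>.8H, <Vn>.8H, #10.  */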
577
578 /* Decode the shift immediate (the source element size in bits) for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
579 int
580 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
581 aarch64_opnd_info *info, const aarch64_insn code,
582 const aarch64_inst *inst ATTRIBUTE_UNUSED)
583 {
584 int64_t imm;
585 aarch64_insn val;
586 val = extract_field (FLD_size, code, 0);
587 switch (val)
588 {
589 case 0: imm = 8; break;
590 case 1: imm = 16; break;
591 case 2: imm = 32; break;
592 default: return 0;
593 }
594 info->imm.value = imm;
595 return 1;
596 }
597
598 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
599 The value in the field(s) will be extracted as an unsigned immediate value. */
600 int
601 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
602 const aarch64_insn code,
603 const aarch64_inst *inst ATTRIBUTE_UNUSED)
604 {
605 int64_t imm;
606
607 imm = extract_all_fields (self, code);
608
609 if (operand_need_sign_extension (self))
610 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
611
612 if (operand_need_shift_by_two (self))
613 imm <<= 2;
614
615 if (info->type == AARCH64_OPND_ADDR_ADRP)
616 imm <<= 12;
617
618 info->imm.value = imm;
619 return 1;
620 }
621
622 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
623 int
624 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
625 const aarch64_insn code,
626 const aarch64_inst *inst ATTRIBUTE_UNUSED)
627 {
628 aarch64_ext_imm (self, info, code, inst);
629 info->shifter.kind = AARCH64_MOD_LSL;
630 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
631 return 1;
632 }
633
634 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
635 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
636 int
637 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
638 aarch64_opnd_info *info,
639 const aarch64_insn code,
640 const aarch64_inst *inst ATTRIBUTE_UNUSED)
641 {
642 uint64_t imm;
643 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
644 aarch64_field field = {0, 0};
645
646 assert (info->idx == 1);
647
648 if (info->type == AARCH64_OPND_SIMD_FPIMM)
649 info->imm.is_fp = 1;
650
651 /* a:b:c:d:e:f:g:h */
652 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
653 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
654 {
655 /* Either MOVI <Dd>, #<imm>
656 or MOVI <Vd>.2D, #<imm>.
657 <imm> is a 64-bit immediate
658 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
659 encoded in "a:b:c:d:e:f:g:h". */
660 int i;
661 unsigned abcdefgh = imm;
662 for (imm = 0ull, i = 0; i < 8; i++)
663 if (((abcdefgh >> i) & 0x1) != 0)
664 imm |= 0xffull << (8 * i);
665 }
666 info->imm.value = imm;
667
668 /* cmode */
669 info->qualifier = get_expected_qualifier (inst, info->idx);
670 switch (info->qualifier)
671 {
672 case AARCH64_OPND_QLF_NIL:
673 /* no shift */
674 info->shifter.kind = AARCH64_MOD_NONE;
675 return 1;
676 case AARCH64_OPND_QLF_LSL:
677 /* shift zeros */
678 info->shifter.kind = AARCH64_MOD_LSL;
679 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
680 {
681 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
682 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
683 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
684 default: assert (0); return 0;
685 }
686 /* 00: 0; 01: 8; 10:16; 11:24. */
687 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
688 break;
689 case AARCH64_OPND_QLF_MSL:
690 /* shift ones */
691 info->shifter.kind = AARCH64_MOD_MSL;
692 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
693 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
694 break;
695 default:
696 assert (0);
697 return 0;
698 }
699
700 return 1;
701 }
702
703 /* Decode an 8-bit floating-point immediate. */
704 int
705 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
706 const aarch64_insn code,
707 const aarch64_inst *inst ATTRIBUTE_UNUSED)
708 {
709 info->imm.value = extract_all_fields (self, code);
710 info->imm.is_fp = 1;
711 return 1;
712 }
713
714 /* Decode rotate immediate for FCMLA <Vd>.<T>, <Vn>.<T>, <Vm>.<T>, #rotate. */
715 int
716 aarch64_ext_imm_rotate (const aarch64_operand *self, aarch64_opnd_info *info,
717 const aarch64_insn code,
718 const aarch64_inst *inst ATTRIBUTE_UNUSED)
719 {
720 uint64_t rot = extract_field (self->fields[0], code, 0);
721
722 switch (info->type)
723 {
724 case AARCH64_OPND_IMM_ROT1:
725 case AARCH64_OPND_IMM_ROT2:
726 /* rot value
727 0 0
728 1 90
729 2 180
730 3 270 */
731 assert (rot < 4U);
732 break;
733 case AARCH64_OPND_IMM_ROT3:
734 /* rot value
735 0 90
736 1 270 */
737 assert (rot < 2U);
738 rot = 2 * rot + 1;
739 break;
740 default:
741 assert (0);
742 return 0;
743 }
744 info->imm.value = rot * 90;
745 return 1;
746 }
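/* Worked example: for the two-bit rotations (IMM_ROT1/IMM_ROT2, used by the
   FCMLA forms) a rot field of 0b10 is printed as #180, while the single-bit
   IMM_ROT3 used by FCADD only allows the two legal rotations, mapping
   0 -> #90 and 1 -> #270.  */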
747
748 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
749 int
750 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
751 aarch64_opnd_info *info, const aarch64_insn code,
752 const aarch64_inst *inst ATTRIBUTE_UNUSED)
753 {
754 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
755 return 1;
756 }
757
758 /* Decode arithmetic immediate for e.g.
759 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
760 int
761 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
762 aarch64_opnd_info *info, const aarch64_insn code,
763 const aarch64_inst *inst ATTRIBUTE_UNUSED)
764 {
765 aarch64_insn value;
766
767 info->shifter.kind = AARCH64_MOD_LSL;
768 /* shift */
769 value = extract_field (FLD_shift, code, 0);
770 if (value >= 2)
771 return 0;
772 info->shifter.amount = value ? 12 : 0;
773 /* imm12 (unsigned) */
774 info->imm.value = extract_field (FLD_imm12, code, 0);
775
776 return 1;
777 }
778
779 /* Return true if VALUE is a valid logical immediate encoding, storing the
780 decoded value in *RESULT if so. ESIZE is the number of bytes in the
781 decoded immediate. */
782 static int
783 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
784 {
785 uint64_t imm, mask;
786 uint32_t N, R, S;
787 unsigned simd_size;
788
789 /* value is N:immr:imms. */
790 S = value & 0x3f;
791 R = (value >> 6) & 0x3f;
792 N = (value >> 12) & 0x1;
793
794 /* The immediate value is S+1 bits set to 1, rotated left by SIMDsize - R
795 (in other words, rotated right by R), then replicated. */
796 if (N != 0)
797 {
798 simd_size = 64;
799 mask = 0xffffffffffffffffull;
800 }
801 else
802 {
803 switch (S)
804 {
805 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
806 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
807 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
808 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
809 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
810 default: return 0;
811 }
812 mask = (1ull << simd_size) - 1;
813 /* Top bits are IGNORED. */
814 R &= simd_size - 1;
815 }
816
817 if (simd_size > esize * 8)
818 return 0;
819
820 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
821 if (S == simd_size - 1)
822 return 0;
823 /* S+1 consecutive bits to 1. */
824 /* NOTE: S can't be 63 due to detection above. */
825 imm = (1ull << (S + 1)) - 1;
826 /* Rotate to the left by simd_size - R. */
827 if (R != 0)
828 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
829 /* Replicate the value according to SIMD size. */
830 switch (simd_size)
831 {
832 case 2: imm = (imm << 2) | imm;
833 /* Fall through. */
834 case 4: imm = (imm << 4) | imm;
835 /* Fall through. */
836 case 8: imm = (imm << 8) | imm;
837 /* Fall through. */
838 case 16: imm = (imm << 16) | imm;
839 /* Fall through. */
840 case 32: imm = (imm << 32) | imm;
841 /* Fall through. */
842 case 64: break;
843 default: assert (0); return 0;
844 }
845
846 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
847
848 return 1;
849 }
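/* Worked example: for ORR <Wd|WSP>, <Wn>, #0xff the N:immr:imms encoding is
   0:000000:000111, i.e. N == 0, R == 0, S == 7.  S falls in the 0xxxxx
   range, selecting a 32-bit element; the element is (1 << 8) - 1 == 0xff;
   no rotation is applied; and replication plus the final esize mask leaves
   0x000000ff for a W destination (0x000000ff000000ff for an X one).  */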
850
851 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
852 int
853 aarch64_ext_limm (const aarch64_operand *self,
854 aarch64_opnd_info *info, const aarch64_insn code,
855 const aarch64_inst *inst)
856 {
857 uint32_t esize;
858 aarch64_insn value;
859
860 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
861 self->fields[2]);
862 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
863 return decode_limm (esize, value, &info->imm.value);
864 }
865
866 /* Decode a logical immediate for the BIC alias of AND (etc.). */
867 int
868 aarch64_ext_inv_limm (const aarch64_operand *self,
869 aarch64_opnd_info *info, const aarch64_insn code,
870 const aarch64_inst *inst)
871 {
872 if (!aarch64_ext_limm (self, info, code, inst))
873 return 0;
874 info->imm.value = ~info->imm.value;
875 return 1;
876 }
877
878 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
879 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
880 int
881 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
882 aarch64_opnd_info *info,
883 const aarch64_insn code, const aarch64_inst *inst)
884 {
885 aarch64_insn value;
886
887 /* Rt */
888 info->reg.regno = extract_field (FLD_Rt, code, 0);
889
890 /* size */
891 value = extract_field (FLD_ldst_size, code, 0);
892 if (inst->opcode->iclass == ldstpair_indexed
893 || inst->opcode->iclass == ldstnapair_offs
894 || inst->opcode->iclass == ldstpair_off
895 || inst->opcode->iclass == loadlit)
896 {
897 enum aarch64_opnd_qualifier qualifier;
898 switch (value)
899 {
900 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
901 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
902 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
903 default: return 0;
904 }
905 info->qualifier = qualifier;
906 }
907 else
908 {
909 /* opc1:size */
910 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
911 if (value > 0x4)
912 return 0;
913 info->qualifier = get_sreg_qualifier_from_value (value);
914 }
915
916 return 1;
917 }
918
919 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
920 int
921 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
922 aarch64_opnd_info *info,
923 aarch64_insn code,
924 const aarch64_inst *inst ATTRIBUTE_UNUSED)
925 {
926 /* Rn */
927 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
928 return 1;
929 }
930
931 /* Decode the address operand for e.g.
932 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
933 int
934 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
935 aarch64_opnd_info *info,
936 aarch64_insn code, const aarch64_inst *inst)
937 {
938 aarch64_insn S, value;
939
940 /* Rn */
941 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
942 /* Rm */
943 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
944 /* option */
945 value = extract_field (FLD_option, code, 0);
946 info->shifter.kind =
947 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
948 /* Fix-up the shifter kind; although the table-driven approach is
949 efficient, it is slightly inflexible, thus needing this fix-up. */
950 if (info->shifter.kind == AARCH64_MOD_UXTX)
951 info->shifter.kind = AARCH64_MOD_LSL;
952 /* S */
953 S = extract_field (FLD_S, code, 0);
954 if (S == 0)
955 {
956 info->shifter.amount = 0;
957 info->shifter.amount_present = 0;
958 }
959 else
960 {
961 int size;
962 /* Need information in other operand(s) to help achieve the decoding
963 from 'S' field. */
964 info->qualifier = get_expected_qualifier (inst, info->idx);
965 /* Get the size of the data element that is accessed, which may be
966 different from the source register size, e.g. in strb/ldrb. */
967 size = aarch64_get_qualifier_esize (info->qualifier);
968 info->shifter.amount = get_logsz (size);
969 info->shifter.amount_present = 1;
970 }
971
972 return 1;
973 }
974
975 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
976 int
977 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
978 aarch64_insn code, const aarch64_inst *inst)
979 {
980 aarch64_insn imm;
981 info->qualifier = get_expected_qualifier (inst, info->idx);
982
983 /* Rn */
984 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
985 /* simm (imm9 or imm7) */
986 imm = extract_field (self->fields[0], code, 0);
987 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
988 if (self->fields[0] == FLD_imm7)
989 /* scaled immediate in ld/st pair instructions. */
990 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
991 /* qualifier */
992 if (inst->opcode->iclass == ldst_unscaled
993 || inst->opcode->iclass == ldstnapair_offs
994 || inst->opcode->iclass == ldstpair_off
995 || inst->opcode->iclass == ldst_unpriv)
996 info->addr.writeback = 0;
997 else
998 {
999 /* pre/post- index */
1000 info->addr.writeback = 1;
1001 if (extract_field (self->fields[1], code, 0) == 1)
1002 info->addr.preind = 1;
1003 else
1004 info->addr.postind = 1;
1005 }
1006
1007 return 1;
1008 }
1009
1010 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1011 int
1012 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1013 aarch64_insn code,
1014 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1015 {
1016 int shift;
1017 info->qualifier = get_expected_qualifier (inst, info->idx);
1018 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1019 /* Rn */
1020 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1021 /* uimm12 */
1022 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1023 return 1;
1024 }
1025
1026 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1027 int
1028 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1029 aarch64_insn code,
1030 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1031 {
1032 aarch64_insn imm;
1033
1034 info->qualifier = get_expected_qualifier (inst, info->idx);
1035 /* Rn */
1036 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1037 /* simm10 */
1038 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1039 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1040 if (extract_field (self->fields[3], code, 0) == 1) {
1041 info->addr.writeback = 1;
1042 info->addr.preind = 1;
1043 }
1044 return 1;
1045 }
1046
1047 /* Decode the address operand for e.g.
1048 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1049 int
1050 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1051 aarch64_opnd_info *info,
1052 aarch64_insn code, const aarch64_inst *inst)
1053 {
1054 /* The opcode dependent area stores the number of elements in
1055 each structure to be loaded/stored. */
1056 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1057
1058 /* Rn */
1059 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1060 /* Rm | #<amount> */
1061 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1062 if (info->addr.offset.regno == 31)
1063 {
1064 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1065 /* Special handling of loading single structure to all lanes. */
1066 info->addr.offset.imm = (is_ld1r ? 1
1067 : inst->operands[0].reglist.num_regs)
1068 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1069 else
1070 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1071 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1072 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1073 }
1074 else
1075 info->addr.offset.is_reg = 1;
1076 info->addr.writeback = 1;
1077
1078 return 1;
1079 }
1080
1081 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1082 int
1083 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1084 aarch64_opnd_info *info,
1085 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1086 {
1087 aarch64_insn value;
1088 /* cond */
1089 value = extract_field (FLD_cond, code, 0);
1090 info->cond = get_cond_from_value (value);
1091 return 1;
1092 }
1093
1094 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1095 int
1096 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1097 aarch64_opnd_info *info,
1098 aarch64_insn code,
1099 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1100 {
1101 /* op0:op1:CRn:CRm:op2 */
1102 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1103 FLD_CRm, FLD_op2);
1104 return 1;
1105 }
1106
1107 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1108 int
1109 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1110 aarch64_opnd_info *info, aarch64_insn code,
1111 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1112 {
1113 int i;
1114 /* op1:op2 */
1115 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1116 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1117 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1118 return 1;
1119 /* Reserved value in <pstatefield>. */
1120 return 0;
1121 }
1122
1123 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1124 int
1125 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1126 aarch64_opnd_info *info,
1127 aarch64_insn code,
1128 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1129 {
1130 int i;
1131 aarch64_insn value;
1132 const aarch64_sys_ins_reg *sysins_ops;
1133 /* op0:op1:CRn:CRm:op2 */
1134 value = extract_fields (code, 0, 5,
1135 FLD_op0, FLD_op1, FLD_CRn,
1136 FLD_CRm, FLD_op2);
1137
1138 switch (info->type)
1139 {
1140 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1141 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1142 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1143 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1144 default: assert (0); return 0;
1145 }
1146
1147 for (i = 0; sysins_ops[i].name != NULL; ++i)
1148 if (sysins_ops[i].value == value)
1149 {
1150 info->sysins_op = sysins_ops + i;
1151 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1152 info->sysins_op->name,
1153 (unsigned)info->sysins_op->value,
1154 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1155 return 1;
1156 }
1157
1158 return 0;
1159 }
1160
1161 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1162
1163 int
1164 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1165 aarch64_opnd_info *info,
1166 aarch64_insn code,
1167 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1168 {
1169 /* CRm */
1170 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1171 return 1;
1172 }
1173
1174 /* Decode the prefetch operation option operand for e.g.
1175 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1176
1177 int
1178 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1179 aarch64_opnd_info *info,
1180 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1181 {
1182 /* prfop in Rt */
1183 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1184 return 1;
1185 }
1186
1187 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1188 to the matching name/value pair in aarch64_hint_options. */
1189
1190 int
1191 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1192 aarch64_opnd_info *info,
1193 aarch64_insn code,
1194 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1195 {
1196 /* CRm:op2. */
1197 unsigned hint_number;
1198 int i;
1199
1200 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1201
1202 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1203 {
1204 if (hint_number == aarch64_hint_options[i].value)
1205 {
1206 info->hint_option = &(aarch64_hint_options[i]);
1207 return 1;
1208 }
1209 }
1210
1211 return 0;
1212 }
1213
1214 /* Decode the extended register operand for e.g.
1215 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1216 int
1217 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1218 aarch64_opnd_info *info,
1219 aarch64_insn code,
1220 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1221 {
1222 aarch64_insn value;
1223
1224 /* Rm */
1225 info->reg.regno = extract_field (FLD_Rm, code, 0);
1226 /* option */
1227 value = extract_field (FLD_option, code, 0);
1228 info->shifter.kind =
1229 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1230 /* imm3 */
1231 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1232
1233 /* This makes the constraint checking happy. */
1234 info->shifter.operator_present = 1;
1235
1236 /* Assume inst->operands[0].qualifier has been resolved. */
1237 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1238 info->qualifier = AARCH64_OPND_QLF_W;
1239 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1240 && (info->shifter.kind == AARCH64_MOD_UXTX
1241 || info->shifter.kind == AARCH64_MOD_SXTX))
1242 info->qualifier = AARCH64_OPND_QLF_X;
1243
1244 return 1;
1245 }
1246
1247 /* Decode the shifted register operand for e.g.
1248 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1249 int
1250 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1251 aarch64_opnd_info *info,
1252 aarch64_insn code,
1253 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1254 {
1255 aarch64_insn value;
1256
1257 /* Rm */
1258 info->reg.regno = extract_field (FLD_Rm, code, 0);
1259 /* shift */
1260 value = extract_field (FLD_shift, code, 0);
1261 info->shifter.kind =
1262 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1263 if (info->shifter.kind == AARCH64_MOD_ROR
1264 && inst->opcode->iclass != log_shift)
1265 /* ROR is not available for the shifted register operand in arithmetic
1266 instructions. */
1267 return 0;
1268 /* imm6 */
1269 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1270
1271 /* This makes the constraint checking happy. */
1272 info->shifter.operator_present = 1;
1273
1274 return 1;
1275 }
1276
1277 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1278 where <offset> is given by the OFFSET parameter and where <factor> is
1279 1 plus SELF's operand-dependent value. fields[0] specifies the field
1280 that holds <base>. */
1281 static int
1282 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1283 aarch64_opnd_info *info, aarch64_insn code,
1284 int64_t offset)
1285 {
1286 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1287 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1288 info->addr.offset.is_reg = FALSE;
1289 info->addr.writeback = FALSE;
1290 info->addr.preind = TRUE;
1291 if (offset != 0)
1292 info->shifter.kind = AARCH64_MOD_MUL_VL;
1293 info->shifter.amount = 1;
1294 info->shifter.operator_present = (info->addr.offset.imm != 0);
1295 info->shifter.amount_present = FALSE;
1296 return 1;
1297 }
1298
1299 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1300 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1301 SELF's operand-dependent value. fields[0] specifies the field that
1302 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1303 int
1304 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1305 aarch64_opnd_info *info, aarch64_insn code,
1306 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1307 {
1308 int offset;
1309
1310 offset = extract_field (FLD_SVE_imm4, code, 0);
1311 offset = ((offset + 8) & 15) - 8;
1312 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1313 }
1314
1315 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1316 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1317 SELF's operand-dependent value. fields[0] specifies the field that
1318 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1319 int
1320 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1321 aarch64_opnd_info *info, aarch64_insn code,
1322 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1323 {
1324 int offset;
1325
1326 offset = extract_field (FLD_SVE_imm6, code, 0);
1327 offset = (((offset + 32) & 63) - 32);
1328 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1329 }
1330
1331 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1332 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1333 SELF's operand-dependent value. fields[0] specifies the field that
1334 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1335 and imm3 fields, with imm3 being the less-significant part. */
1336 int
1337 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1338 aarch64_opnd_info *info,
1339 aarch64_insn code,
1340 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1341 {
1342 int offset;
1343
1344 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1345 offset = (((offset + 256) & 511) - 256);
1346 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1347 }
1348
1349 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1350 is given by the OFFSET parameter and where <shift> is SELF's operand-
1351 dependent value. fields[0] specifies the base register field <base>. */
1352 static int
1353 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1354 aarch64_opnd_info *info, aarch64_insn code,
1355 int64_t offset)
1356 {
1357 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1358 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1359 info->addr.offset.is_reg = FALSE;
1360 info->addr.writeback = FALSE;
1361 info->addr.preind = TRUE;
1362 info->shifter.operator_present = FALSE;
1363 info->shifter.amount_present = FALSE;
1364 return 1;
1365 }
1366
1367 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1368 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1369 value. fields[0] specifies the base register field. */
1370 int
1371 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1372 aarch64_opnd_info *info, aarch64_insn code,
1373 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1374 {
1375 int offset = extract_field (FLD_SVE_imm6, code, 0);
1376 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1377 }
1378
1379 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1380 is SELF's operand-dependent value. fields[0] specifies the base
1381 register field and fields[1] specifies the offset register field. */
1382 int
1383 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1384 aarch64_opnd_info *info, aarch64_insn code,
1385 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1386 {
1387 int index_regno;
1388
1389 index_regno = extract_field (self->fields[1], code, 0);
1390 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1391 return 0;
1392
1393 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1394 info->addr.offset.regno = index_regno;
1395 info->addr.offset.is_reg = TRUE;
1396 info->addr.writeback = FALSE;
1397 info->addr.preind = TRUE;
1398 info->shifter.kind = AARCH64_MOD_LSL;
1399 info->shifter.amount = get_operand_specific_data (self);
1400 info->shifter.operator_present = (info->shifter.amount != 0);
1401 info->shifter.amount_present = (info->shifter.amount != 0);
1402 return 1;
1403 }
1404
1405 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1406 <shift> is SELF's operand-dependent value. fields[0] specifies the
1407 base register field, fields[1] specifies the offset register field and
1408 fields[2] is a single-bit field that selects SXTW over UXTW. */
1409 int
1410 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1411 aarch64_opnd_info *info, aarch64_insn code,
1412 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1413 {
1414 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1415 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1416 info->addr.offset.is_reg = TRUE;
1417 info->addr.writeback = FALSE;
1418 info->addr.preind = TRUE;
1419 if (extract_field (self->fields[2], code, 0))
1420 info->shifter.kind = AARCH64_MOD_SXTW;
1421 else
1422 info->shifter.kind = AARCH64_MOD_UXTW;
1423 info->shifter.amount = get_operand_specific_data (self);
1424 info->shifter.operator_present = TRUE;
1425 info->shifter.amount_present = (info->shifter.amount != 0);
1426 return 1;
1427 }
1428
1429 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1430 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1431 fields[0] specifies the base register field. */
1432 int
1433 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1434 aarch64_opnd_info *info, aarch64_insn code,
1435 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1436 {
1437 int offset = extract_field (FLD_imm5, code, 0);
1438 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1439 }
1440
1441 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1442 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1443 number. fields[0] specifies the base register field and fields[1]
1444 specifies the offset register field. */
1445 static int
1446 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1447 aarch64_insn code, enum aarch64_modifier_kind kind)
1448 {
1449 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1450 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1451 info->addr.offset.is_reg = TRUE;
1452 info->addr.writeback = FALSE;
1453 info->addr.preind = TRUE;
1454 info->shifter.kind = kind;
1455 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1456 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1457 || info->shifter.amount != 0);
1458 info->shifter.amount_present = (info->shifter.amount != 0);
1459 return 1;
1460 }
1461
1462 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1463 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1464 field and fields[1] specifies the offset register field. */
1465 int
1466 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1467 aarch64_opnd_info *info, aarch64_insn code,
1468 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1469 {
1470 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1471 }
1472
1473 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1474 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1475 field and fields[1] specifies the offset register field. */
1476 int
1477 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1478 aarch64_opnd_info *info, aarch64_insn code,
1479 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1480 {
1481 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1482 }
1483
1484 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1485 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1486 field and fields[1] specifies the offset register field. */
1487 int
1488 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1489 aarch64_opnd_info *info, aarch64_insn code,
1490 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1491 {
1492 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1493 }
1494
1495 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1496 has the raw field value and that the low 8 bits decode to VALUE. */
1497 static int
1498 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1499 {
1500 info->shifter.kind = AARCH64_MOD_LSL;
1501 info->shifter.amount = 0;
1502 if (info->imm.value & 0x100)
1503 {
1504 if (value == 0)
1505 /* Decode 0x100 as #0, LSL #8. */
1506 info->shifter.amount = 8;
1507 else
1508 value *= 256;
1509 }
1510 info->shifter.operator_present = (info->shifter.amount != 0);
1511 info->shifter.amount_present = (info->shifter.amount != 0);
1512 info->imm.value = value;
1513 return 1;
1514 }
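/* For example, when the shift bit (0x100) of the raw field is set, a
   non-zero low byte is simply scaled: raw 0x180 becomes the immediate
   32768 (128 * 256) with no explicit shifter, whereas the ambiguous raw
   value 0x100 is presented as #0, LSL #8 so the shift is not lost.  */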
1515
1516 /* Decode an SVE ADD/SUB immediate. */
1517 int
1518 aarch64_ext_sve_aimm (const aarch64_operand *self,
1519 aarch64_opnd_info *info, const aarch64_insn code,
1520 const aarch64_inst *inst)
1521 {
1522 return (aarch64_ext_imm (self, info, code, inst)
1523 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1524 }
1525
1526 /* Decode an SVE CPY/DUP immediate. */
1527 int
1528 aarch64_ext_sve_asimm (const aarch64_operand *self,
1529 aarch64_opnd_info *info, const aarch64_insn code,
1530 const aarch64_inst *inst)
1531 {
1532 return (aarch64_ext_imm (self, info, code, inst)
1533 && decode_sve_aimm (info, (int8_t) info->imm.value));
1534 }
1535
1536 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1537 The fields array specifies which field to use. */
1538 int
1539 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1540 aarch64_opnd_info *info, aarch64_insn code,
1541 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1542 {
1543 if (extract_field (self->fields[0], code, 0))
1544 info->imm.value = 0x3f800000;
1545 else
1546 info->imm.value = 0x3f000000;
1547 info->imm.is_fp = TRUE;
1548 return 1;
1549 }
1550
1551 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1552 The fields array specifies which field to use. */
1553 int
1554 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1555 aarch64_opnd_info *info, aarch64_insn code,
1556 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1557 {
1558 if (extract_field (self->fields[0], code, 0))
1559 info->imm.value = 0x40000000;
1560 else
1561 info->imm.value = 0x3f000000;
1562 info->imm.is_fp = TRUE;
1563 return 1;
1564 }
1565
1566 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1567 The fields array specifies which field to use. */
1568 int
1569 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1570 aarch64_opnd_info *info, aarch64_insn code,
1571 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1572 {
1573 if (extract_field (self->fields[0], code, 0))
1574 info->imm.value = 0x3f800000;
1575 else
1576 info->imm.value = 0x0;
1577 info->imm.is_fp = TRUE;
1578 return 1;
1579 }
1580
1581 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1582 array specifies which field to use for Zn. MM is encoded in the
1583 concatenation of imm5 and SVE_tszh, with imm5 being the less
1584 significant part. */
1585 int
1586 aarch64_ext_sve_index (const aarch64_operand *self,
1587 aarch64_opnd_info *info, aarch64_insn code,
1588 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1589 {
1590 int val;
1591
1592 info->reglane.regno = extract_field (self->fields[0], code, 0);
1593 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1594 if ((val & 15) == 0)
1595 return 0;
1596 while ((val & 1) == 0)
1597 val /= 2;
1598 info->reglane.index = val / 2;
1599 return 1;
1600 }
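/* Worked example: SVE_tszh:imm5 == 0b0010110 (val == 22) has its lowest set
   bit at position 1, which corresponds to the H element size; the loop
   reduces val to 11 and the index becomes 11 / 2 == 5, i.e. Z<n>.H[5].  */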
1601
1602 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1603 int
1604 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1605 aarch64_opnd_info *info, const aarch64_insn code,
1606 const aarch64_inst *inst)
1607 {
1608 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1609 return (aarch64_ext_limm (self, info, code, inst)
1610 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1611 }
1612
1613 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1614 to use for Zn. The opcode-dependent value specifies the number
1615 of registers in the list. */
1616 int
1617 aarch64_ext_sve_reglist (const aarch64_operand *self,
1618 aarch64_opnd_info *info, aarch64_insn code,
1619 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1620 {
1621 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1622 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1623 return 1;
1624 }
1625
1626 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1627 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1628 field. */
1629 int
1630 aarch64_ext_sve_scale (const aarch64_operand *self,
1631 aarch64_opnd_info *info, aarch64_insn code,
1632 const aarch64_inst *inst)
1633 {
1634 int val;
1635
1636 if (!aarch64_ext_imm (self, info, code, inst))
1637 return 0;
1638 val = extract_field (FLD_SVE_imm4, code, 0);
1639 info->shifter.kind = AARCH64_MOD_MUL;
1640 info->shifter.amount = val + 1;
1641 info->shifter.operator_present = (val != 0);
1642 info->shifter.amount_present = (val != 0);
1643 return 1;
1644 }
1645
1646 /* Return the top set bit in VALUE, which is expected to be relatively
1647 small. */
1648 static uint64_t
1649 get_top_bit (uint64_t value)
1650 {
1651 while ((value & -value) != value)
1652 value -= value & -value;
1653 return value;
1654 }
1655
1656 /* Decode an SVE shift-left immediate. */
1657 int
1658 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1659 aarch64_opnd_info *info, const aarch64_insn code,
1660 const aarch64_inst *inst)
1661 {
1662 if (!aarch64_ext_imm (self, info, code, inst)
1663 || info->imm.value == 0)
1664 return 0;
1665
1666 info->imm.value -= get_top_bit (info->imm.value);
1667 return 1;
1668 }
1669
1670 /* Decode an SVE shift-right immediate. */
1671 int
1672 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1673 aarch64_opnd_info *info, const aarch64_insn code,
1674 const aarch64_inst *inst)
1675 {
1676 if (!aarch64_ext_imm (self, info, code, inst)
1677 || info->imm.value == 0)
1678 return 0;
1679
1680 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1681 return 1;
1682 }
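/* Worked example: the SVE shift immediates use an encoding where the top
   set bit of the extracted value marks the element size and the bits below
   it hold the raw amount.  For an extracted value of 0b1011 (top set bit
   0b1000, the B-element range), a shift-left decodes as 0b1011 - 0b1000
   == 3, while a shift-right decodes as 2 * 0b1000 - 0b1011 == 5.  */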
1683 \f
1684 /* Bitfields that are commonly used to encode certain operands' information
1685 may be partially used as part of the base opcode in some instructions.
1686 For example, bit 1 of the field 'size' in
1687 FCVTXN <Vb><d>, <Va><n>
1688 is actually part of the base opcode, while only size<0> is available
1689 for encoding the register type. Another example is the AdvSIMD
1690 instruction ORR (register), in which the field 'size' is also used for
1691 the base opcode, leaving only the field 'Q' available to encode the
1692 vector register arrangement specifier '8B' or '16B'.
1693
1694 This function tries to deduce the qualifier from the value of partially
1695 constrained field(s). Given the VALUE of such a field or fields, the
1696 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1697 operand encoding), the function returns the matching qualifier or
1698 AARCH64_OPND_QLF_NIL if nothing matches.
1699
1700 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1701 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1702 may end with AARCH64_OPND_QLF_NIL. */
1703
1704 static enum aarch64_opnd_qualifier
1705 get_qualifier_from_partial_encoding (aarch64_insn value,
1706 const enum aarch64_opnd_qualifier* \
1707 candidates,
1708 aarch64_insn mask)
1709 {
1710 int i;
1711 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1712 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1713 {
1714 aarch64_insn standard_value;
1715 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1716 break;
1717 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1718 if ((standard_value & mask) == (value & mask))
1719 return candidates[i];
1720 }
1721 return AARCH64_OPND_QLF_NIL;
1722 }
1723
1724 /* Given a list of qualifier sequences, return all possible valid qualifiers
1725 for operand IDX in QUALIFIERS.
1726 Assume QUALIFIERS is an array whose length is large enough. */
1727
1728 static void
1729 get_operand_possible_qualifiers (int idx,
1730 const aarch64_opnd_qualifier_seq_t *list,
1731 enum aarch64_opnd_qualifier *qualifiers)
1732 {
1733 int i;
1734 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1735 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1736 break;
1737 }
1738
1739 /* Decode the size and Q fields for e.g. SHADD.
1740 We tag one operand with the qualifier according to the code;
1741 whether the qualifier is valid for this opcode or not is left to
1742 the semantic checking. */
1743
1744 static int
1745 decode_sizeq (aarch64_inst *inst)
1746 {
1747 int idx;
1748 enum aarch64_opnd_qualifier qualifier;
1749 aarch64_insn code;
1750 aarch64_insn value, mask;
1751 enum aarch64_field_kind fld_sz;
1752 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1753
1754 if (inst->opcode->iclass == asisdlse
1755 || inst->opcode->iclass == asisdlsep
1756 || inst->opcode->iclass == asisdlso
1757 || inst->opcode->iclass == asisdlsop)
1758 fld_sz = FLD_vldst_size;
1759 else
1760 fld_sz = FLD_size;
1761
1762 code = inst->value;
1763 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1764 /* Obtain the info about which bits of fields Q and size are actually
1765 available for operand encoding. Opcodes like FMAXNM and FMLA have
1766 size[1] unavailable. */
1767 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1768
1769 /* The index of the operand to tag with a qualifier and the qualifier
1770 itself are deduced from the value of the size and Q fields and the
1771 possible valid qualifier lists. */
1772 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1773 DEBUG_TRACE ("key idx: %d", idx);
1774
1775 /* For most related instructions, size:Q are fully available for operand
1776 encoding. */
1777 if (mask == 0x7)
1778 {
1779 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1780 return 1;
1781 }
1782
1783 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1784 candidates);
1785 #ifdef DEBUG_AARCH64
1786 if (debug_dump)
1787 {
1788 int i;
1789 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1790 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1791 DEBUG_TRACE ("qualifier %d: %s", i,
1792 aarch64_get_qualifier_name(candidates[i]));
1793 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1794 }
1795 #endif /* DEBUG_AARCH64 */
1796
1797 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1798
1799 if (qualifier == AARCH64_OPND_QLF_NIL)
1800 return 0;
1801
1802 inst->operands[idx].qualifier = qualifier;
1803 return 1;
1804 }
1805
1806 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1807 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1808
1809 static int
1810 decode_asimd_fcvt (aarch64_inst *inst)
1811 {
1812 aarch64_field field = {0, 0};
1813 aarch64_insn value;
1814 enum aarch64_opnd_qualifier qualifier;
1815
1816 gen_sub_field (FLD_size, 0, 1, &field);
1817 value = extract_field_2 (&field, inst->value, 0);
1818 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1819 : AARCH64_OPND_QLF_V_2D;
1820 switch (inst->opcode->op)
1821 {
1822 case OP_FCVTN:
1823 case OP_FCVTN2:
1824 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1825 inst->operands[1].qualifier = qualifier;
1826 break;
1827 case OP_FCVTL:
1828 case OP_FCVTL2:
1829 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1830 inst->operands[0].qualifier = qualifier;
1831 break;
1832 default:
1833 assert (0);
1834 return 0;
1835 }
1836
1837 return 1;
1838 }
1839
1840 /* Decode size[0], i.e. bit 22, for
1841 e.g. FCVTXN <Vb><d>, <Va><n>. */
1842
1843 static int
1844 decode_asisd_fcvtxn (aarch64_inst *inst)
1845 {
1846 aarch64_field field = {0, 0};
1847 gen_sub_field (FLD_size, 0, 1, &field);
1848 if (!extract_field_2 (&field, inst->value, 0))
1849 return 0;
1850 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1851 return 1;
1852 }
1853
1854 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1855 static int
1856 decode_fcvt (aarch64_inst *inst)
1857 {
1858 enum aarch64_opnd_qualifier qualifier;
1859 aarch64_insn value;
1860 const aarch64_field field = {15, 2};
1861
1862 /* opc dstsize */
1863 value = extract_field_2 (&field, inst->value, 0);
1864 switch (value)
1865 {
1866 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1867 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1868 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1869 default: return 0;
1870 }
1871 inst->operands[0].qualifier = qualifier;
1872
1873 return 1;
1874 }
1875
1876 /* Do miscellaneous decodings that are not common enough to be driven by
1877 flags. */
1878
1879 static int
1880 do_misc_decoding (aarch64_inst *inst)
1881 {
1882 unsigned int value;
1883 switch (inst->opcode->op)
1884 {
1885 case OP_FCVT:
1886 return decode_fcvt (inst);
1887
1888 case OP_FCVTN:
1889 case OP_FCVTN2:
1890 case OP_FCVTL:
1891 case OP_FCVTL2:
1892 return decode_asimd_fcvt (inst);
1893
1894 case OP_FCVTXN_S:
1895 return decode_asisd_fcvtxn (inst);
1896
1897 case OP_MOV_P_P:
1898 case OP_MOVS_P_P:
1899 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1900 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
1901 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1902
1903 case OP_MOV_Z_P_Z:
1904 return (extract_field (FLD_SVE_Zd, inst->value, 0)
1905 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1906
1907 case OP_MOV_Z_V:
1908 /* Index must be zero. */
1909 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1910 return value == 1 || value == 2 || value == 4 || value == 8;
1911
1912 case OP_MOV_Z_Z:
1913 return (extract_field (FLD_SVE_Zn, inst->value, 0)
1914 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1915
1916 case OP_MOV_Z_Zi:
1917 /* Index must be nonzero. */
1918 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1919 return value != 1 && value != 2 && value != 4 && value != 8;
1920
1921 case OP_MOVM_P_P_P:
1922 return (extract_field (FLD_SVE_Pd, inst->value, 0)
1923 == extract_field (FLD_SVE_Pm, inst->value, 0));
1924
1925 case OP_MOVZS_P_P_P:
1926 case OP_MOVZ_P_P_P:
1927 return (extract_field (FLD_SVE_Pn, inst->value, 0)
1928 == extract_field (FLD_SVE_Pm, inst->value, 0));
1929
1930 case OP_NOTS_P_P_P_Z:
1931 case OP_NOT_P_P_P_Z:
1932 return (extract_field (FLD_SVE_Pm, inst->value, 0)
1933 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1934
1935 default:
1936 return 0;
1937 }
1938 }
1939
1940 /* Opcodes whose fields are shared by multiple operands are usually marked
1941 with dedicated flags. In this function, we detect such flags, decode the
1942 related field(s) and store the information in one of the related operands.
1943 The 'one' operand is not an arbitrary operand but one of the operands that
1944 can accommodate all the information that has been decoded. */
1945
1946 static int
1947 do_special_decoding (aarch64_inst *inst)
1948 {
1949 int idx;
1950 aarch64_insn value;
1951 /* Condition for truly conditionally executed instructions, e.g. b.cond. */
1952 if (inst->opcode->flags & F_COND)
1953 {
1954 value = extract_field (FLD_cond2, inst->value, 0);
1955 inst->cond = get_cond_from_value (value);
1956 }
1957 /* 'sf' field. */
1958 if (inst->opcode->flags & F_SF)
1959 {
1960 idx = select_operand_for_sf_field_coding (inst->opcode);
1961 value = extract_field (FLD_sf, inst->value, 0);
1962 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1963 if ((inst->opcode->flags & F_N)
1964 && extract_field (FLD_N, inst->value, 0) != value)
1965 return 0;
1966 }
1967 /* 'lse_sz' field. */
1968 if (inst->opcode->flags & F_LSE_SZ)
1969 {
1970 idx = select_operand_for_sf_field_coding (inst->opcode);
1971 value = extract_field (FLD_lse_sz, inst->value, 0);
1972 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1973 }
1974 /* size:Q fields. */
1975 if (inst->opcode->flags & F_SIZEQ)
1976 return decode_sizeq (inst);
1977
1978 if (inst->opcode->flags & F_FPTYPE)
1979 {
1980 idx = select_operand_for_fptype_field_coding (inst->opcode);
1981 value = extract_field (FLD_type, inst->value, 0);
1982 switch (value)
1983 {
1984 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
1985 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
1986 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
1987 default: return 0;
1988 }
1989 }
1990
1991 if (inst->opcode->flags & F_SSIZE)
1992 {
1993 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
1994 of the base opcode. */
1995 aarch64_insn mask;
1996 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1997 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1998 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
1999 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2000 /* For most related instructions, the 'size' field is fully available for
2001 operand encoding. */
2002 if (mask == 0x3)
2003 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2004 else
2005 {
2006 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2007 candidates);
2008 inst->operands[idx].qualifier
2009 = get_qualifier_from_partial_encoding (value, candidates, mask);
2010 }
2011 }
2012
2013 if (inst->opcode->flags & F_T)
2014 {
2015 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2016 int num = 0;
2017 unsigned val, Q;
2018 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2019 == AARCH64_OPND_CLASS_SIMD_REG);
2020 /* imm5<3:0> q <t>
2021 0000 x reserved
2022 xxx1 0 8b
2023 xxx1 1 16b
2024 xx10 0 4h
2025 xx10 1 8h
2026 x100 0 2s
2027 x100 1 4s
2028 1000 0 reserved
2029 1000 1 2d */
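/* For illustration (example values): imm5 = 0b00100 has two trailing
zeros, so num = 2; with Q = 1, (num << 1) | Q = 5, which selects the
4S arrangement in line with the table above. */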
2030 val = extract_field (FLD_imm5, inst->value, 0);
2031 while ((val & 0x1) == 0 && ++num <= 3)
2032 val >>= 1;
2033 if (num > 3)
2034 return 0;
2035 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2036 inst->operands[0].qualifier =
2037 get_vreg_qualifier_from_value ((num << 1) | Q);
2038 }
2039
2040 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2041 {
2042 /* Use Rt to encode in the case of e.g.
2043 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2044 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2045 if (idx == -1)
2046 {
2047 /* Otherwise use the result operand, which has to be an integer
2048 register. */
2049 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2050 == AARCH64_OPND_CLASS_INT_REG);
2051 idx = 0;
2052 }
2053 assert (idx == 0 || idx == 1);
2054 value = extract_field (FLD_Q, inst->value, 0);
2055 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2056 }
2057
2058 if (inst->opcode->flags & F_LDS_SIZE)
2059 {
2060 aarch64_field field = {0, 0};
2061 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2062 == AARCH64_OPND_CLASS_INT_REG);
2063 gen_sub_field (FLD_opc, 0, 1, &field);
2064 value = extract_field_2 (&field, inst->value, 0);
2065 inst->operands[0].qualifier
2066 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2067 }
2068
2069 /* Miscellaneous decoding; done as the last step. */
2070 if (inst->opcode->flags & F_MISC)
2071 return do_misc_decoding (inst);
2072
2073 return 1;
2074 }
2075
2076 /* Converters converting a real opcode instruction to its alias form. */
2077
2078 /* ROR <Wd>, <Ws>, #<shift>
2079 is equivalent to:
2080 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2081 static int
2082 convert_extr_to_ror (aarch64_inst *inst)
2083 {
2084 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2085 {
2086 copy_operand_info (inst, 2, 3);
2087 inst->operands[3].type = AARCH64_OPND_NIL;
2088 return 1;
2089 }
2090 return 0;
2091 }
2092
2093 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2094 is equivalent to:
2095 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2096 static int
2097 convert_shll_to_xtl (aarch64_inst *inst)
2098 {
2099 if (inst->operands[2].imm.value == 0)
2100 {
2101 inst->operands[2].type = AARCH64_OPND_NIL;
2102 return 1;
2103 }
2104 return 0;
2105 }
2106
2107 /* Convert
2108 UBFM <Xd>, <Xn>, #<shift>, #63.
2109 to
2110 LSR <Xd>, <Xn>, #<shift>. */
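/* For illustration (example values): UBFM W0, W1, #4, #31 is shown as
LSR W0, W1, #4, since imms equals 31 for the 32-bit form. */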
2111 static int
2112 convert_bfm_to_sr (aarch64_inst *inst)
2113 {
2114 int64_t imms, val;
2115
2116 imms = inst->operands[3].imm.value;
2117 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2118 if (imms == val)
2119 {
2120 inst->operands[3].type = AARCH64_OPND_NIL;
2121 return 1;
2122 }
2123
2124 return 0;
2125 }
2126
2127 /* Convert MOV to ORR. */
2128 static int
2129 convert_orr_to_mov (aarch64_inst *inst)
2130 {
2131 /* MOV <Vd>.<T>, <Vn>.<T>
2132 is equivalent to:
2133 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2134 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2135 {
2136 inst->operands[2].type = AARCH64_OPND_NIL;
2137 return 1;
2138 }
2139 return 0;
2140 }
2141
2142 /* When <imms> >= <immr>, the instruction written:
2143 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2144 is equivalent to:
2145 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
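/* For illustration (example values): SBFM X0, X1, #8, #23 is shown as
SBFX X0, X1, #8, #16, since lsb = 8, width = 23 - 8 + 1 = 16 and
imms (23) >= immr (8). */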
2146
2147 static int
2148 convert_bfm_to_bfx (aarch64_inst *inst)
2149 {
2150 int64_t immr, imms;
2151
2152 immr = inst->operands[2].imm.value;
2153 imms = inst->operands[3].imm.value;
2154 if (imms >= immr)
2155 {
2156 int64_t lsb = immr;
2157 inst->operands[2].imm.value = lsb;
2158 inst->operands[3].imm.value = imms + 1 - lsb;
2159 /* The two opcodes have different qualifiers for
2160 the immediate operands; reset to help the checking. */
2161 reset_operand_qualifier (inst, 2);
2162 reset_operand_qualifier (inst, 3);
2163 return 1;
2164 }
2165
2166 return 0;
2167 }
2168
2169 /* When <imms> < <immr>, the instruction written:
2170 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2171 is equivalent to:
2172 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
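/* For illustration (example values): SBFM X0, X1, #56, #3 is shown as
SBFIZ X0, X1, #8, #4, since (64 - 8) & 0x3f = 56, width - 1 = 3 and
imms (3) < immr (56). */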
2173
2174 static int
2175 convert_bfm_to_bfi (aarch64_inst *inst)
2176 {
2177 int64_t immr, imms, val;
2178
2179 immr = inst->operands[2].imm.value;
2180 imms = inst->operands[3].imm.value;
2181 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2182 if (imms < immr)
2183 {
2184 inst->operands[2].imm.value = (val - immr) & (val - 1);
2185 inst->operands[3].imm.value = imms + 1;
2186 /* The two opcodes have different qualifiers for
2187 the immediate operands; reset to help the checking. */
2188 reset_operand_qualifier (inst, 2);
2189 reset_operand_qualifier (inst, 3);
2190 return 1;
2191 }
2192
2193 return 0;
2194 }
2195
2196 /* The instruction written:
2197 BFC <Xd>, #<lsb>, #<width>
2198 is equivalent to:
2199 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2200
2201 static int
2202 convert_bfm_to_bfc (aarch64_inst *inst)
2203 {
2204 int64_t immr, imms, val;
2205
2206 /* Should have been assured by the base opcode value. */
2207 assert (inst->operands[1].reg.regno == 0x1f);
2208
2209 immr = inst->operands[2].imm.value;
2210 imms = inst->operands[3].imm.value;
2211 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2212 if (imms < immr)
2213 {
2214 /* Drop XZR from the second operand. */
2215 copy_operand_info (inst, 1, 2);
2216 copy_operand_info (inst, 2, 3);
2217 inst->operands[3].type = AARCH64_OPND_NIL;
2218
2219 /* Recalculate the immediates. */
2220 inst->operands[1].imm.value = (val - immr) & (val - 1);
2221 inst->operands[2].imm.value = imms + 1;
2222
2223 /* The two opcodes have different qualifiers for the operands; reset to
2224 help the checking. */
2225 reset_operand_qualifier (inst, 1);
2226 reset_operand_qualifier (inst, 2);
2227 reset_operand_qualifier (inst, 3);
2228
2229 return 1;
2230 }
2231
2232 return 0;
2233 }
2234
2235 /* The instruction written:
2236 LSL <Xd>, <Xn>, #<shift>
2237 is equivalent to:
2238 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
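/* For illustration (example values): UBFM X0, X1, #60, #59 is shown as
LSL X0, X1, #4, since (64 - 4) & 0x3f = 60 and 63 - 4 = 59. */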
2239
2240 static int
2241 convert_ubfm_to_lsl (aarch64_inst *inst)
2242 {
2243 int64_t immr = inst->operands[2].imm.value;
2244 int64_t imms = inst->operands[3].imm.value;
2245 int64_t val
2246 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2247
2248 if ((immr == 0 && imms == val) || immr == imms + 1)
2249 {
2250 inst->operands[3].type = AARCH64_OPND_NIL;
2251 inst->operands[2].imm.value = val - imms;
2252 return 1;
2253 }
2254
2255 return 0;
2256 }
2257
2258 /* CINC <Wd>, <Wn>, <cond>
2259 is equivalent to:
2260 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2261 where <cond> is not AL or NV. */
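/* For illustration (example values): CSINC W0, W1, W1, NE is shown as
CINC W0, W1, EQ, the duplicated source register being dropped and the
condition inverted. */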
2262
2263 static int
2264 convert_from_csel (aarch64_inst *inst)
2265 {
2266 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2267 && (inst->operands[3].cond->value & 0xe) != 0xe)
2268 {
2269 copy_operand_info (inst, 2, 3);
2270 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2271 inst->operands[3].type = AARCH64_OPND_NIL;
2272 return 1;
2273 }
2274 return 0;
2275 }
2276
2277 /* CSET <Wd>, <cond>
2278 is equivalent to:
2279 CSINC <Wd>, WZR, WZR, invert(<cond>)
2280 where <cond> is not AL or NV. */
2281
2282 static int
2283 convert_csinc_to_cset (aarch64_inst *inst)
2284 {
2285 if (inst->operands[1].reg.regno == 0x1f
2286 && inst->operands[2].reg.regno == 0x1f
2287 && (inst->operands[3].cond->value & 0xe) != 0xe)
2288 {
2289 copy_operand_info (inst, 1, 3);
2290 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2291 inst->operands[3].type = AARCH64_OPND_NIL;
2292 inst->operands[2].type = AARCH64_OPND_NIL;
2293 return 1;
2294 }
2295 return 0;
2296 }
2297
2298 /* MOV <Wd>, #<imm>
2299 is equivalent to:
2300 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2301
2302 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2303 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2304 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2305 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2306 machine-instruction mnemonic must be used. */
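/* For illustration (example values): MOVZ W0, #0x12, LSL #16 is shown as
MOV W0, #0x120000, the shift being folded into the printed immediate. */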
2307
2308 static int
2309 convert_movewide_to_mov (aarch64_inst *inst)
2310 {
2311 uint64_t value = inst->operands[1].imm.value;
2312 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2313 if (value == 0 && inst->operands[1].shifter.amount != 0)
2314 return 0;
2315 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2316 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2317 value <<= inst->operands[1].shifter.amount;
2318 /* As an alias converter, note that INST->OPCODE is the opcode of
2319 the real instruction. */
2320 if (inst->opcode->op == OP_MOVN)
2321 {
2322 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2323 value = ~value;
2324 /* A MOVN has an immediate that could be encoded by MOVZ. */
2325 if (aarch64_wide_constant_p (value, is32, NULL) == TRUE)
2326 return 0;
2327 }
2328 inst->operands[1].imm.value = value;
2329 inst->operands[1].shifter.amount = 0;
2330 return 1;
2331 }
2332
2333 /* MOV <Wd>, #<imm>
2334 is equivalent to:
2335 ORR <Wd>, WZR, #<imm>.
2336
2337 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2338 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2339 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2340 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2341 machine-instruction mnemonic must be used. */
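/* For illustration (example values): ORR W0, WZR, #0x55555555 is shown as
MOV W0, #0x55555555, whereas an immediate such as #0xff00, which MOVZ
could also encode, keeps the ORR mnemonic. */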
2342
2343 static int
2344 convert_movebitmask_to_mov (aarch64_inst *inst)
2345 {
2346 int is32;
2347 uint64_t value;
2348
2349 /* Should have been assured by the base opcode value. */
2350 assert (inst->operands[1].reg.regno == 0x1f);
2351 copy_operand_info (inst, 1, 2);
2352 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2353 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2354 value = inst->operands[1].imm.value;
2355 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2356 instruction. */
2357 if (inst->operands[0].reg.regno != 0x1f
2358 && (aarch64_wide_constant_p (value, is32, NULL) == TRUE
2359 || aarch64_wide_constant_p (~value, is32, NULL) == TRUE))
2360 return 0;
2361
2362 inst->operands[2].type = AARCH64_OPND_NIL;
2363 return 1;
2364 }
2365
2366 /* Some alias opcodes are disassembled by being converted from their real form.
2367 N.B. INST->OPCODE is the real opcode rather than the alias. */
2368
2369 static int
2370 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2371 {
2372 switch (alias->op)
2373 {
2374 case OP_ASR_IMM:
2375 case OP_LSR_IMM:
2376 return convert_bfm_to_sr (inst);
2377 case OP_LSL_IMM:
2378 return convert_ubfm_to_lsl (inst);
2379 case OP_CINC:
2380 case OP_CINV:
2381 case OP_CNEG:
2382 return convert_from_csel (inst);
2383 case OP_CSET:
2384 case OP_CSETM:
2385 return convert_csinc_to_cset (inst);
2386 case OP_UBFX:
2387 case OP_BFXIL:
2388 case OP_SBFX:
2389 return convert_bfm_to_bfx (inst);
2390 case OP_SBFIZ:
2391 case OP_BFI:
2392 case OP_UBFIZ:
2393 return convert_bfm_to_bfi (inst);
2394 case OP_BFC:
2395 return convert_bfm_to_bfc (inst);
2396 case OP_MOV_V:
2397 return convert_orr_to_mov (inst);
2398 case OP_MOV_IMM_WIDE:
2399 case OP_MOV_IMM_WIDEN:
2400 return convert_movewide_to_mov (inst);
2401 case OP_MOV_IMM_LOG:
2402 return convert_movebitmask_to_mov (inst);
2403 case OP_ROR_IMM:
2404 return convert_extr_to_ror (inst);
2405 case OP_SXTL:
2406 case OP_SXTL2:
2407 case OP_UXTL:
2408 case OP_UXTL2:
2409 return convert_shll_to_xtl (inst);
2410 default:
2411 return 0;
2412 }
2413 }
2414
2415 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2416 aarch64_inst *, int);
2417
2418 /* Given the instruction information in *INST, check if the instruction has
2419 any alias form that can be used to represent *INST. If the answer is yes,
2420 update *INST to be in the form of the determined alias. */
2421
2422 /* In the opcode description table, the following flags are used in opcode
2423 entries to help establish the relations between the real and alias opcodes:
2424
2425 F_ALIAS: opcode is an alias
2426 F_HAS_ALIAS: opcode has alias(es)
2427 F_P1
2428 F_P2
2429 F_P3: Disassembly preference priority 1-3 (the larger the
2430 higher). If nothing is specified, the priority defaults
2431 to 0, i.e. the lowest priority.
2432
2433 Although the relation between the machine and the alias instructions is not
2434 explicitly described, it can be easily determined from the base opcode
2435 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2436 description entries:
2437
2438 The mask of an alias opcode must be equal to or a super-set (i.e. more
2439 constrained) of that of the aliased opcode; so is the base opcode value.
2440
2441 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2442 && (opcode->mask & real->mask) == real->mask
2443 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2444 then OPCODE is an alias of, and only of, the REAL instruction
2445
2446 The alias relationship is forced to be flat-structured to keep the related
2447 algorithms simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2448
2449 During disassembling, the decoding decision tree (in
2450 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2451 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2452 not specified), the disassembler will check whether any alias instruction
2453 exists for this real instruction. If there is, the disassembler will try
2454 to disassemble the 32-bit binary again using the alias's rule, or try to
2455 convert the IR to the form of the alias. In the case of multiple aliases,
2456 the aliases are tried one by one from the highest priority (currently the
2457 flag F_P3) to the lowest priority (no priority flag), and the first one
2458 that succeeds is adopted.
2459
2460 You may ask why there is a need to convert the IR from one form to another
2461 when handling certain aliases. This is because, on the one hand, it avoids
2462 adding more operand code to handle unusual encoding/decoding; on the other
2463 hand, during disassembling, the conversion is an effective way to check the
2464 condition of an alias (as an alias may be adopted only if certain
2465 conditions are met).
2466
2467 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2468 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2469 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
2470
2471 static void
2472 determine_disassembling_preference (struct aarch64_inst *inst)
2473 {
2474 const aarch64_opcode *opcode;
2475 const aarch64_opcode *alias;
2476
2477 opcode = inst->opcode;
2478
2479 /* This opcode does not have an alias, so use itself. */
2480 if (opcode_has_alias (opcode) == FALSE)
2481 return;
2482
2483 alias = aarch64_find_alias_opcode (opcode);
2484 assert (alias);
2485
2486 #ifdef DEBUG_AARCH64
2487 if (debug_dump)
2488 {
2489 const aarch64_opcode *tmp = alias;
2490 printf ("#### LIST ordered: ");
2491 while (tmp)
2492 {
2493 printf ("%s, ", tmp->name);
2494 tmp = aarch64_find_next_alias_opcode (tmp);
2495 }
2496 printf ("\n");
2497 }
2498 #endif /* DEBUG_AARCH64 */
2499
2500 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2501 {
2502 DEBUG_TRACE ("try %s", alias->name);
2503 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2504
2505 /* An alias can be a pseudo opcode which will never be used in the
2506 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2507 aliasing AND. */
2508 if (pseudo_opcode_p (alias))
2509 {
2510 DEBUG_TRACE ("skip pseudo %s", alias->name);
2511 continue;
2512 }
2513
2514 if ((inst->value & alias->mask) != alias->opcode)
2515 {
2516 DEBUG_TRACE ("skip %s as base opcode does not match", alias->name);
2517 continue;
2518 }
2519 /* No need to do any complicated transformation on operands, if the alias
2520 opcode does not have any operand. */
2521 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2522 {
2523 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2524 aarch64_replace_opcode (inst, alias);
2525 return;
2526 }
2527 if (alias->flags & F_CONV)
2528 {
2529 aarch64_inst copy;
2530 memcpy (&copy, inst, sizeof (aarch64_inst));
2531 /* ALIAS is the preference as long as the instruction can be
2532 successfully converted to the form of ALIAS. */
2533 if (convert_to_alias (&copy, alias) == 1)
2534 {
2535 aarch64_replace_opcode (&copy, alias);
2536 assert (aarch64_match_operands_constraint (&copy, NULL));
2537 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2538 memcpy (inst, &copy, sizeof (aarch64_inst));
2539 return;
2540 }
2541 }
2542 else
2543 {
2544 /* Directly decode the alias opcode. */
2545 aarch64_inst temp;
2546 memset (&temp, '\0', sizeof (aarch64_inst));
2547 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
2548 {
2549 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2550 memcpy (inst, &temp, sizeof (aarch64_inst));
2551 return;
2552 }
2553 }
2554 }
2555 }
2556
2557 /* Some instructions (including all SVE ones) use the instruction class
2558 to describe how a qualifiers_list index is represented in the instruction
2559 encoding. If INST is such an instruction, decode the appropriate fields
2560 and fill in the operand qualifiers accordingly. Return true if no
2561 problems are found. */
2562
2563 static bfd_boolean
2564 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2565 {
2566 int i, variant;
2567
2568 variant = 0;
2569 switch (inst->opcode->iclass)
2570 {
2571 case sve_cpy:
2572 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2573 break;
2574
2575 case sve_index:
2576 i = extract_field (FLD_SVE_tsz, inst->value, 0);
2577 if (i == 0)
2578 return FALSE;
2579 while ((i & 1) == 0)
2580 {
2581 i >>= 1;
2582 variant += 1;
2583 }
2584 break;
2585
2586 case sve_limm:
2587 /* Pick the smallest applicable element size. */
2588 if ((inst->value & 0x20600) == 0x600)
2589 variant = 0;
2590 else if ((inst->value & 0x20400) == 0x400)
2591 variant = 1;
2592 else if ((inst->value & 0x20000) == 0)
2593 variant = 2;
2594 else
2595 variant = 3;
2596 break;
2597
2598 case sve_misc:
2599 /* sve_misc instructions have only a single variant. */
2600 break;
2601
2602 case sve_movprfx:
2603 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2604 break;
2605
2606 case sve_pred_zm:
2607 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2608 break;
2609
2610 case sve_shift_pred:
2611 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2612 sve_shift:
2613 if (i == 0)
2614 return FALSE;
2615 while (i != 1)
2616 {
2617 i >>= 1;
2618 variant += 1;
2619 }
2620 break;
2621
2622 case sve_shift_unpred:
2623 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2624 goto sve_shift;
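/* For illustration (example values): the position of the most significant
set bit of the extracted tsz value selects the variant, so 0b0001 gives
variant 0 (B), 0b001x variant 1 (H), 0b01xx variant 2 (S) and 0b1xxx
variant 3 (D). */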
2625
2626 case sve_size_bhs:
2627 variant = extract_field (FLD_size, inst->value, 0);
2628 if (variant >= 3)
2629 return FALSE;
2630 break;
2631
2632 case sve_size_bhsd:
2633 variant = extract_field (FLD_size, inst->value, 0);
2634 break;
2635
2636 case sve_size_hsd:
2637 i = extract_field (FLD_size, inst->value, 0);
2638 if (i < 1)
2639 return FALSE;
2640 variant = i - 1;
2641 break;
2642
2643 case sve_size_sd:
2644 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2645 break;
2646
2647 default:
2648 /* No mapping between instruction class and qualifiers. */
2649 return TRUE;
2650 }
2651
2652 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2653 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2654 return TRUE;
2655 }
2656 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2657 fails, which means that CODE is not an instruction of OPCODE; otherwise
2658 return 1.
2659
2660 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2661 determined and used to disassemble CODE; this is done just before the
2662 return. */
2663
2664 static int
2665 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2666 aarch64_inst *inst, int noaliases_p)
2667 {
2668 int i;
2669
2670 DEBUG_TRACE ("enter with %s", opcode->name);
2671
2672 assert (opcode && inst);
2673
2674 /* Check the base opcode. */
2675 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2676 {
2677 DEBUG_TRACE ("base opcode match FAIL");
2678 goto decode_fail;
2679 }
2680
2681 /* Clear inst. */
2682 memset (inst, '\0', sizeof (aarch64_inst));
2683
2684 inst->opcode = opcode;
2685 inst->value = code;
2686
2687 /* Assign operand codes and indexes. */
2688 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2689 {
2690 if (opcode->operands[i] == AARCH64_OPND_NIL)
2691 break;
2692 inst->operands[i].type = opcode->operands[i];
2693 inst->operands[i].idx = i;
2694 }
2695
2696 /* Call the opcode decoder indicated by flags. */
2697 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2698 {
2699 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2700 goto decode_fail;
2701 }
2702
2703 /* Possibly use the instruction class to determine the correct
2704 qualifier. */
2705 if (!aarch64_decode_variant_using_iclass (inst))
2706 {
2707 DEBUG_TRACE ("iclass-based decoder FAIL");
2708 goto decode_fail;
2709 }
2710
2711 /* Call operand decoders. */
2712 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2713 {
2714 const aarch64_operand *opnd;
2715 enum aarch64_opnd type;
2716
2717 type = opcode->operands[i];
2718 if (type == AARCH64_OPND_NIL)
2719 break;
2720 opnd = &aarch64_operands[type];
2721 if (operand_has_extractor (opnd)
2722 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
2723 {
2724 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2725 goto decode_fail;
2726 }
2727 }
2728
2729 /* If the opcode has a verifier, then check it now. */
2730 if (opcode->verifier && ! opcode->verifier (opcode, code))
2731 {
2732 DEBUG_TRACE ("operand verifier FAIL");
2733 goto decode_fail;
2734 }
2735
2736 /* Match the qualifiers. */
2737 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2738 {
2739 /* Arriving here, the CODE has been determined as a valid instruction
2740 of OPCODE and *INST has been filled with information of this OPCODE
2741 instruction. Before the return, check if the instruction has any
2742 alias and should be disassembled in the form of its alias instead.
2743 If the answer is yes, *INST will be updated. */
2744 if (!noaliases_p)
2745 determine_disassembling_preference (inst);
2746 DEBUG_TRACE ("SUCCESS");
2747 return 1;
2748 }
2749 else
2750 {
2751 DEBUG_TRACE ("constraint matching FAIL");
2752 }
2753
2754 decode_fail:
2755 return 0;
2756 }
2757 \f
2758 /* This does some user-friendly fix-up to *INST. It currently focuses on
2759 adjusting the qualifiers so that the printed instruction can be
2760 recognized/understood more easily. */
2761
2762 static void
2763 user_friendly_fixup (aarch64_inst *inst)
2764 {
2765 switch (inst->opcode->iclass)
2766 {
2767 case testbranch:
2768 /* TBNZ Xn|Wn, #uimm6, label
2769 Test and Branch Not Zero: conditionally jumps to label if bit number
2770 uimm6 in register Xn is not zero. The bit number implies the width of
2771 the register, which may be written and should be disassembled as Wn if
2772 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2773 */
2774 if (inst->operands[1].imm.value < 32)
2775 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2776 break;
2777 default: break;
2778 }
2779 }
2780
2781 /* Decode INSN and fill in *INST with the instruction information. An alias
2782 opcode may be filled in *INST if NOALIASES_P is FALSE. Return zero on
2783 success. */
2784
2785 int
2786 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2787 bfd_boolean noaliases_p)
2788 {
2789 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2790
2791 #ifdef DEBUG_AARCH64
2792 if (debug_dump)
2793 {
2794 const aarch64_opcode *tmp = opcode;
2795 printf ("\n");
2796 DEBUG_TRACE ("opcode lookup:");
2797 while (tmp != NULL)
2798 {
2799 aarch64_verbose (" %s", tmp->name);
2800 tmp = aarch64_find_next_opcode (tmp);
2801 }
2802 }
2803 #endif /* DEBUG_AARCH64 */
2804
2805 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2806 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2807 opcode field and value, apart from the difference that one of them has an
2808 extra field as part of the opcode, but such a field is used for operand
2809 encoding in other opcode(s) ('immh' in the case of the example). */
2810 while (opcode != NULL)
2811 {
2812 /* But only one opcode can be decoded successfully, as the
2813 decoding routine will check the constraints carefully. */
2814 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p) == 1)
2815 return ERR_OK;
2816 opcode = aarch64_find_next_opcode (opcode);
2817 }
2818
2819 return ERR_UND;
2820 }
2821
2822 /* Print operands. */
2823
2824 static void
2825 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2826 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2827 {
2828 int i, pcrel_p, num_printed;
2829 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2830 {
2831 char str[128];
2832 /* We rely mostly on the opcode's operand info; however, we also look
2833 into inst->operands to support disassembling of an optional
2834 operand.
2835 The two operand codes should be the same in all cases, apart from
2836 when the operand can be optional. */
2837 if (opcode->operands[i] == AARCH64_OPND_NIL
2838 || opnds[i].type == AARCH64_OPND_NIL)
2839 break;
2840
2841 /* Generate the operand string in STR. */
2842 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
2843 &info->target);
2844
2845 /* Print the delimiter (taking account of omitted operand(s)). */
2846 if (str[0] != '\0')
2847 (*info->fprintf_func) (info->stream, "%s",
2848 num_printed++ == 0 ? "\t" : ", ");
2849
2850 /* Print the operand. */
2851 if (pcrel_p)
2852 (*info->print_address_func) (info->target, info);
2853 else
2854 (*info->fprintf_func) (info->stream, "%s", str);
2855 }
2856 }
2857
2858 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
2859
2860 static void
2861 remove_dot_suffix (char *name, const aarch64_inst *inst)
2862 {
2863 char *ptr;
2864 size_t len;
2865
2866 ptr = strchr (inst->opcode->name, '.');
2867 assert (ptr && inst->cond);
2868 len = ptr - inst->opcode->name;
2869 assert (len < 8);
2870 strncpy (name, inst->opcode->name, len);
2871 name[len] = '\0';
2872 }
2873
2874 /* Print the instruction mnemonic name. */
2875
2876 static void
2877 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2878 {
2879 if (inst->opcode->flags & F_COND)
2880 {
2881 /* For instructions that are truly conditionally executed, e.g. b.cond,
2882 prepare the full mnemonic name with the corresponding condition
2883 suffix. */
2884 char name[8];
2885
2886 remove_dot_suffix (name, inst);
2887 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2888 }
2889 else
2890 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2891 }
2892
2893 /* Decide whether we need to print a comment after the operands of
2894 instruction INST. */
2895
2896 static void
2897 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
2898 {
2899 if (inst->opcode->flags & F_COND)
2900 {
2901 char name[8];
2902 unsigned int i, num_conds;
2903
2904 remove_dot_suffix (name, inst);
2905 num_conds = ARRAY_SIZE (inst->cond->names);
2906 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
2907 (*info->fprintf_func) (info->stream, "%s %s.%s",
2908 i == 1 ? " //" : ",",
2909 name, inst->cond->names[i]);
2910 }
2911 }
2912
2913 /* Print the instruction according to *INST. */
2914
2915 static void
2916 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2917 struct disassemble_info *info)
2918 {
2919 print_mnemonic_name (inst, info);
2920 print_operands (pc, inst->opcode, inst->operands, info);
2921 print_comment (inst, info);
2922 }
2923
2924 /* Entry-point of the instruction disassembler and printer. */
2925
2926 static void
2927 print_insn_aarch64_word (bfd_vma pc,
2928 uint32_t word,
2929 struct disassemble_info *info)
2930 {
2931 static const char *err_msg[6] =
2932 {
2933 [ERR_OK] = "_",
2934 [-ERR_UND] = "undefined",
2935 [-ERR_UNP] = "unpredictable",
2936 [-ERR_NYI] = "NYI"
2937 };
2938
2939 int ret;
2940 aarch64_inst inst;
2941
2942 info->insn_info_valid = 1;
2943 info->branch_delay_insns = 0;
2944 info->data_size = 0;
2945 info->target = 0;
2946 info->target2 = 0;
2947
2948 if (info->flags & INSN_HAS_RELOC)
2949 /* If the instruction has a reloc associated with it, then
2950 the offset field in the instruction will actually be the
2951 addend for the reloc. (If we are using REL type relocs).
2952 In such cases, we can ignore the pc when computing
2953 addresses, since the addend is not currently pc-relative. */
2954 pc = 0;
2955
2956 ret = aarch64_decode_insn (word, &inst, no_aliases);
2957
2958 if (((word >> 21) & 0x3ff) == 1)
2959 {
2960 /* RESERVED for ALES. */
2961 assert (ret != ERR_OK);
2962 ret = ERR_NYI;
2963 }
2964
2965 switch (ret)
2966 {
2967 case ERR_UND:
2968 case ERR_UNP:
2969 case ERR_NYI:
2970 /* Handle undefined instructions. */
2971 info->insn_type = dis_noninsn;
2972 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
2973 word, err_msg[-ret]);
2974 break;
2975 case ERR_OK:
2976 user_friendly_fixup (&inst);
2977 print_aarch64_insn (pc, &inst, info);
2978 break;
2979 default:
2980 abort ();
2981 }
2982 }
2983
2984 /* Disallow mapping symbols ($x, $d etc) from
2985 being displayed in symbol relative addresses. */
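/* For illustration (example names): "$x", "$d" and dotted forms such as
"$d.42" are rejected here, while an ordinary symbol such as "main", or one
like "$foo" whose second character is neither 'x' nor 'd', is accepted. */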
2986
2987 bfd_boolean
2988 aarch64_symbol_is_valid (asymbol * sym,
2989 struct disassemble_info * info ATTRIBUTE_UNUSED)
2990 {
2991 const char * name;
2992
2993 if (sym == NULL)
2994 return FALSE;
2995
2996 name = bfd_asymbol_name (sym);
2997
2998 return name
2999 && (name[0] != '$'
3000 || (name[1] != 'x' && name[1] != 'd')
3001 || (name[2] != '\0' && name[2] != '.'));
3002 }
3003
3004 /* Print data bytes on INFO->STREAM. */
3005
3006 static void
3007 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3008 uint32_t word,
3009 struct disassemble_info *info)
3010 {
3011 switch (info->bytes_per_chunk)
3012 {
3013 case 1:
3014 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3015 break;
3016 case 2:
3017 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3018 break;
3019 case 4:
3020 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3021 break;
3022 default:
3023 abort ();
3024 }
3025 }
3026
3027 /* Try to infer the code or data type from a symbol.
3028 Returns nonzero if *MAP_TYPE was set. */
3029
3030 static int
3031 get_sym_code_type (struct disassemble_info *info, int n,
3032 enum map_type *map_type)
3033 {
3034 elf_symbol_type *es;
3035 unsigned int type;
3036 const char *name;
3037
3038 es = *(elf_symbol_type **)(info->symtab + n);
3039 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3040
3041 /* If the symbol has function type then use that. */
3042 if (type == STT_FUNC)
3043 {
3044 *map_type = MAP_INSN;
3045 return TRUE;
3046 }
3047
3048 /* Check for mapping symbols. */
3049 name = bfd_asymbol_name(info->symtab[n]);
3050 if (name[0] == '$'
3051 && (name[1] == 'x' || name[1] == 'd')
3052 && (name[2] == '\0' || name[2] == '.'))
3053 {
3054 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3055 return TRUE;
3056 }
3057
3058 return FALSE;
3059 }
3060
3061 /* Entry-point of the AArch64 disassembler. */
3062
3063 int
3064 print_insn_aarch64 (bfd_vma pc,
3065 struct disassemble_info *info)
3066 {
3067 bfd_byte buffer[INSNLEN];
3068 int status;
3069 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
3070 bfd_boolean found = FALSE;
3071 unsigned int size = 4;
3072 unsigned long data;
3073
3074 if (info->disassembler_options)
3075 {
3076 set_default_aarch64_dis_options (info);
3077
3078 parse_aarch64_dis_options (info->disassembler_options);
3079
3080 /* To avoid repeated parsing of these options, we remove them here. */
3081 info->disassembler_options = NULL;
3082 }
3083
3084 /* AArch64 instructions are always little-endian. */
3085 info->endian_code = BFD_ENDIAN_LITTLE;
3086
3087 /* First check the full symtab for a mapping symbol, even if there
3088 are no usable non-mapping symbols for this address. */
3089 if (info->symtab_size != 0
3090 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3091 {
3092 enum map_type type = MAP_INSN;
3093 int last_sym = -1;
3094 bfd_vma addr;
3095 int n;
3096
3097 if (pc <= last_mapping_addr)
3098 last_mapping_sym = -1;
3099
3100 /* Start scanning at the start of the function, or wherever
3101 we finished last time. */
3102 n = info->symtab_pos + 1;
3103 if (n < last_mapping_sym)
3104 n = last_mapping_sym;
3105
3106 /* Scan up to the location being disassembled. */
3107 for (; n < info->symtab_size; n++)
3108 {
3109 addr = bfd_asymbol_value (info->symtab[n]);
3110 if (addr > pc)
3111 break;
3112 if ((info->section == NULL
3113 || info->section == info->symtab[n]->section)
3114 && get_sym_code_type (info, n, &type))
3115 {
3116 last_sym = n;
3117 found = TRUE;
3118 }
3119 }
3120
3121 if (!found)
3122 {
3123 n = info->symtab_pos;
3124 if (n < last_mapping_sym)
3125 n = last_mapping_sym;
3126
3127 /* No mapping symbol found at this address. Look backwards
3128 for a preceding one. */
3129 for (; n >= 0; n--)
3130 {
3131 if (get_sym_code_type (info, n, &type))
3132 {
3133 last_sym = n;
3134 found = TRUE;
3135 break;
3136 }
3137 }
3138 }
3139
3140 last_mapping_sym = last_sym;
3141 last_type = type;
3142
3143 /* Look a little bit ahead to see if we should print out
3144 less than four bytes of data. If there's a symbol,
3145 mapping or otherwise, after two bytes then don't
3146 print more. */
3147 if (last_type == MAP_DATA)
3148 {
3149 size = 4 - (pc & 3);
3150 for (n = last_sym + 1; n < info->symtab_size; n++)
3151 {
3152 addr = bfd_asymbol_value (info->symtab[n]);
3153 if (addr > pc)
3154 {
3155 if (addr - pc < size)
3156 size = addr - pc;
3157 break;
3158 }
3159 }
3160 /* If the next symbol is after three bytes, we need to
3161 print only part of the data, so that we can use either
3162 .byte or .short. */
3163 if (size == 3)
3164 size = (pc & 1) ? 1 : 2;
3165 }
3166 }
3167
3168 if (last_type == MAP_DATA)
3169 {
3170 /* size was set above. */
3171 info->bytes_per_chunk = size;
3172 info->display_endian = info->endian;
3173 printer = print_insn_data;
3174 }
3175 else
3176 {
3177 info->bytes_per_chunk = size = INSNLEN;
3178 info->display_endian = info->endian_code;
3179 printer = print_insn_aarch64_word;
3180 }
3181
3182 status = (*info->read_memory_func) (pc, buffer, size, info);
3183 if (status != 0)
3184 {
3185 (*info->memory_error_func) (status, pc, info);
3186 return -1;
3187 }
3188
3189 data = bfd_get_bits (buffer, size * 8,
3190 info->display_endian == BFD_ENDIAN_BIG);
3191
3192 (*printer) (pc, data, info);
3193
3194 return size;
3195 }
3196 \f
3197 void
3198 print_aarch64_disassembler_options (FILE *stream)
3199 {
3200 fprintf (stream, _("\n\
3201 The following AARCH64 specific disassembler options are supported for use\n\
3202 with the -M switch (multiple options should be separated by commas):\n"));
3203
3204 fprintf (stream, _("\n\
3205 no-aliases Don't print instruction aliases.\n"));
3206
3207 fprintf (stream, _("\n\
3208 aliases Do print instruction aliases.\n"));
3209
3210 #ifdef DEBUG_AARCH64
3211 fprintf (stream, _("\n\
3212 debug_dump Temp switch for debug trace.\n"));
3213 #endif /* DEBUG_AARCH64 */
3214
3215 fprintf (stream, _("\n"));
3216 }