Prevent an address violation when disassembling a corrupt AArch64 binary.
1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define ERR_OK 0
30 #define ERR_UND -1
31 #define ERR_UNP -3
32 #define ERR_NYI -5
33
34 #define INSNLEN 4
35
36 /* Cached mapping symbol state. */
37 enum map_type
38 {
39 MAP_INSN,
40 MAP_DATA
41 };
42
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
46
47 /* Other options */
48 static int no_aliases = 0; /* If set, disassemble as the most general instruction. */
49 \f
50
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
61 {
62 no_aliases = 1;
63 return;
64 }
65
66 if (CONST_STRNEQ (option, "aliases"))
67 {
68 no_aliases = 0;
69 return;
70 }
71
72 #ifdef DEBUG_AARCH64
73 if (CONST_STRNEQ (option, "debug_dump"))
74 {
75 debug_dump = 1;
76 return;
77 }
78 #endif /* DEBUG_AARCH64 */
79
80 /* Invalid option. */
81 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
82 }
83
84 static void
85 parse_aarch64_dis_options (const char *options)
86 {
87 const char *option_end;
88
89 if (options == NULL)
90 return;
91
92 while (*options != '\0')
93 {
94 /* Skip empty options. */
95 if (*options == ',')
96 {
97 options++;
98 continue;
99 }
100
101 /* We know that *options is neither NUL nor a comma. */
102 option_end = options + 1;
103 while (*option_end != ',' && *option_end != '\0')
104 option_end++;
105
106 parse_aarch64_dis_option (options, option_end - options);
107
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
111 }
112 }
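
/* The options string typically comes from the disassembler's -M switch:
   for instance "objdump -d -M no-aliases" ends up calling this function
   with "no-aliases", which is split at the commas and handed to
   parse_aarch64_dis_option one option at a time.  */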
113 \f
114 /* Functions doing the instruction disassembling. */
115
116 /* The unnamed arguments consist of the number of fields and information about
117 these fields where the VALUE will be extracted from CODE and returned.
118 MASK can be zero or the base mask of the opcode.
119 
120 N.B. the fields are required to be in such an order that the most significant
121 field for VALUE comes first, e.g. the <index> in
122 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123 is encoded in H:L:M in some cases; in those cases the fields should be
124 passed in the order H, L, M. */
125
126 aarch64_insn
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
128 {
129 uint32_t num;
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
132 va_list va;
133
134 va_start (va, mask);
135 num = va_arg (va, uint32_t);
136 assert (num <= 5);
137 aarch64_insn value = 0x0;
138 while (num--)
139 {
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
144 }
145 return value;
146 }
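
/* For example, the H:L:M <index> of the SQDMLAL form mentioned above is
   extracted with
     extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);
   where FLD_H supplies the most significant bit of the result and FLD_M
   the least significant one.  */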
147
148 /* Extract the value of all fields in SELF->fields from instruction CODE.
149 The least significant bit comes from the final field. */
150
151 static aarch64_insn
152 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
153 {
154 aarch64_insn value;
155 unsigned int i;
156 enum aarch64_field_kind kind;
157
158 value = 0;
159 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
160 {
161 kind = self->fields[i];
162 value <<= fields[kind].width;
163 value |= extract_field (kind, code, 0);
164 }
165 return value;
166 }
167
168 /* Sign-extend bit I of VALUE. */
169 static inline int32_t
170 sign_extend (aarch64_insn value, unsigned i)
171 {
172 uint32_t ret = value;
173
174 assert (i < 32);
175 if ((value >> i) & 0x1)
176 {
177 uint32_t val = (uint32_t)(-1) << i;
178 ret = ret | val;
179 }
180 return (int32_t) ret;
181 }
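
/* For example, sign_extend (0x1ff, 8) treats bit 8 as the sign bit of a
   9-bit field and returns -1, whereas sign_extend (0x0ff, 8) returns 255.  */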
182
183 /* N.B. the following inline helper functions create a dependency on the
184 order of operand qualifier enumerators. */
185
186 /* Given VALUE, return qualifier for a general purpose register. */
187 static inline enum aarch64_opnd_qualifier
188 get_greg_qualifier_from_value (aarch64_insn value)
189 {
190 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
191 assert (value <= 0x1
192 && aarch64_get_qualifier_standard_value (qualifier) == value);
193 return qualifier;
194 }
195
196 /* Given VALUE, return qualifier for a vector register. This does not support
197 decoding instructions that accept the 2H vector type. */
198
199 static inline enum aarch64_opnd_qualifier
200 get_vreg_qualifier_from_value (aarch64_insn value)
201 {
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
203
204 /* Instructions using vector type 2H should not call this function. Skip over
205 the 2H qualifier. */
206 if (qualifier >= AARCH64_OPND_QLF_V_2H)
207 qualifier += 1;
208
209 assert (value <= 0x8
210 && aarch64_get_qualifier_standard_value (qualifier) == value);
211 return qualifier;
212 }
213
214 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
215 static inline enum aarch64_opnd_qualifier
216 get_sreg_qualifier_from_value (aarch64_insn value)
217 {
218 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
219
220 assert (value <= 0x4
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
222 return qualifier;
223 }
224
225 /* Given the instruction in *INST, which is probably half way through
226 decoding, and a caller that wants to know the expected qualifier for
227 operand I, return such a qualifier if we can establish it; otherwise
228 return AARCH64_OPND_QLF_NIL. */
229
230 static aarch64_opnd_qualifier_t
231 get_expected_qualifier (const aarch64_inst *inst, int i)
232 {
233 aarch64_opnd_qualifier_seq_t qualifiers;
234 /* Should not be called if the qualifier is known. */
235 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
236 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
237 i, qualifiers))
238 return qualifiers[i];
239 else
240 return AARCH64_OPND_QLF_NIL;
241 }
242
243 /* Operand extractors. */
244
245 int
246 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
247 const aarch64_insn code,
248 const aarch64_inst *inst ATTRIBUTE_UNUSED)
249 {
250 info->reg.regno = extract_field (self->fields[0], code, 0);
251 return 1;
252 }
253
254 int
255 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
256 const aarch64_insn code ATTRIBUTE_UNUSED,
257 const aarch64_inst *inst ATTRIBUTE_UNUSED)
258 {
259 assert (info->idx == 1
260 || info->idx == 3);
261 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
262 return 1;
263 }
264
265 /* e.g. IC <ic_op>{, <Xt>}. */
266 int
267 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
268 const aarch64_insn code,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED)
270 {
271 info->reg.regno = extract_field (self->fields[0], code, 0);
272 assert (info->idx == 1
273 && (aarch64_get_operand_class (inst->operands[0].type)
274 == AARCH64_OPND_CLASS_SYSTEM));
275 /* This will make the constraint checking happy and more importantly will
276 help the disassembler determine whether this operand is optional or
277 not. */
278 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
279
280 return 1;
281 }
282
283 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
284 int
285 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
286 const aarch64_insn code,
287 const aarch64_inst *inst ATTRIBUTE_UNUSED)
288 {
289 /* regno */
290 info->reglane.regno = extract_field (self->fields[0], code,
291 inst->opcode->mask);
292
293 /* Index and/or type. */
294 if (inst->opcode->iclass == asisdone
295 || inst->opcode->iclass == asimdins)
296 {
297 if (info->type == AARCH64_OPND_En
298 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
299 {
300 unsigned shift;
301 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
302 assert (info->idx == 1); /* Vn */
303 aarch64_insn value = extract_field (FLD_imm4, code, 0);
304 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
305 info->qualifier = get_expected_qualifier (inst, info->idx);
306 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
307 info->reglane.index = value >> shift;
308 }
309 else
310 {
311 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
312 imm5<3:0> <V>
313 0000 RESERVED
314 xxx1 B
315 xx10 H
316 x100 S
317 1000 D */
318 int pos = -1;
319 aarch64_insn value = extract_field (FLD_imm5, code, 0);
320 while (++pos <= 3 && (value & 0x1) == 0)
321 value >>= 1;
322 if (pos > 3)
323 return 0;
324 info->qualifier = get_sreg_qualifier_from_value (pos);
325 info->reglane.index = (unsigned) (value >> 1);
326 }
327 }
328 else
329 {
330 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
331 or SQDMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>]. */
332
333 /* Need information in other operand(s) to help decoding. */
334 info->qualifier = get_expected_qualifier (inst, info->idx);
335 switch (info->qualifier)
336 {
337 case AARCH64_OPND_QLF_S_H:
338 /* h:l:m */
339 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
340 FLD_M);
341 info->reglane.regno &= 0xf;
342 break;
343 case AARCH64_OPND_QLF_S_S:
344 /* h:l */
345 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
346 break;
347 case AARCH64_OPND_QLF_S_D:
348 /* H */
349 info->reglane.index = extract_field (FLD_H, code, 0);
350 break;
351 default:
352 return 0;
353 }
354
355 if (inst->opcode->op == OP_FCMLA_ELEM)
356 {
357 /* Complex operand takes two elements. */
358 if (info->reglane.index & 1)
359 return 0;
360 info->reglane.index /= 2;
361 }
362 }
363
364 return 1;
365 }
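
/* Worked example for the DUP case above: an imm5 value of 01100 has its
   lowest set bit at position 2, selecting the S qualifier; the remaining
   bits 01 give index 1, i.e. DUP Sd, Vn.S[1].  */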
366
367 int
368 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
369 const aarch64_insn code,
370 const aarch64_inst *inst ATTRIBUTE_UNUSED)
371 {
372 /* R */
373 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
374 /* len */
375 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
376 return 1;
377 }
378
379 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
380 int
381 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
382 aarch64_opnd_info *info, const aarch64_insn code,
383 const aarch64_inst *inst)
384 {
385 aarch64_insn value;
386 /* Number of elements in each structure to be loaded/stored. */
387 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
388
389 struct
390 {
391 unsigned is_reserved;
392 unsigned num_regs;
393 unsigned num_elements;
394 } data [] =
395 { {0, 4, 4},
396 {1, 4, 4},
397 {0, 4, 1},
398 {0, 4, 2},
399 {0, 3, 3},
400 {1, 3, 3},
401 {0, 3, 1},
402 {0, 1, 1},
403 {0, 2, 2},
404 {1, 2, 2},
405 {0, 2, 1},
406 };
407
408 /* Rt */
409 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
410 /* opcode */
411 value = extract_field (FLD_opcode, code, 0);
412 /* PR 21595: Check for a bogus value. */
413 if (value >= ARRAY_SIZE (data))
414 return 0;
415 if (expected_num != data[value].num_elements || data[value].is_reserved)
416 return 0;
417 info->reglist.num_regs = data[value].num_regs;
418
419 return 1;
420 }
421
422 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
423 lanes instructions. */
424 int
425 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
426 aarch64_opnd_info *info, const aarch64_insn code,
427 const aarch64_inst *inst)
428 {
429 aarch64_insn value;
430
431 /* Rt */
432 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
433 /* S */
434 value = extract_field (FLD_S, code, 0);
435
436 /* Number of registers is equal to the number of elements in
437 each structure to be loaded/stored. */
438 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
439 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
440
441 /* Except when it is LD1R. */
442 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
443 info->reglist.num_regs = 2;
444
445 return 1;
446 }
447
448 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
449 load/store single element instructions. */
450 int
451 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
452 aarch64_opnd_info *info, const aarch64_insn code,
453 const aarch64_inst *inst ATTRIBUTE_UNUSED)
454 {
455 aarch64_field field = {0, 0};
456 aarch64_insn QSsize; /* fields Q:S:size. */
457 aarch64_insn opcodeh2; /* opcode<2:1> */
458
459 /* Rt */
460 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
461
462 /* Decode the index, opcode<2:1> and size. */
463 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
464 opcodeh2 = extract_field_2 (&field, code, 0);
465 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
466 switch (opcodeh2)
467 {
468 case 0x0:
469 info->qualifier = AARCH64_OPND_QLF_S_B;
470 /* Index encoded in "Q:S:size". */
471 info->reglist.index = QSsize;
472 break;
473 case 0x1:
474 if (QSsize & 0x1)
475 /* UND. */
476 return 0;
477 info->qualifier = AARCH64_OPND_QLF_S_H;
478 /* Index encoded in "Q:S:size<1>". */
479 info->reglist.index = QSsize >> 1;
480 break;
481 case 0x2:
482 if ((QSsize >> 1) & 0x1)
483 /* UND. */
484 return 0;
485 if ((QSsize & 0x1) == 0)
486 {
487 info->qualifier = AARCH64_OPND_QLF_S_S;
488 /* Index encoded in "Q:S". */
489 info->reglist.index = QSsize >> 2;
490 }
491 else
492 {
493 if (extract_field (FLD_S, code, 0))
494 /* UND */
495 return 0;
496 info->qualifier = AARCH64_OPND_QLF_S_D;
497 /* Index encoded in "Q". */
498 info->reglist.index = QSsize >> 3;
499 }
500 break;
501 default:
502 return 0;
503 }
504
505 info->reglist.has_index = 1;
506 info->reglist.num_regs = 0;
507 /* Number of registers is equal to the number of elements in
508 each structure to be loaded/stored. */
509 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
510 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
511
512 return 1;
513 }
514
515 /* Decode fields immh:immb and/or Q for e.g.
516 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
517 or SSHR <V><d>, <V><n>, #<shift>. */
518
519 int
520 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
521 aarch64_opnd_info *info, const aarch64_insn code,
522 const aarch64_inst *inst)
523 {
524 int pos;
525 aarch64_insn Q, imm, immh;
526 enum aarch64_insn_class iclass = inst->opcode->iclass;
527
528 immh = extract_field (FLD_immh, code, 0);
529 if (immh == 0)
530 return 0;
531 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
532 pos = 4;
533 /* Get highest set bit in immh. */
534 while (--pos >= 0 && (immh & 0x8) == 0)
535 immh <<= 1;
536
537 assert ((iclass == asimdshf || iclass == asisdshf)
538 && (info->type == AARCH64_OPND_IMM_VLSR
539 || info->type == AARCH64_OPND_IMM_VLSL));
540
541 if (iclass == asimdshf)
542 {
543 Q = extract_field (FLD_Q, code, 0);
544 /* immh Q <T>
545 0000 x SEE AdvSIMD modified immediate
546 0001 0 8B
547 0001 1 16B
548 001x 0 4H
549 001x 1 8H
550 01xx 0 2S
551 01xx 1 4S
552 1xxx 0 RESERVED
553 1xxx 1 2D */
554 info->qualifier =
555 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
556 }
557 else
558 info->qualifier = get_sreg_qualifier_from_value (pos);
559
560 if (info->type == AARCH64_OPND_IMM_VLSR)
561 /* immh <shift>
562 0000 SEE AdvSIMD modified immediate
563 0001 (16-UInt(immh:immb))
564 001x (32-UInt(immh:immb))
565 01xx (64-UInt(immh:immb))
566 1xxx (128-UInt(immh:immb)) */
567 info->imm.value = (16 << pos) - imm;
568 else
569 /* immh:immb
570 immh <shift>
571 0000 SEE AdvSIMD modified immediate
572 0001 (UInt(immh:immb)-8)
573 001x (UInt(immh:immb)-16)
574 01xx (UInt(immh:immb)-32)
575 1xxx (UInt(immh:immb)-64) */
576 info->imm.value = imm - (8 << pos);
577
578 return 1;
579 }
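
/* Worked example: SSHR <Vd>.8B, <Vn>.8B, #3 is encoded with
   immh:immb = 0001:101.  The highest set bit of immh gives pos = 0, so
   with Q = 0 the arrangement is 8B, and the shift amount is
   (16 << 0) - 0b0001101 = 16 - 13 = 3.  */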
580
581 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
582 int
583 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
584 aarch64_opnd_info *info, const aarch64_insn code,
585 const aarch64_inst *inst ATTRIBUTE_UNUSED)
586 {
587 int64_t imm;
588 aarch64_insn val;
589 val = extract_field (FLD_size, code, 0);
590 switch (val)
591 {
592 case 0: imm = 8; break;
593 case 1: imm = 16; break;
594 case 2: imm = 32; break;
595 default: return 0;
596 }
597 info->imm.value = imm;
598 return 1;
599 }
600
601 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
602 The value in the field(s) will be extracted as an unsigned immediate value. */
603 int
604 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
605 const aarch64_insn code,
606 const aarch64_inst *inst ATTRIBUTE_UNUSED)
607 {
608 int64_t imm;
609
610 imm = extract_all_fields (self, code);
611
612 if (operand_need_sign_extension (self))
613 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
614
615 if (operand_need_shift_by_two (self))
616 imm <<= 2;
617
618 if (info->type == AARCH64_OPND_ADDR_ADRP)
619 imm <<= 12;
620
621 info->imm.value = imm;
622 return 1;
623 }
624
625 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
626 int
627 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
628 const aarch64_insn code,
629 const aarch64_inst *inst ATTRIBUTE_UNUSED)
630 {
631 aarch64_ext_imm (self, info, code, inst);
632 info->shifter.kind = AARCH64_MOD_LSL;
633 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
634 return 1;
635 }
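
/* For example, MOVZ <Xd>, #0x1234, LSL #16 has hw = 01, so the shifter
   amount decoded here is 1 << 4 = 16; the #0x1234 itself was already
   extracted by aarch64_ext_imm above.  */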
636
637 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
638 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
639 int
640 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
641 aarch64_opnd_info *info,
642 const aarch64_insn code,
643 const aarch64_inst *inst ATTRIBUTE_UNUSED)
644 {
645 uint64_t imm;
646 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
647 aarch64_field field = {0, 0};
648
649 assert (info->idx == 1);
650
651 if (info->type == AARCH64_OPND_SIMD_FPIMM)
652 info->imm.is_fp = 1;
653
654 /* a:b:c:d:e:f:g:h */
655 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
656 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
657 {
658 /* Either MOVI <Dd>, #<imm>
659 or MOVI <Vd>.2D, #<imm>.
660 <imm> is a 64-bit immediate
661 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
662 encoded in "a:b:c:d:e:f:g:h". */
663 int i;
664 unsigned abcdefgh = imm;
665 for (imm = 0ull, i = 0; i < 8; i++)
666 if (((abcdefgh >> i) & 0x1) != 0)
667 imm |= 0xffull << (8 * i);
668 }
669 info->imm.value = imm;
670
671 /* cmode */
672 info->qualifier = get_expected_qualifier (inst, info->idx);
673 switch (info->qualifier)
674 {
675 case AARCH64_OPND_QLF_NIL:
676 /* no shift */
677 info->shifter.kind = AARCH64_MOD_NONE;
678 return 1;
679 case AARCH64_OPND_QLF_LSL:
680 /* shift zeros */
681 info->shifter.kind = AARCH64_MOD_LSL;
682 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
683 {
684 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
685 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
686 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
687 default: assert (0); return 0;
688 }
689 /* 00: 0; 01: 8; 10:16; 11:24. */
690 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
691 break;
692 case AARCH64_OPND_QLF_MSL:
693 /* shift ones */
694 info->shifter.kind = AARCH64_MOD_MSL;
695 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
696 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
697 break;
698 default:
699 assert (0);
700 return 0;
701 }
702
703 return 1;
704 }
705
706 /* Decode an 8-bit floating-point immediate. */
707 int
708 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
709 const aarch64_insn code,
710 const aarch64_inst *inst ATTRIBUTE_UNUSED)
711 {
712 info->imm.value = extract_all_fields (self, code);
713 info->imm.is_fp = 1;
714 return 1;
715 }
716
717 /* Decode a 1-bit rotate immediate (#90 or #270). */
718 int
719 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
720 const aarch64_insn code,
721 const aarch64_inst *inst ATTRIBUTE_UNUSED)
722 {
723 uint64_t rot = extract_field (self->fields[0], code, 0);
724 assert (rot < 2U);
725 info->imm.value = rot * 180 + 90;
726 return 1;
727 }
728
729 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
730 int
731 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
732 const aarch64_insn code,
733 const aarch64_inst *inst ATTRIBUTE_UNUSED)
734 {
735 uint64_t rot = extract_field (self->fields[0], code, 0);
736 assert (rot < 4U);
737 info->imm.value = rot * 90;
738 return 1;
739 }
740
741 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
742 int
743 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
744 aarch64_opnd_info *info, const aarch64_insn code,
745 const aarch64_inst *inst ATTRIBUTE_UNUSED)
746 {
747 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
748 return 1;
749 }
750
751 /* Decode arithmetic immediate for e.g.
752 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
753 int
754 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
755 aarch64_opnd_info *info, const aarch64_insn code,
756 const aarch64_inst *inst ATTRIBUTE_UNUSED)
757 {
758 aarch64_insn value;
759
760 info->shifter.kind = AARCH64_MOD_LSL;
761 /* shift */
762 value = extract_field (FLD_shift, code, 0);
763 if (value >= 2)
764 return 0;
765 info->shifter.amount = value ? 12 : 0;
766 /* imm12 (unsigned) */
767 info->imm.value = extract_field (FLD_imm12, code, 0);
768
769 return 1;
770 }
771
772 /* Return true if VALUE is a valid logical immediate encoding, storing the
773 decoded value in *RESULT if so. ESIZE is the number of bytes in the
774 decoded immediate. */
775 static int
776 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
777 {
778 uint64_t imm, mask;
779 uint32_t N, R, S;
780 unsigned simd_size;
781
782 /* value is N:immr:imms. */
783 S = value & 0x3f;
784 R = (value >> 6) & 0x3f;
785 N = (value >> 12) & 0x1;
786
787 /* The immediate value consists of S+1 bits set to 1, left rotated by
788 SIMDsize - R (in other words, right rotated by R), then replicated. */
789 if (N != 0)
790 {
791 simd_size = 64;
792 mask = 0xffffffffffffffffull;
793 }
794 else
795 {
796 switch (S)
797 {
798 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
799 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
800 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
801 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
802 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
803 default: return 0;
804 }
805 mask = (1ull << simd_size) - 1;
806 /* Top bits are IGNORED. */
807 R &= simd_size - 1;
808 }
809
810 if (simd_size > esize * 8)
811 return 0;
812
813 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
814 if (S == simd_size - 1)
815 return 0;
816 /* S+1 consecutive bits to 1. */
817 /* NOTE: S can't be 63 due to detection above. */
818 imm = (1ull << (S + 1)) - 1;
819 /* Rotate to the left by simd_size - R. */
820 if (R != 0)
821 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
822 /* Replicate the value according to SIMD size. */
823 switch (simd_size)
824 {
825 case 2: imm = (imm << 2) | imm;
826 /* Fall through. */
827 case 4: imm = (imm << 4) | imm;
828 /* Fall through. */
829 case 8: imm = (imm << 8) | imm;
830 /* Fall through. */
831 case 16: imm = (imm << 16) | imm;
832 /* Fall through. */
833 case 32: imm = (imm << 32) | imm;
834 /* Fall through. */
835 case 64: break;
836 default: assert (0); return 0;
837 }
838
839 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
840
841 return 1;
842 }
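
/* Worked example: the 13-bit encoding N:immr:imms = 1:000000:000111
   (i.e. VALUE == 0x1007) selects a 64-bit element with S = 7 and R = 0:
   eight consecutive ones, not rotated and not replicated, so with
   ESIZE == 8 the decoded immediate is 0xff.  */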
843
844 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
845 int
846 aarch64_ext_limm (const aarch64_operand *self,
847 aarch64_opnd_info *info, const aarch64_insn code,
848 const aarch64_inst *inst)
849 {
850 uint32_t esize;
851 aarch64_insn value;
852
853 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
854 self->fields[2]);
855 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
856 return decode_limm (esize, value, &info->imm.value);
857 }
858
859 /* Decode a logical immediate for the BIC alias of AND (etc.). */
860 int
861 aarch64_ext_inv_limm (const aarch64_operand *self,
862 aarch64_opnd_info *info, const aarch64_insn code,
863 const aarch64_inst *inst)
864 {
865 if (!aarch64_ext_limm (self, info, code, inst))
866 return 0;
867 info->imm.value = ~info->imm.value;
868 return 1;
869 }
870
871 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
872 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
873 int
874 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
875 aarch64_opnd_info *info,
876 const aarch64_insn code, const aarch64_inst *inst)
877 {
878 aarch64_insn value;
879
880 /* Rt */
881 info->reg.regno = extract_field (FLD_Rt, code, 0);
882
883 /* size */
884 value = extract_field (FLD_ldst_size, code, 0);
885 if (inst->opcode->iclass == ldstpair_indexed
886 || inst->opcode->iclass == ldstnapair_offs
887 || inst->opcode->iclass == ldstpair_off
888 || inst->opcode->iclass == loadlit)
889 {
890 enum aarch64_opnd_qualifier qualifier;
891 switch (value)
892 {
893 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
894 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
895 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
896 default: return 0;
897 }
898 info->qualifier = qualifier;
899 }
900 else
901 {
902 /* opc1:size */
903 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
904 if (value > 0x4)
905 return 0;
906 info->qualifier = get_sreg_qualifier_from_value (value);
907 }
908
909 return 1;
910 }
911
912 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
913 int
914 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
915 aarch64_opnd_info *info,
916 aarch64_insn code,
917 const aarch64_inst *inst ATTRIBUTE_UNUSED)
918 {
919 /* Rn */
920 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
921 return 1;
922 }
923
924 /* Decode the address operand for e.g.
925 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
926 int
927 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
928 aarch64_opnd_info *info,
929 aarch64_insn code, const aarch64_inst *inst)
930 {
931 aarch64_insn S, value;
932
933 /* Rn */
934 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
935 /* Rm */
936 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
937 /* option */
938 value = extract_field (FLD_option, code, 0);
939 info->shifter.kind =
940 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
941 /* Fix-up the shifter kind; although the table-driven approach is
942 efficient, it is slightly inflexible, thus needing this fix-up. */
943 if (info->shifter.kind == AARCH64_MOD_UXTX)
944 info->shifter.kind = AARCH64_MOD_LSL;
945 /* S */
946 S = extract_field (FLD_S, code, 0);
947 if (S == 0)
948 {
949 info->shifter.amount = 0;
950 info->shifter.amount_present = 0;
951 }
952 else
953 {
954 int size;
955 /* Need information in other operand(s) to help achieve the decoding
956 from 'S' field. */
957 info->qualifier = get_expected_qualifier (inst, info->idx);
958 /* Get the size of the data element that is accessed, which may be
959 different from that of the source register size, e.g. in strb/ldrb. */
960 size = aarch64_get_qualifier_esize (info->qualifier);
961 info->shifter.amount = get_logsz (size);
962 info->shifter.amount_present = 1;
963 }
964
965 return 1;
966 }
967
968 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
969 int
970 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
971 aarch64_insn code, const aarch64_inst *inst)
972 {
973 aarch64_insn imm;
974 info->qualifier = get_expected_qualifier (inst, info->idx);
975
976 /* Rn */
977 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
978 /* simm (imm9 or imm7) */
979 imm = extract_field (self->fields[0], code, 0);
980 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
981 if (self->fields[0] == FLD_imm7)
982 /* scaled immediate in ld/st pair instructions. */
983 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
984 /* qualifier */
985 if (inst->opcode->iclass == ldst_unscaled
986 || inst->opcode->iclass == ldstnapair_offs
987 || inst->opcode->iclass == ldstpair_off
988 || inst->opcode->iclass == ldst_unpriv)
989 info->addr.writeback = 0;
990 else
991 {
992 /* pre/post- index */
993 info->addr.writeback = 1;
994 if (extract_field (self->fields[1], code, 0) == 1)
995 info->addr.preind = 1;
996 else
997 info->addr.postind = 1;
998 }
999
1000 return 1;
1001 }
1002
1003 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1004 int
1005 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1006 aarch64_insn code,
1007 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1008 {
1009 int shift;
1010 info->qualifier = get_expected_qualifier (inst, info->idx);
1011 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1012 /* Rn */
1013 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1014 /* uimm12 */
1015 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1016 return 1;
1017 }
1018
1019 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1020 int
1021 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1022 aarch64_insn code,
1023 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1024 {
1025 aarch64_insn imm;
1026
1027 info->qualifier = get_expected_qualifier (inst, info->idx);
1028 /* Rn */
1029 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1030 /* simm10 */
1031 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1032 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1033 if (extract_field (self->fields[3], code, 0) == 1) {
1034 info->addr.writeback = 1;
1035 info->addr.preind = 1;
1036 }
1037 return 1;
1038 }
1039
1040 /* Decode the address operand for e.g.
1041 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1042 int
1043 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1044 aarch64_opnd_info *info,
1045 aarch64_insn code, const aarch64_inst *inst)
1046 {
1047 /* The opcode dependent area stores the number of elements in
1048 each structure to be loaded/stored. */
1049 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1050
1051 /* Rn */
1052 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1053 /* Rm | #<amount> */
1054 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1055 if (info->addr.offset.regno == 31)
1056 {
1057 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1058 /* Special handling of loading single structure to all lanes. */
1059 info->addr.offset.imm = (is_ld1r ? 1
1060 : inst->operands[0].reglist.num_regs)
1061 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1062 else
1063 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1064 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1065 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1066 }
1067 else
1068 info->addr.offset.is_reg = 1;
1069 info->addr.writeback = 1;
1070
1071 return 1;
1072 }
1073
1074 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1075 int
1076 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1077 aarch64_opnd_info *info,
1078 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1079 {
1080 aarch64_insn value;
1081 /* cond */
1082 value = extract_field (FLD_cond, code, 0);
1083 info->cond = get_cond_from_value (value);
1084 return 1;
1085 }
1086
1087 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1088 int
1089 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1090 aarch64_opnd_info *info,
1091 aarch64_insn code,
1092 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1093 {
1094 /* op0:op1:CRn:CRm:op2 */
1095 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1096 FLD_CRm, FLD_op2);
1097 return 1;
1098 }
1099
1100 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1101 int
1102 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1103 aarch64_opnd_info *info, aarch64_insn code,
1104 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1105 {
1106 int i;
1107 /* op1:op2 */
1108 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1109 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1110 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1111 return 1;
1112 /* Reserved value in <pstatefield>. */
1113 return 0;
1114 }
1115
1116 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1117 int
1118 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1119 aarch64_opnd_info *info,
1120 aarch64_insn code,
1121 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1122 {
1123 int i;
1124 aarch64_insn value;
1125 const aarch64_sys_ins_reg *sysins_ops;
1126 /* op0:op1:CRn:CRm:op2 */
1127 value = extract_fields (code, 0, 5,
1128 FLD_op0, FLD_op1, FLD_CRn,
1129 FLD_CRm, FLD_op2);
1130
1131 switch (info->type)
1132 {
1133 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1134 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1135 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1136 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1137 default: assert (0); return 0;
1138 }
1139
1140 for (i = 0; sysins_ops[i].name != NULL; ++i)
1141 if (sysins_ops[i].value == value)
1142 {
1143 info->sysins_op = sysins_ops + i;
1144 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1145 info->sysins_op->name,
1146 (unsigned)info->sysins_op->value,
1147 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1148 return 1;
1149 }
1150
1151 return 0;
1152 }
1153
1154 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1155
1156 int
1157 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1158 aarch64_opnd_info *info,
1159 aarch64_insn code,
1160 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1161 {
1162 /* CRm */
1163 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1164 return 1;
1165 }
1166
1167 /* Decode the prefetch operation option operand for e.g.
1168 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1169
1170 int
1171 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1172 aarch64_opnd_info *info,
1173 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1174 {
1175 /* prfop in Rt */
1176 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1177 return 1;
1178 }
1179
1180 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1181 to the matching name/value pair in aarch64_hint_options. */
1182
1183 int
1184 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1185 aarch64_opnd_info *info,
1186 aarch64_insn code,
1187 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1188 {
1189 /* CRm:op2. */
1190 unsigned hint_number;
1191 int i;
1192
1193 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1194
1195 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1196 {
1197 if (hint_number == aarch64_hint_options[i].value)
1198 {
1199 info->hint_option = &(aarch64_hint_options[i]);
1200 return 1;
1201 }
1202 }
1203
1204 return 0;
1205 }
1206
1207 /* Decode the extended register operand for e.g.
1208 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1209 int
1210 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1211 aarch64_opnd_info *info,
1212 aarch64_insn code,
1213 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1214 {
1215 aarch64_insn value;
1216
1217 /* Rm */
1218 info->reg.regno = extract_field (FLD_Rm, code, 0);
1219 /* option */
1220 value = extract_field (FLD_option, code, 0);
1221 info->shifter.kind =
1222 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1223 /* imm3 */
1224 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1225
1226 /* This makes the constraint checking happy. */
1227 info->shifter.operator_present = 1;
1228
1229 /* Assume inst->operands[0].qualifier has been resolved. */
1230 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1231 info->qualifier = AARCH64_OPND_QLF_W;
1232 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1233 && (info->shifter.kind == AARCH64_MOD_UXTX
1234 || info->shifter.kind == AARCH64_MOD_SXTX))
1235 info->qualifier = AARCH64_OPND_QLF_X;
1236
1237 return 1;
1238 }
1239
1240 /* Decode the shifted register operand for e.g.
1241 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1242 int
1243 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1244 aarch64_opnd_info *info,
1245 aarch64_insn code,
1246 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1247 {
1248 aarch64_insn value;
1249
1250 /* Rm */
1251 info->reg.regno = extract_field (FLD_Rm, code, 0);
1252 /* shift */
1253 value = extract_field (FLD_shift, code, 0);
1254 info->shifter.kind =
1255 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1256 if (info->shifter.kind == AARCH64_MOD_ROR
1257 && inst->opcode->iclass != log_shift)
1258 /* ROR is not available for the shifted register operand in arithmetic
1259 instructions. */
1260 return 0;
1261 /* imm6 */
1262 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1263
1264 /* This makes the constraint checking happy. */
1265 info->shifter.operator_present = 1;
1266
1267 return 1;
1268 }
1269
1270 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1271 where <offset> is given by the OFFSET parameter and where <factor> is
1272 1 plus SELF's operand-dependent value. fields[0] specifies the field
1273 that holds <base>. */
1274 static int
1275 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1276 aarch64_opnd_info *info, aarch64_insn code,
1277 int64_t offset)
1278 {
1279 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1280 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1281 info->addr.offset.is_reg = FALSE;
1282 info->addr.writeback = FALSE;
1283 info->addr.preind = TRUE;
1284 if (offset != 0)
1285 info->shifter.kind = AARCH64_MOD_MUL_VL;
1286 info->shifter.amount = 1;
1287 info->shifter.operator_present = (info->addr.offset.imm != 0);
1288 info->shifter.amount_present = FALSE;
1289 return 1;
1290 }
1291
1292 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1293 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1294 SELF's operand-dependent value. fields[0] specifies the field that
1295 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1296 int
1297 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1298 aarch64_opnd_info *info, aarch64_insn code,
1299 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1300 {
1301 int offset;
1302
1303 offset = extract_field (FLD_SVE_imm4, code, 0);
1304 offset = ((offset + 8) & 15) - 8;
1305 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1306 }
1307
1308 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1309 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1310 SELF's operand-dependent value. fields[0] specifies the field that
1311 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1312 int
1313 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1314 aarch64_opnd_info *info, aarch64_insn code,
1315 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1316 {
1317 int offset;
1318
1319 offset = extract_field (FLD_SVE_imm6, code, 0);
1320 offset = (((offset + 32) & 63) - 32);
1321 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1322 }
1323
1324 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1325 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1326 SELF's operand-dependent value. fields[0] specifies the field that
1327 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1328 and imm3 fields, with imm3 being the less-significant part. */
1329 int
1330 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1331 aarch64_opnd_info *info,
1332 aarch64_insn code,
1333 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1334 {
1335 int offset;
1336
1337 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1338 offset = (((offset + 256) & 511) - 256);
1339 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1340 }
1341
1342 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1343 is given by the OFFSET parameter and where <shift> is SELF's operand-
1344 dependent value. fields[0] specifies the base register field <base>. */
1345 static int
1346 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1347 aarch64_opnd_info *info, aarch64_insn code,
1348 int64_t offset)
1349 {
1350 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1351 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1352 info->addr.offset.is_reg = FALSE;
1353 info->addr.writeback = FALSE;
1354 info->addr.preind = TRUE;
1355 info->shifter.operator_present = FALSE;
1356 info->shifter.amount_present = FALSE;
1357 return 1;
1358 }
1359
1360 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1361 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1362 value. fields[0] specifies the base register field. */
1363 int
1364 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1365 aarch64_opnd_info *info, aarch64_insn code,
1366 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1367 {
1368 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1369 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1370 }
1371
1372 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1373 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1374 value. fields[0] specifies the base register field. */
1375 int
1376 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1377 aarch64_opnd_info *info, aarch64_insn code,
1378 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1379 {
1380 int offset = extract_field (FLD_SVE_imm6, code, 0);
1381 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1382 }
1383
1384 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1385 is SELF's operand-dependent value. fields[0] specifies the base
1386 register field and fields[1] specifies the offset register field. */
1387 int
1388 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1389 aarch64_opnd_info *info, aarch64_insn code,
1390 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1391 {
1392 int index_regno;
1393
1394 index_regno = extract_field (self->fields[1], code, 0);
1395 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1396 return 0;
1397
1398 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1399 info->addr.offset.regno = index_regno;
1400 info->addr.offset.is_reg = TRUE;
1401 info->addr.writeback = FALSE;
1402 info->addr.preind = TRUE;
1403 info->shifter.kind = AARCH64_MOD_LSL;
1404 info->shifter.amount = get_operand_specific_data (self);
1405 info->shifter.operator_present = (info->shifter.amount != 0);
1406 info->shifter.amount_present = (info->shifter.amount != 0);
1407 return 1;
1408 }
1409
1410 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1411 <shift> is SELF's operand-dependent value. fields[0] specifies the
1412 base register field, fields[1] specifies the offset register field and
1413 fields[2] is a single-bit field that selects SXTW over UXTW. */
1414 int
1415 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1416 aarch64_opnd_info *info, aarch64_insn code,
1417 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1418 {
1419 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1420 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1421 info->addr.offset.is_reg = TRUE;
1422 info->addr.writeback = FALSE;
1423 info->addr.preind = TRUE;
1424 if (extract_field (self->fields[2], code, 0))
1425 info->shifter.kind = AARCH64_MOD_SXTW;
1426 else
1427 info->shifter.kind = AARCH64_MOD_UXTW;
1428 info->shifter.amount = get_operand_specific_data (self);
1429 info->shifter.operator_present = TRUE;
1430 info->shifter.amount_present = (info->shifter.amount != 0);
1431 return 1;
1432 }
1433
1434 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1435 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1436 fields[0] specifies the base register field. */
1437 int
1438 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1439 aarch64_opnd_info *info, aarch64_insn code,
1440 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1441 {
1442 int offset = extract_field (FLD_imm5, code, 0);
1443 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1444 }
1445
1446 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1447 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1448 number. fields[0] specifies the base register field and fields[1]
1449 specifies the offset register field. */
1450 static int
1451 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1452 aarch64_insn code, enum aarch64_modifier_kind kind)
1453 {
1454 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1455 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1456 info->addr.offset.is_reg = TRUE;
1457 info->addr.writeback = FALSE;
1458 info->addr.preind = TRUE;
1459 info->shifter.kind = kind;
1460 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1461 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1462 || info->shifter.amount != 0);
1463 info->shifter.amount_present = (info->shifter.amount != 0);
1464 return 1;
1465 }
1466
1467 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1468 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1469 field and fields[1] specifies the offset register field. */
1470 int
1471 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1472 aarch64_opnd_info *info, aarch64_insn code,
1473 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1474 {
1475 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1476 }
1477
1478 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1479 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1480 field and fields[1] specifies the offset register field. */
1481 int
1482 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1483 aarch64_opnd_info *info, aarch64_insn code,
1484 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1485 {
1486 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1487 }
1488
1489 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1490 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1491 field and fields[1] specifies the offset register field. */
1492 int
1493 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1494 aarch64_opnd_info *info, aarch64_insn code,
1495 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1496 {
1497 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1498 }
1499
1500 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1501 has the raw field value and that the low 8 bits decode to VALUE. */
1502 static int
1503 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1504 {
1505 info->shifter.kind = AARCH64_MOD_LSL;
1506 info->shifter.amount = 0;
1507 if (info->imm.value & 0x100)
1508 {
1509 if (value == 0)
1510 /* Decode 0x100 as #0, LSL #8. */
1511 info->shifter.amount = 8;
1512 else
1513 value *= 256;
1514 }
1515 info->shifter.operator_present = (info->shifter.amount != 0);
1516 info->shifter.amount_present = (info->shifter.amount != 0);
1517 info->imm.value = value;
1518 return 1;
1519 }
1520
1521 /* Decode an SVE ADD/SUB immediate. */
1522 int
1523 aarch64_ext_sve_aimm (const aarch64_operand *self,
1524 aarch64_opnd_info *info, const aarch64_insn code,
1525 const aarch64_inst *inst)
1526 {
1527 return (aarch64_ext_imm (self, info, code, inst)
1528 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1529 }
1530
1531 /* Decode an SVE CPY/DUP immediate. */
1532 int
1533 aarch64_ext_sve_asimm (const aarch64_operand *self,
1534 aarch64_opnd_info *info, const aarch64_insn code,
1535 const aarch64_inst *inst)
1536 {
1537 return (aarch64_ext_imm (self, info, code, inst)
1538 && decode_sve_aimm (info, (int8_t) info->imm.value));
1539 }
1540
1541 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1542 The fields array specifies which field to use. */
1543 int
1544 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1545 aarch64_opnd_info *info, aarch64_insn code,
1546 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1547 {
1548 if (extract_field (self->fields[0], code, 0))
1549 info->imm.value = 0x3f800000;
1550 else
1551 info->imm.value = 0x3f000000;
1552 info->imm.is_fp = TRUE;
1553 return 1;
1554 }
1555
1556 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1557 The fields array specifies which field to use. */
1558 int
1559 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1560 aarch64_opnd_info *info, aarch64_insn code,
1561 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1562 {
1563 if (extract_field (self->fields[0], code, 0))
1564 info->imm.value = 0x40000000;
1565 else
1566 info->imm.value = 0x3f000000;
1567 info->imm.is_fp = TRUE;
1568 return 1;
1569 }
1570
1571 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1572 The fields array specifies which field to use. */
1573 int
1574 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1575 aarch64_opnd_info *info, aarch64_insn code,
1576 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1577 {
1578 if (extract_field (self->fields[0], code, 0))
1579 info->imm.value = 0x3f800000;
1580 else
1581 info->imm.value = 0x0;
1582 info->imm.is_fp = TRUE;
1583 return 1;
1584 }
1585
1586 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1587 array specifies which field to use for Zn. MM is encoded in the
1588 concatenation of imm5 and SVE_tszh, with imm5 being the less
1589 significant part. */
1590 int
1591 aarch64_ext_sve_index (const aarch64_operand *self,
1592 aarch64_opnd_info *info, aarch64_insn code,
1593 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1594 {
1595 int val;
1596
1597 info->reglane.regno = extract_field (self->fields[0], code, 0);
1598 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1599 if ((val & 31) == 0)
1600 return 0;
1601 while ((val & 1) == 0)
1602 val /= 2;
1603 info->reglane.index = val / 2;
1604 return 1;
1605 }
1606
1607 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1608 int
1609 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1610 aarch64_opnd_info *info, const aarch64_insn code,
1611 const aarch64_inst *inst)
1612 {
1613 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1614 return (aarch64_ext_limm (self, info, code, inst)
1615 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1616 }
1617
1618 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1619 and where MM occupies the most-significant part. The operand-dependent
1620 value specifies the number of bits in Zn. */
1621 int
1622 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1623 aarch64_opnd_info *info, aarch64_insn code,
1624 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1625 {
1626 unsigned int reg_bits = get_operand_specific_data (self);
1627 unsigned int val = extract_all_fields (self, code);
1628 info->reglane.regno = val & ((1 << reg_bits) - 1);
1629 info->reglane.index = val >> reg_bits;
1630 return 1;
1631 }
1632
1633 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1634 to use for Zn. The opcode-dependent value specifies the number
1635 of registers in the list. */
1636 int
1637 aarch64_ext_sve_reglist (const aarch64_operand *self,
1638 aarch64_opnd_info *info, aarch64_insn code,
1639 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1640 {
1641 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1642 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1643 return 1;
1644 }
1645
1646 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1647 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1648 field. */
1649 int
1650 aarch64_ext_sve_scale (const aarch64_operand *self,
1651 aarch64_opnd_info *info, aarch64_insn code,
1652 const aarch64_inst *inst)
1653 {
1654 int val;
1655
1656 if (!aarch64_ext_imm (self, info, code, inst))
1657 return 0;
1658 val = extract_field (FLD_SVE_imm4, code, 0);
1659 info->shifter.kind = AARCH64_MOD_MUL;
1660 info->shifter.amount = val + 1;
1661 info->shifter.operator_present = (val != 0);
1662 info->shifter.amount_present = (val != 0);
1663 return 1;
1664 }
1665
1666 /* Return the top set bit in VALUE, which is expected to be relatively
1667 small. */
1668 static uint64_t
1669 get_top_bit (uint64_t value)
1670 {
1671 while ((value & -value) != value)
1672 value -= value & -value;
1673 return value;
1674 }
1675
1676 /* Decode an SVE shift-left immediate. */
1677 int
1678 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1679 aarch64_opnd_info *info, const aarch64_insn code,
1680 const aarch64_inst *inst)
1681 {
1682 if (!aarch64_ext_imm (self, info, code, inst)
1683 || info->imm.value == 0)
1684 return 0;
1685
1686 info->imm.value -= get_top_bit (info->imm.value);
1687 return 1;
1688 }
1689
1690 /* Decode an SVE shift-right immediate. */
1691 int
1692 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1693 aarch64_opnd_info *info, const aarch64_insn code,
1694 const aarch64_inst *inst)
1695 {
1696 if (!aarch64_ext_imm (self, info, code, inst)
1697 || info->imm.value == 0)
1698 return 0;
1699
1700 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1701 return 1;
1702 }
1703 \f
1704 /* Bitfields that are commonly used to encode certain operands' information
1705 may be partially used as part of the base opcode in some instructions.
1706 For example, the bit 1 of the field 'size' in
1707 FCVTXN <Vb><d>, <Va><n>
1708 is actually part of the base opcode, while only size<0> is available
1709 for encoding the register type. Another example is the AdvSIMD
1710 instruction ORR (register), in which the field 'size' is also used for
1711 the base opcode, leaving only the field 'Q' available to encode the
1712 vector register arrangement specifier '8B' or '16B'.
1713
1714 This function tries to deduce the qualifier from the value of partially
1715 constrained field(s). Given the VALUE of such a field or fields, the
1716 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1717 operand encoding), the function returns the matching qualifier or
1718 AARCH64_OPND_QLF_NIL if nothing matches.
1719
1720 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1721 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1722 may end with AARCH64_OPND_QLF_NIL. */
1723
1724 static enum aarch64_opnd_qualifier
1725 get_qualifier_from_partial_encoding (aarch64_insn value,
1726 const enum aarch64_opnd_qualifier* \
1727 candidates,
1728 aarch64_insn mask)
1729 {
1730 int i;
1731 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1732 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1733 {
1734 aarch64_insn standard_value;
1735 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1736 break;
1737 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1738 if ((standard_value & mask) == (value & mask))
1739 return candidates[i];
1740 }
1741 return AARCH64_OPND_QLF_NIL;
1742 }
1743
1744 /* Given a list of qualifier sequences, return all possible valid qualifiers
1745 for operand IDX in QUALIFIERS.
1746 Assume QUALIFIERS is an array whose length is large enough. */
1747
1748 static void
1749 get_operand_possible_qualifiers (int idx,
1750 const aarch64_opnd_qualifier_seq_t *list,
1751 enum aarch64_opnd_qualifier *qualifiers)
1752 {
1753 int i;
1754 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1755 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1756 break;
1757 }
1758
1759 /* Decode the size Q field for e.g. SHADD.
1760 We tag one operand with the qualifier according to the code;
1761 whether the qualifier is valid for this opcode or not is the
1762 duty of the semantic checking. */
1763
1764 static int
1765 decode_sizeq (aarch64_inst *inst)
1766 {
1767 int idx;
1768 enum aarch64_opnd_qualifier qualifier;
1769 aarch64_insn code;
1770 aarch64_insn value, mask;
1771 enum aarch64_field_kind fld_sz;
1772 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1773
1774 if (inst->opcode->iclass == asisdlse
1775 || inst->opcode->iclass == asisdlsep
1776 || inst->opcode->iclass == asisdlso
1777 || inst->opcode->iclass == asisdlsop)
1778 fld_sz = FLD_vldst_size;
1779 else
1780 fld_sz = FLD_size;
1781
1782 code = inst->value;
1783 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1784 /* Work out which bits of the fields Q and size are actually
1785 available for operand encoding. Opcodes like FMAXNM and FMLA have
1786 size[1] unavailable. */
1787 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1788
1789 /* The index of the operand to be tagged with a qualifier, and the qualifier
1790 itself, are deduced from the value of the size and Q fields and the
1791 possible valid qualifier lists. */
1792 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1793 DEBUG_TRACE ("key idx: %d", idx);
1794
1795 /* For most of the related instructions, size:Q is fully available for
1796 operand encoding. */
1797 if (mask == 0x7)
1798 {
1799 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1800 return 1;
1801 }
1802
1803 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1804 candidates);
1805 #ifdef DEBUG_AARCH64
1806 if (debug_dump)
1807 {
1808 int i;
1809 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM
1810 && candidates[i] != AARCH64_OPND_QLF_NIL; ++i)
1811 DEBUG_TRACE ("qualifier %d: %s", i,
1812 aarch64_get_qualifier_name(candidates[i]));
1813 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1814 }
1815 #endif /* DEBUG_AARCH64 */
1816
1817 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1818
1819 if (qualifier == AARCH64_OPND_QLF_NIL)
1820 return 0;
1821
1822 inst->operands[idx].qualifier = qualifier;
1823 return 1;
1824 }
1825
1826 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1827 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1828
1829 static int
1830 decode_asimd_fcvt (aarch64_inst *inst)
1831 {
1832 aarch64_field field = {0, 0};
1833 aarch64_insn value;
1834 enum aarch64_opnd_qualifier qualifier;
1835
1836 gen_sub_field (FLD_size, 0, 1, &field);
1837 value = extract_field_2 (&field, inst->value, 0);
1838 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1839 : AARCH64_OPND_QLF_V_2D;
1840 switch (inst->opcode->op)
1841 {
1842 case OP_FCVTN:
1843 case OP_FCVTN2:
1844 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1845 inst->operands[1].qualifier = qualifier;
1846 break;
1847 case OP_FCVTL:
1848 case OP_FCVTL2:
1849 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1850 inst->operands[0].qualifier = qualifier;
1851 break;
1852 default:
1853 assert (0);
1854 return 0;
1855 }
1856
1857 return 1;
1858 }
1859
1860 /* Decode size[0], i.e. bit 22, for
1861 e.g. FCVTXN <Vb><d>, <Va><n>. */
1862
1863 static int
1864 decode_asisd_fcvtxn (aarch64_inst *inst)
1865 {
1866 aarch64_field field = {0, 0};
1867 gen_sub_field (FLD_size, 0, 1, &field);
1868 if (!extract_field_2 (&field, inst->value, 0))
1869 return 0;
1870 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1871 return 1;
1872 }
1873
1874 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1875 static int
1876 decode_fcvt (aarch64_inst *inst)
1877 {
1878 enum aarch64_opnd_qualifier qualifier;
1879 aarch64_insn value;
1880 const aarch64_field field = {15, 2};
1881
1882 /* opc dstsize */
1883 value = extract_field_2 (&field, inst->value, 0);
1884 switch (value)
1885 {
1886 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1887 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1888 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1889 default: return 0;
1890 }
1891 inst->operands[0].qualifier = qualifier;
1892
1893 return 1;
1894 }
1895
1896 /* Do miscellaneous decodings that are not common enough to be driven by
1897 flags. */
1898
1899 static int
1900 do_misc_decoding (aarch64_inst *inst)
1901 {
1902 unsigned int value;
1903 switch (inst->opcode->op)
1904 {
1905 case OP_FCVT:
1906 return decode_fcvt (inst);
1907
1908 case OP_FCVTN:
1909 case OP_FCVTN2:
1910 case OP_FCVTL:
1911 case OP_FCVTL2:
1912 return decode_asimd_fcvt (inst);
1913
1914 case OP_FCVTXN_S:
1915 return decode_asisd_fcvtxn (inst);
1916
1917 case OP_MOV_P_P:
1918 case OP_MOVS_P_P:
1919 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1920 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
1921 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1922
1923 case OP_MOV_Z_P_Z:
1924 return (extract_field (FLD_SVE_Zd, inst->value, 0)
1925 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1926
1927 case OP_MOV_Z_V:
1928 /* Index must be zero. */
1929 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1930 return value > 0 && value <= 16 && value == (value & -value);
1931
1932 case OP_MOV_Z_Z:
1933 return (extract_field (FLD_SVE_Zn, inst->value, 0)
1934 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1935
1936 case OP_MOV_Z_Zi:
1937 /* Index must be nonzero. */
1938 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1939 return value > 0 && value != (value & -value);
1940
1941 case OP_MOVM_P_P_P:
1942 return (extract_field (FLD_SVE_Pd, inst->value, 0)
1943 == extract_field (FLD_SVE_Pm, inst->value, 0));
1944
1945 case OP_MOVZS_P_P_P:
1946 case OP_MOVZ_P_P_P:
1947 return (extract_field (FLD_SVE_Pn, inst->value, 0)
1948 == extract_field (FLD_SVE_Pm, inst->value, 0));
1949
1950 case OP_NOTS_P_P_P_Z:
1951 case OP_NOT_P_P_P_Z:
1952 return (extract_field (FLD_SVE_Pm, inst->value, 0)
1953 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1954
1955 default:
1956 return 0;
1957 }
1958 }
1959
1960 /* Opcodes that have fields shared by multiple operands are usually flagged
1961 with special flags.  In this function, we detect such flags, decode the
1962 related field(s) and store the information in one of the related operands.
1963 That operand is not chosen arbitrarily: it is one of the operands that can
1964 accommodate all the information that has been decoded. */
1965
1966 static int
1967 do_special_decoding (aarch64_inst *inst)
1968 {
1969 int idx;
1970 aarch64_insn value;
1971 /* Condition for truly conditionally executed instructions, e.g. b.cond. */
1972 if (inst->opcode->flags & F_COND)
1973 {
1974 value = extract_field (FLD_cond2, inst->value, 0);
1975 inst->cond = get_cond_from_value (value);
1976 }
1977 /* 'sf' field. */
1978 if (inst->opcode->flags & F_SF)
1979 {
1980 idx = select_operand_for_sf_field_coding (inst->opcode);
1981 value = extract_field (FLD_sf, inst->value, 0);
1982 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1983 if ((inst->opcode->flags & F_N)
1984 && extract_field (FLD_N, inst->value, 0) != value)
1985 return 0;
1986 }
1987 /* The 'lse_sz' field, which selects the register size like 'sf'. */
1988 if (inst->opcode->flags & F_LSE_SZ)
1989 {
1990 idx = select_operand_for_sf_field_coding (inst->opcode);
1991 value = extract_field (FLD_lse_sz, inst->value, 0);
1992 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1993 }
1994 /* size:Q fields. */
1995 if (inst->opcode->flags & F_SIZEQ)
1996 return decode_sizeq (inst);
1997
1998 if (inst->opcode->flags & F_FPTYPE)
1999 {
2000 idx = select_operand_for_fptype_field_coding (inst->opcode);
2001 value = extract_field (FLD_type, inst->value, 0);
2002 switch (value)
2003 {
2004 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2005 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2006 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2007 default: return 0;
2008 }
2009 }
2010
2011 if (inst->opcode->flags & F_SSIZE)
2012 {
2013 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have size[1] as part
2014 of the base opcode. */
2015 aarch64_insn mask;
2016 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2017 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2018 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2019 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2020 /* For most of the related instructions, the 'size' field is fully available for
2021 operand encoding. */
2022 if (mask == 0x3)
2023 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2024 else
2025 {
2026 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2027 candidates);
2028 inst->operands[idx].qualifier
2029 = get_qualifier_from_partial_encoding (value, candidates, mask);
2030 }
2031 }
2032
2033 if (inst->opcode->flags & F_T)
2034 {
2035 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2036 int num = 0;
2037 unsigned val, Q;
2038 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2039 == AARCH64_OPND_CLASS_SIMD_REG);
2040 /* imm5<3:0> q <t>
2041 0000 x reserved
2042 xxx1 0 8b
2043 xxx1 1 16b
2044 xx10 0 4h
2045 xx10 1 8h
2046 x100 0 2s
2047 x100 1 4s
2048 1000 0 reserved
2049 1000 1 2d */
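/* For example, imm5 = x100 with Q = 1 gives num = 2 below, so
   (num << 1) | Q = 5, which per the table above selects '4s'.  */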
2050 val = extract_field (FLD_imm5, inst->value, 0);
2051 while ((val & 0x1) == 0 && ++num <= 3)
2052 val >>= 1;
2053 if (num > 3)
2054 return 0;
2055 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2056 inst->operands[0].qualifier =
2057 get_vreg_qualifier_from_value ((num << 1) | Q);
2058 }
2059
2060 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2061 {
2062 /* Use Rt to encode in the case of e.g.
2063 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2064 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2065 if (idx == -1)
2066 {
2067 /* Otherwise use the result operand, which has to be an integer
2068 register. */
2069 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2070 == AARCH64_OPND_CLASS_INT_REG);
2071 idx = 0;
2072 }
2073 assert (idx == 0 || idx == 1);
2074 value = extract_field (FLD_Q, inst->value, 0);
2075 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2076 }
2077
2078 if (inst->opcode->flags & F_LDS_SIZE)
2079 {
2080 aarch64_field field = {0, 0};
2081 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2082 == AARCH64_OPND_CLASS_INT_REG);
2083 gen_sub_field (FLD_opc, 0, 1, &field);
2084 value = extract_field_2 (&field, inst->value, 0);
2085 inst->operands[0].qualifier
2086 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2087 }
2088
2089 /* Miscellaneous decoding; done as the last step. */
2090 if (inst->opcode->flags & F_MISC)
2091 return do_misc_decoding (inst);
2092
2093 return 1;
2094 }
2095
2096 /* Converters that convert a real opcode instruction to its alias form. */
2097
2098 /* ROR <Wd>, <Ws>, #<shift>
2099 is equivalent to:
2100 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2101 static int
2102 convert_extr_to_ror (aarch64_inst *inst)
2103 {
2104 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2105 {
2106 copy_operand_info (inst, 2, 3);
2107 inst->operands[3].type = AARCH64_OPND_NIL;
2108 return 1;
2109 }
2110 return 0;
2111 }
2112
2113 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2114 is equivalent to:
2115 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2116 static int
2117 convert_shll_to_xtl (aarch64_inst *inst)
2118 {
2119 if (inst->operands[2].imm.value == 0)
2120 {
2121 inst->operands[2].type = AARCH64_OPND_NIL;
2122 return 1;
2123 }
2124 return 0;
2125 }
2126
2127 /* Convert
2128 UBFM <Xd>, <Xn>, #<shift>, #63.
2129 to
2130 LSR <Xd>, <Xn>, #<shift>. */
2131 static int
2132 convert_bfm_to_sr (aarch64_inst *inst)
2133 {
2134 int64_t imms, val;
2135
2136 imms = inst->operands[3].imm.value;
2137 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2138 if (imms == val)
2139 {
2140 inst->operands[3].type = AARCH64_OPND_NIL;
2141 return 1;
2142 }
2143
2144 return 0;
2145 }
2146
2147 /* Convert MOV to ORR. */
2148 static int
2149 convert_orr_to_mov (aarch64_inst *inst)
2150 {
2151 /* MOV <Vd>.<T>, <Vn>.<T>
2152 is equivalent to:
2153 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2154 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2155 {
2156 inst->operands[2].type = AARCH64_OPND_NIL;
2157 return 1;
2158 }
2159 return 0;
2160 }
2161
2162 /* When <imms> >= <immr>, the instruction written:
2163 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2164 is equivalent to:
2165 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
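/* For example, SBFM <Xd>, <Xn>, #8, #11 satisfies imms >= immr and is
   rewritten below as SBFX <Xd>, <Xn>, #8, #4 (lsb 8, width 11 + 1 - 8).  */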
2166
2167 static int
2168 convert_bfm_to_bfx (aarch64_inst *inst)
2169 {
2170 int64_t immr, imms;
2171
2172 immr = inst->operands[2].imm.value;
2173 imms = inst->operands[3].imm.value;
2174 if (imms >= immr)
2175 {
2176 int64_t lsb = immr;
2177 inst->operands[2].imm.value = lsb;
2178 inst->operands[3].imm.value = imms + 1 - lsb;
2179 /* The two opcodes have different qualifiers for
2180 the immediate operands; reset to help the checking. */
2181 reset_operand_qualifier (inst, 2);
2182 reset_operand_qualifier (inst, 3);
2183 return 1;
2184 }
2185
2186 return 0;
2187 }
2188
2189 /* When <imms> < <immr>, the instruction written:
2190 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2191 is equivalent to:
2192 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
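/* For example, SBFM <Xd>, <Xn>, #56, #3 has imms < immr and is rewritten
   below as SBFIZ <Xd>, <Xn>, #8, #4, since (64 - 56) & 63 = 8 and
   3 + 1 = 4.  */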
2193
2194 static int
2195 convert_bfm_to_bfi (aarch64_inst *inst)
2196 {
2197 int64_t immr, imms, val;
2198
2199 immr = inst->operands[2].imm.value;
2200 imms = inst->operands[3].imm.value;
2201 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2202 if (imms < immr)
2203 {
2204 inst->operands[2].imm.value = (val - immr) & (val - 1);
2205 inst->operands[3].imm.value = imms + 1;
2206 /* The two opcodes have different qualifiers for
2207 the immediate operands; reset to help the checking. */
2208 reset_operand_qualifier (inst, 2);
2209 reset_operand_qualifier (inst, 3);
2210 return 1;
2211 }
2212
2213 return 0;
2214 }
2215
2216 /* The instruction written:
2217 BFC <Xd>, #<lsb>, #<width>
2218 is equivalent to:
2219 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2220
2221 static int
2222 convert_bfm_to_bfc (aarch64_inst *inst)
2223 {
2224 int64_t immr, imms, val;
2225
2226 /* Should have been assured by the base opcode value. */
2227 assert (inst->operands[1].reg.regno == 0x1f);
2228
2229 immr = inst->operands[2].imm.value;
2230 imms = inst->operands[3].imm.value;
2231 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2232 if (imms < immr)
2233 {
2234 /* Drop XZR from the second operand. */
2235 copy_operand_info (inst, 1, 2);
2236 copy_operand_info (inst, 2, 3);
2237 inst->operands[3].type = AARCH64_OPND_NIL;
2238
2239 /* Recalculate the immediates. */
2240 inst->operands[1].imm.value = (val - immr) & (val - 1);
2241 inst->operands[2].imm.value = imms + 1;
2242
2243 /* The two opcodes have different qualifiers for the operands; reset to
2244 help the checking. */
2245 reset_operand_qualifier (inst, 1);
2246 reset_operand_qualifier (inst, 2);
2247 reset_operand_qualifier (inst, 3);
2248
2249 return 1;
2250 }
2251
2252 return 0;
2253 }
2254
2255 /* The instruction written:
2256 LSL <Xd>, <Xn>, #<shift>
2257 is equivalent to:
2258 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
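/* For example, UBFM <Xd>, <Xn>, #60, #59 satisfies immr == imms + 1 and is
   rewritten below as LSL <Xd>, <Xn>, #4 (i.e. 63 - 59); a shift of 0 is the
   special case immr == 0, imms == 63.  */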
2259
2260 static int
2261 convert_ubfm_to_lsl (aarch64_inst *inst)
2262 {
2263 int64_t immr = inst->operands[2].imm.value;
2264 int64_t imms = inst->operands[3].imm.value;
2265 int64_t val
2266 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2267
2268 if ((immr == 0 && imms == val) || immr == imms + 1)
2269 {
2270 inst->operands[3].type = AARCH64_OPND_NIL;
2271 inst->operands[2].imm.value = val - imms;
2272 return 1;
2273 }
2274
2275 return 0;
2276 }
2277
2278 /* CINC <Wd>, <Wn>, <cond>
2279 is equivalent to:
2280 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2281 where <cond> is not AL or NV. */
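/* For example, CSINC W0, W1, W1, NE is disassembled below as
   CINC W0, W1, EQ, i.e. with the condition inverted.  */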
2282
2283 static int
2284 convert_from_csel (aarch64_inst *inst)
2285 {
2286 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2287 && (inst->operands[3].cond->value & 0xe) != 0xe)
2288 {
2289 copy_operand_info (inst, 2, 3);
2290 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2291 inst->operands[3].type = AARCH64_OPND_NIL;
2292 return 1;
2293 }
2294 return 0;
2295 }
2296
2297 /* CSET <Wd>, <cond>
2298 is equivalent to:
2299 CSINC <Wd>, WZR, WZR, invert(<cond>)
2300 where <cond> is not AL or NV. */
2301
2302 static int
2303 convert_csinc_to_cset (aarch64_inst *inst)
2304 {
2305 if (inst->operands[1].reg.regno == 0x1f
2306 && inst->operands[2].reg.regno == 0x1f
2307 && (inst->operands[3].cond->value & 0xe) != 0xe)
2308 {
2309 copy_operand_info (inst, 1, 3);
2310 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2311 inst->operands[3].type = AARCH64_OPND_NIL;
2312 inst->operands[2].type = AARCH64_OPND_NIL;
2313 return 1;
2314 }
2315 return 0;
2316 }
2317
2318 /* MOV <Wd>, #<imm>
2319 is equivalent to:
2320 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2321
2322 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2323 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2324 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2325 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2326 machine-instruction mnemonic must be used. */
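/* For example, MOVZ W0, #0x12, LSL #16 is disassembled below as
   MOV W0, #0x120000.  */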
2327
2328 static int
2329 convert_movewide_to_mov (aarch64_inst *inst)
2330 {
2331 uint64_t value = inst->operands[1].imm.value;
2332 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2333 if (value == 0 && inst->operands[1].shifter.amount != 0)
2334 return 0;
2335 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2336 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2337 value <<= inst->operands[1].shifter.amount;
2338 /* As an alias converter, keep in mind that INST->OPCODE
2339 is the opcode of the real instruction. */
2340 if (inst->opcode->op == OP_MOVN)
2341 {
2342 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2343 value = ~value;
2344 /* A MOVN has an immediate that could be encoded by MOVZ. */
2345 if (aarch64_wide_constant_p (value, is32, NULL))
2346 return 0;
2347 }
2348 inst->operands[1].imm.value = value;
2349 inst->operands[1].shifter.amount = 0;
2350 return 1;
2351 }
2352
2353 /* MOV <Wd>, #<imm>
2354 is equivalent to:
2355 ORR <Wd>, WZR, #<imm>.
2356
2357 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2358 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2359 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2360 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2361 machine-instruction mnemonic must be used. */
2362
2363 static int
2364 convert_movebitmask_to_mov (aarch64_inst *inst)
2365 {
2366 int is32;
2367 uint64_t value;
2368
2369 /* Should have been assured by the base opcode value. */
2370 assert (inst->operands[1].reg.regno == 0x1f);
2371 copy_operand_info (inst, 1, 2);
2372 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2373 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2374 value = inst->operands[1].imm.value;
2375 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2376 instruction. */
2377 if (inst->operands[0].reg.regno != 0x1f
2378 && (aarch64_wide_constant_p (value, is32, NULL)
2379 || aarch64_wide_constant_p (~value, is32, NULL)))
2380 return 0;
2381
2382 inst->operands[2].type = AARCH64_OPND_NIL;
2383 return 1;
2384 }
2385
2386 /* Some alias opcodes are disassembled by being converted from their real form.
2387 N.B. INST->OPCODE is the real opcode rather than the alias. */
2388
2389 static int
2390 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2391 {
2392 switch (alias->op)
2393 {
2394 case OP_ASR_IMM:
2395 case OP_LSR_IMM:
2396 return convert_bfm_to_sr (inst);
2397 case OP_LSL_IMM:
2398 return convert_ubfm_to_lsl (inst);
2399 case OP_CINC:
2400 case OP_CINV:
2401 case OP_CNEG:
2402 return convert_from_csel (inst);
2403 case OP_CSET:
2404 case OP_CSETM:
2405 return convert_csinc_to_cset (inst);
2406 case OP_UBFX:
2407 case OP_BFXIL:
2408 case OP_SBFX:
2409 return convert_bfm_to_bfx (inst);
2410 case OP_SBFIZ:
2411 case OP_BFI:
2412 case OP_UBFIZ:
2413 return convert_bfm_to_bfi (inst);
2414 case OP_BFC:
2415 return convert_bfm_to_bfc (inst);
2416 case OP_MOV_V:
2417 return convert_orr_to_mov (inst);
2418 case OP_MOV_IMM_WIDE:
2419 case OP_MOV_IMM_WIDEN:
2420 return convert_movewide_to_mov (inst);
2421 case OP_MOV_IMM_LOG:
2422 return convert_movebitmask_to_mov (inst);
2423 case OP_ROR_IMM:
2424 return convert_extr_to_ror (inst);
2425 case OP_SXTL:
2426 case OP_SXTL2:
2427 case OP_UXTL:
2428 case OP_UXTL2:
2429 return convert_shll_to_xtl (inst);
2430 default:
2431 return 0;
2432 }
2433 }
2434
2435 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2436 aarch64_inst *, int);
2437
2438 /* Given the instruction information in *INST, check if the instruction has
2439 any alias form that can be used to represent *INST. If the answer is yes,
2440 update *INST to be in the form of the determined alias. */
2441
2442 /* In the opcode description table, the following flags are used in opcode
2443 entries to help establish the relations between the real and alias opcodes:
2444
2445 F_ALIAS: opcode is an alias
2446 F_HAS_ALIAS: opcode has alias(es)
2447 F_P1
2448 F_P2
2449 F_P3: Disassembly preference priority 1-3 (the higher the number,
2450 the higher the priority).  If nothing is specified, the priority
2451 defaults to 0, i.e. the lowest priority.
2452
2453 Although the relation between the machine and the alias instructions is not
2454 explicitly described, it can be easily determined from the base opcode
2455 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2456 description entries:
2457
2458 The mask of an alias opcode must be equal to or a super-set (i.e. more
2459 constrained) of that of the aliased opcode; so is the base opcode value.
2460
2461 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2462 && (opcode->mask & real->mask) == real->mask
2463 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2464 then OPCODE is an alias of, and only of, the REAL instruction
2465
2466 The alias relationship is forced to be flat-structured to keep the related
2467 algorithm simple; an opcode entry cannot have both F_ALIAS and F_HAS_ALIAS.
2468
2469 During disassembly, the decoding decision tree (in
2470 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2471 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2472 not specified), the disassembler will check whether any alias
2473 instruction exists for this real instruction.  If there is, the disassembler
2474 will try to disassemble the 32-bit binary again using the alias's rule, or
2475 try to convert the IR to the form of the alias.  When there are multiple
2476 aliases, they are tried one by one from the highest priority
2477 (currently the flag F_P3) to the lowest priority (no priority flag), and the
2478 first one that succeeds is adopted.
2479
2480 You may ask why there is a need for the conversion of IR from one form to
2481 another in handling certain aliases. This is because on one hand it avoids
2482 adding more operand code to handle unusual encoding/decoding; on the other
2483 hand, during disassembly, the conversion is an effective approach to
2484 check the condition of an alias (as an alias may be adopted only if certain
2485 conditions are met).
2486
2487 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2488 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2489 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
2490
2491 static void
2492 determine_disassembling_preference (struct aarch64_inst *inst)
2493 {
2494 const aarch64_opcode *opcode;
2495 const aarch64_opcode *alias;
2496
2497 opcode = inst->opcode;
2498
2499 /* This opcode does not have an alias, so use itself. */
2500 if (!opcode_has_alias (opcode))
2501 return;
2502
2503 alias = aarch64_find_alias_opcode (opcode);
2504 assert (alias);
2505
2506 #ifdef DEBUG_AARCH64
2507 if (debug_dump)
2508 {
2509 const aarch64_opcode *tmp = alias;
2510 printf ("#### LIST orderd: ");
2511 while (tmp)
2512 {
2513 printf ("%s, ", tmp->name);
2514 tmp = aarch64_find_next_alias_opcode (tmp);
2515 }
2516 printf ("\n");
2517 }
2518 #endif /* DEBUG_AARCH64 */
2519
2520 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2521 {
2522 DEBUG_TRACE ("try %s", alias->name);
2523 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2524
2525 /* An alias can be a pseudo opcode which will never be used in the
2526 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2527 aliasing AND. */
2528 if (pseudo_opcode_p (alias))
2529 {
2530 DEBUG_TRACE ("skip pseudo %s", alias->name);
2531 continue;
2532 }
2533
2534 if ((inst->value & alias->mask) != alias->opcode)
2535 {
2536 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2537 continue;
2538 }
2539 /* No need to do any complicated transformation on operands, if the alias
2540 opcode does not have any operand. */
2541 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2542 {
2543 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2544 aarch64_replace_opcode (inst, alias);
2545 return;
2546 }
2547 if (alias->flags & F_CONV)
2548 {
2549 aarch64_inst copy;
2550 memcpy (&copy, inst, sizeof (aarch64_inst));
2551 /* ALIAS is the preference as long as the instruction can be
2552 successfully converted to the form of ALIAS. */
2553 if (convert_to_alias (&copy, alias) == 1)
2554 {
2555 aarch64_replace_opcode (&copy, alias);
2556 assert (aarch64_match_operands_constraint (&copy, NULL));
2557 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2558 memcpy (inst, &copy, sizeof (aarch64_inst));
2559 return;
2560 }
2561 }
2562 else
2563 {
2564 /* Directly decode the alias opcode. */
2565 aarch64_inst temp;
2566 memset (&temp, '\0', sizeof (aarch64_inst));
2567 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
2568 {
2569 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2570 memcpy (inst, &temp, sizeof (aarch64_inst));
2571 return;
2572 }
2573 }
2574 }
2575 }
2576
2577 /* Some instructions (including all SVE ones) use the instruction class
2578 to describe how a qualifiers_list index is represented in the instruction
2579 encoding. If INST is such an instruction, decode the appropriate fields
2580 and fill in the operand qualifiers accordingly. Return true if no
2581 problems are found. */
2582
2583 static bfd_boolean
2584 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2585 {
2586 int i, variant;
2587
2588 variant = 0;
2589 switch (inst->opcode->iclass)
2590 {
2591 case sve_cpy:
2592 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2593 break;
2594
2595 case sve_index:
2596 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2597 if ((i & 31) == 0)
2598 return FALSE;
2599 while ((i & 1) == 0)
2600 {
2601 i >>= 1;
2602 variant += 1;
2603 }
2604 break;
2605
2606 case sve_limm:
2607 /* Pick the smallest applicable element size. */
2608 if ((inst->value & 0x20600) == 0x600)
2609 variant = 0;
2610 else if ((inst->value & 0x20400) == 0x400)
2611 variant = 1;
2612 else if ((inst->value & 0x20000) == 0)
2613 variant = 2;
2614 else
2615 variant = 3;
2616 break;
2617
2618 case sve_misc:
2619 /* sve_misc instructions have only a single variant. */
2620 break;
2621
2622 case sve_movprfx:
2623 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2624 break;
2625
2626 case sve_pred_zm:
2627 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2628 break;
2629
2630 case sve_shift_pred:
2631 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2632 sve_shift:
2633 if (i == 0)
2634 return FALSE;
2635 while (i != 1)
2636 {
2637 i >>= 1;
2638 variant += 1;
2639 }
2640 break;
2641
2642 case sve_shift_unpred:
2643 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2644 goto sve_shift;
2645
2646 case sve_size_bhs:
2647 variant = extract_field (FLD_size, inst->value, 0);
2648 if (variant >= 3)
2649 return FALSE;
2650 break;
2651
2652 case sve_size_bhsd:
2653 variant = extract_field (FLD_size, inst->value, 0);
2654 break;
2655
2656 case sve_size_hsd:
2657 i = extract_field (FLD_size, inst->value, 0);
2658 if (i < 1)
2659 return FALSE;
2660 variant = i - 1;
2661 break;
2662
2663 case sve_size_sd:
2664 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2665 break;
2666
2667 default:
2668 /* No mapping between instruction class and qualifiers. */
2669 return TRUE;
2670 }
2671
2672 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2673 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2674 return TRUE;
2675 }
2676 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2677 fails, which means that CODE is not an instruction of OPCODE; otherwise
2678 return 1.
2679
2680 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2681 determined and used to disassemble CODE; this is done just before the
2682 return. */
2683
2684 static int
2685 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2686 aarch64_inst *inst, int noaliases_p)
2687 {
2688 int i;
2689
2690 DEBUG_TRACE ("enter with %s", opcode->name);
2691
2692 assert (opcode && inst);
2693
2694 /* Check the base opcode. */
2695 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2696 {
2697 DEBUG_TRACE ("base opcode match FAIL");
2698 goto decode_fail;
2699 }
2700
2701 /* Clear inst. */
2702 memset (inst, '\0', sizeof (aarch64_inst));
2703
2704 inst->opcode = opcode;
2705 inst->value = code;
2706
2707 /* Assign operand codes and indexes. */
2708 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2709 {
2710 if (opcode->operands[i] == AARCH64_OPND_NIL)
2711 break;
2712 inst->operands[i].type = opcode->operands[i];
2713 inst->operands[i].idx = i;
2714 }
2715
2716 /* Call the opcode decoder indicated by flags. */
2717 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2718 {
2719 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2720 goto decode_fail;
2721 }
2722
2723 /* Possibly use the instruction class to determine the correct
2724 qualifier. */
2725 if (!aarch64_decode_variant_using_iclass (inst))
2726 {
2727 DEBUG_TRACE ("iclass-based decoder FAIL");
2728 goto decode_fail;
2729 }
2730
2731 /* Call operand decoders. */
2732 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2733 {
2734 const aarch64_operand *opnd;
2735 enum aarch64_opnd type;
2736
2737 type = opcode->operands[i];
2738 if (type == AARCH64_OPND_NIL)
2739 break;
2740 opnd = &aarch64_operands[type];
2741 if (operand_has_extractor (opnd)
2742 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
2743 {
2744 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2745 goto decode_fail;
2746 }
2747 }
2748
2749 /* If the opcode has a verifier, then check it now. */
2750 if (opcode->verifier && ! opcode->verifier (opcode, code))
2751 {
2752 DEBUG_TRACE ("operand verifier FAIL");
2753 goto decode_fail;
2754 }
2755
2756 /* Match the qualifiers. */
2757 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2758 {
2759 /* Arriving here, the CODE has been determined as a valid instruction
2760 of OPCODE and *INST has been filled with information of this OPCODE
2761 instruction. Before the return, check if the instruction has any
2762 alias and should be disassembled in the form of its alias instead.
2763 If the answer is yes, *INST will be updated. */
2764 if (!noaliases_p)
2765 determine_disassembling_preference (inst);
2766 DEBUG_TRACE ("SUCCESS");
2767 return 1;
2768 }
2769 else
2770 {
2771 DEBUG_TRACE ("constraint matching FAIL");
2772 }
2773
2774 decode_fail:
2775 return 0;
2776 }
2777 \f
2778 /* This does some user-friendly fix-up to *INST.  It currently focuses on
2779 adjusting qualifiers so that the printed instruction can be
2780 recognized/understood more easily. */
2781
2782 static void
2783 user_friendly_fixup (aarch64_inst *inst)
2784 {
2785 switch (inst->opcode->iclass)
2786 {
2787 case testbranch:
2788 /* TBNZ Xn|Wn, #uimm6, label
2789 Test and Branch Not Zero: conditionally jumps to label if bit number
2790 uimm6 in register Xn is not zero. The bit number implies the width of
2791 the register, which may be written and should be disassembled as Wn if
2792 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2793 */
2794 if (inst->operands[1].imm.value < 32)
2795 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2796 break;
2797 default: break;
2798 }
2799 }
2800
2801 /* Decode INSN and fill in *INST with the instruction information.  An alias
2802 opcode may be filled in *INST if NOALIASES_P is FALSE.  Return zero on
2803 success. */
2804
2805 int
2806 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2807 bfd_boolean noaliases_p)
2808 {
2809 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2810
2811 #ifdef DEBUG_AARCH64
2812 if (debug_dump)
2813 {
2814 const aarch64_opcode *tmp = opcode;
2815 printf ("\n");
2816 DEBUG_TRACE ("opcode lookup:");
2817 while (tmp != NULL)
2818 {
2819 aarch64_verbose (" %s", tmp->name);
2820 tmp = aarch64_find_next_opcode (tmp);
2821 }
2822 }
2823 #endif /* DEBUG_AARCH64 */
2824
2825 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2826 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2827 opcode field and value, apart from the difference that one of them has an
2828 extra field as part of the opcode, but such a field is used for operand
2829 encoding in other opcode(s) ('immh' in the case of the example). */
2830 while (opcode != NULL)
2831 {
2832 /* But only one opcode can be decoded successfully, as the
2833 decoding routine will check the constraints carefully. */
2834 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p) == 1)
2835 return ERR_OK;
2836 opcode = aarch64_find_next_opcode (opcode);
2837 }
2838
2839 return ERR_UND;
2840 }
2841
2842 /* Print operands. */
2843
2844 static void
2845 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2846 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2847 {
2848 int i, pcrel_p, num_printed;
2849 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2850 {
2851 char str[128];
2852 /* We rely mainly on the operand info in the opcode table, but we also
2853 look into inst->operands to support the disassembling of optional
2854 operands.
2855 The two operand codes should be the same in all cases, apart from
2856 when the operand can be optional. */
2857 if (opcode->operands[i] == AARCH64_OPND_NIL
2858 || opnds[i].type == AARCH64_OPND_NIL)
2859 break;
2860
2861 /* Generate the operand string in STR. */
2862 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
2863 &info->target);
2864
2865 /* Print the delimiter (taking account of omitted operand(s)). */
2866 if (str[0] != '\0')
2867 (*info->fprintf_func) (info->stream, "%s",
2868 num_printed++ == 0 ? "\t" : ", ");
2869
2870 /* Print the operand. */
2871 if (pcrel_p)
2872 (*info->print_address_func) (info->target, info);
2873 else
2874 (*info->fprintf_func) (info->stream, "%s", str);
2875 }
2876 }
2877
2878 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
2879
2880 static void
2881 remove_dot_suffix (char *name, const aarch64_inst *inst)
2882 {
2883 char *ptr;
2884 size_t len;
2885
2886 ptr = strchr (inst->opcode->name, '.');
2887 assert (ptr && inst->cond);
2888 len = ptr - inst->opcode->name;
2889 assert (len < 8);
2890 strncpy (name, inst->opcode->name, len);
2891 name[len] = '\0';
2892 }
2893
2894 /* Print the instruction mnemonic name. */
2895
2896 static void
2897 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2898 {
2899 if (inst->opcode->flags & F_COND)
2900 {
2901 /* For instructions that are truly conditionally executed, e.g. b.cond,
2902 prepare the full mnemonic name with the corresponding condition
2903 suffix. */
2904 char name[8];
2905
2906 remove_dot_suffix (name, inst);
2907 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2908 }
2909 else
2910 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2911 }
2912
2913 /* Decide whether we need to print a comment after the operands of
2914 instruction INST. */
2915
2916 static void
2917 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
2918 {
2919 if (inst->opcode->flags & F_COND)
2920 {
2921 char name[8];
2922 unsigned int i, num_conds;
2923
2924 remove_dot_suffix (name, inst);
2925 num_conds = ARRAY_SIZE (inst->cond->names);
2926 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
2927 (*info->fprintf_func) (info->stream, "%s %s.%s",
2928 i == 1 ? " //" : ",",
2929 name, inst->cond->names[i]);
2930 }
2931 }
2932
2933 /* Print the instruction according to *INST. */
2934
2935 static void
2936 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2937 struct disassemble_info *info)
2938 {
2939 print_mnemonic_name (inst, info);
2940 print_operands (pc, inst->opcode, inst->operands, info);
2941 print_comment (inst, info);
2942 }
2943
2944 /* Entry-point of the instruction disassembler and printer. */
2945
2946 static void
2947 print_insn_aarch64_word (bfd_vma pc,
2948 uint32_t word,
2949 struct disassemble_info *info)
2950 {
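/* Messages indexed by the negated ERR_* value; a failed decode prints
   err_msg[-ret] below.  */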
2951 static const char *err_msg[6] =
2952 {
2953 [ERR_OK] = "_",
2954 [-ERR_UND] = "undefined",
2955 [-ERR_UNP] = "unpredictable",
2956 [-ERR_NYI] = "NYI"
2957 };
2958
2959 int ret;
2960 aarch64_inst inst;
2961
2962 info->insn_info_valid = 1;
2963 info->branch_delay_insns = 0;
2964 info->data_size = 0;
2965 info->target = 0;
2966 info->target2 = 0;
2967
2968 if (info->flags & INSN_HAS_RELOC)
2969 /* If the instruction has a reloc associated with it, then
2970 the offset field in the instruction will actually be the
2971 addend for the reloc. (If we are using REL type relocs).
2972 In such cases, we can ignore the pc when computing
2973 addresses, since the addend is not currently pc-relative. */
2974 pc = 0;
2975
2976 ret = aarch64_decode_insn (word, &inst, no_aliases);
2977
2978 if (((word >> 21) & 0x3ff) == 1)
2979 {
2980 /* RESERVED for ALES. */
2981 assert (ret != ERR_OK);
2982 ret = ERR_NYI;
2983 }
2984
2985 switch (ret)
2986 {
2987 case ERR_UND:
2988 case ERR_UNP:
2989 case ERR_NYI:
2990 /* Handle undefined instructions. */
2991 info->insn_type = dis_noninsn;
2992 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
2993 word, err_msg[-ret]);
2994 break;
2995 case ERR_OK:
2996 user_friendly_fixup (&inst);
2997 print_aarch64_insn (pc, &inst, info);
2998 break;
2999 default:
3000 abort ();
3001 }
3002 }
3003
3004 /* Disallow mapping symbols ($x, $d etc) from
3005 being displayed in symbol relative addresses. */
3006
3007 bfd_boolean
3008 aarch64_symbol_is_valid (asymbol * sym,
3009 struct disassemble_info * info ATTRIBUTE_UNUSED)
3010 {
3011 const char * name;
3012
3013 if (sym == NULL)
3014 return FALSE;
3015
3016 name = bfd_asymbol_name (sym);
3017
3018 return name
3019 && (name[0] != '$'
3020 || (name[1] != 'x' && name[1] != 'd')
3021 || (name[2] != '\0' && name[2] != '.'));
3022 }
3023
3024 /* Print data bytes on INFO->STREAM. */
3025
3026 static void
3027 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3028 uint32_t word,
3029 struct disassemble_info *info)
3030 {
3031 switch (info->bytes_per_chunk)
3032 {
3033 case 1:
3034 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3035 break;
3036 case 2:
3037 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3038 break;
3039 case 4:
3040 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3041 break;
3042 default:
3043 abort ();
3044 }
3045 }
3046
3047 /* Try to infer the code or data type from a symbol.
3048 Returns nonzero if *MAP_TYPE was set. */
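/* A mapping symbol is named $x (marking code) or $d (marking data),
   optionally followed by a '.' and further characters.  */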
3049
3050 static int
3051 get_sym_code_type (struct disassemble_info *info, int n,
3052 enum map_type *map_type)
3053 {
3054 elf_symbol_type *es;
3055 unsigned int type;
3056 const char *name;
3057
3058 es = *(elf_symbol_type **)(info->symtab + n);
3059 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3060
3061 /* If the symbol has function type then use that. */
3062 if (type == STT_FUNC)
3063 {
3064 *map_type = MAP_INSN;
3065 return TRUE;
3066 }
3067
3068 /* Check for mapping symbols. */
3069 name = bfd_asymbol_name(info->symtab[n]);
3070 if (name[0] == '$'
3071 && (name[1] == 'x' || name[1] == 'd')
3072 && (name[2] == '\0' || name[2] == '.'))
3073 {
3074 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3075 return TRUE;
3076 }
3077
3078 return FALSE;
3079 }
3080
3081 /* Entry-point of the AArch64 disassembler. */
3082
3083 int
3084 print_insn_aarch64 (bfd_vma pc,
3085 struct disassemble_info *info)
3086 {
3087 bfd_byte buffer[INSNLEN];
3088 int status;
3089 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
3090 bfd_boolean found = FALSE;
3091 unsigned int size = 4;
3092 unsigned long data;
3093
3094 if (info->disassembler_options)
3095 {
3096 set_default_aarch64_dis_options (info);
3097
3098 parse_aarch64_dis_options (info->disassembler_options);
3099
3100 /* To avoid repeated parsing of these options, we remove them here. */
3101 info->disassembler_options = NULL;
3102 }
3103
3104 /* AArch64 instructions are always little-endian.  */
3105 info->endian_code = BFD_ENDIAN_LITTLE;
3106
3107 /* First check the full symtab for a mapping symbol, even if there
3108 are no usable non-mapping symbols for this address. */
3109 if (info->symtab_size != 0
3110 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3111 {
3112 enum map_type type = MAP_INSN;
3113 int last_sym = -1;
3114 bfd_vma addr;
3115 int n;
3116
3117 if (pc <= last_mapping_addr)
3118 last_mapping_sym = -1;
3119
3120 /* Start scanning at the start of the function, or wherever
3121 we finished last time. */
3122 n = info->symtab_pos + 1;
3123 if (n < last_mapping_sym)
3124 n = last_mapping_sym;
3125
3126 /* Scan up to the location being disassembled. */
3127 for (; n < info->symtab_size; n++)
3128 {
3129 addr = bfd_asymbol_value (info->symtab[n]);
3130 if (addr > pc)
3131 break;
3132 if ((info->section == NULL
3133 || info->section == info->symtab[n]->section)
3134 && get_sym_code_type (info, n, &type))
3135 {
3136 last_sym = n;
3137 found = TRUE;
3138 }
3139 }
3140
3141 if (!found)
3142 {
3143 n = info->symtab_pos;
3144 if (n < last_mapping_sym)
3145 n = last_mapping_sym;
3146
3147 /* No mapping symbol found at this address. Look backwards
3148 for a preceding one. */
3149 for (; n >= 0; n--)
3150 {
3151 if (get_sym_code_type (info, n, &type))
3152 {
3153 last_sym = n;
3154 found = TRUE;
3155 break;
3156 }
3157 }
3158 }
3159
3160 last_mapping_sym = last_sym;
3161 last_type = type;
3162
3163 /* Look a little bit ahead to see if we should print out
3164 less than four bytes of data. If there's a symbol,
3165 mapping or otherwise, after two bytes then don't
3166 print more. */
3167 if (last_type == MAP_DATA)
3168 {
3169 size = 4 - (pc & 3);
3170 for (n = last_sym + 1; n < info->symtab_size; n++)
3171 {
3172 addr = bfd_asymbol_value (info->symtab[n]);
3173 if (addr > pc)
3174 {
3175 if (addr - pc < size)
3176 size = addr - pc;
3177 break;
3178 }
3179 }
3180 /* If the next symbol is after three bytes, we need to
3181 print only part of the data, so that we can use either
3182 .byte or .short. */
3183 if (size == 3)
3184 size = (pc & 1) ? 1 : 2;
3185 }
3186 }
3187
3188 if (last_type == MAP_DATA)
3189 {
3190 /* size was set above. */
3191 info->bytes_per_chunk = size;
3192 info->display_endian = info->endian;
3193 printer = print_insn_data;
3194 }
3195 else
3196 {
3197 info->bytes_per_chunk = size = INSNLEN;
3198 info->display_endian = info->endian_code;
3199 printer = print_insn_aarch64_word;
3200 }
3201
3202 status = (*info->read_memory_func) (pc, buffer, size, info);
3203 if (status != 0)
3204 {
3205 (*info->memory_error_func) (status, pc, info);
3206 return -1;
3207 }
3208
3209 data = bfd_get_bits (buffer, size * 8,
3210 info->display_endian == BFD_ENDIAN_BIG);
3211
3212 (*printer) (pc, data, info);
3213
3214 return size;
3215 }
3216 \f
3217 void
3218 print_aarch64_disassembler_options (FILE *stream)
3219 {
3220 fprintf (stream, _("\n\
3221 The following AARCH64 specific disassembler options are supported for use\n\
3222 with the -M switch (multiple options should be separated by commas):\n"));
3223
3224 fprintf (stream, _("\n\
3225 no-aliases Don't print instruction aliases.\n"));
3226
3227 fprintf (stream, _("\n\
3228 aliases Do print instruction aliases.\n"));
3229
3230 #ifdef DEBUG_AARCH64
3231 fprintf (stream, _("\n\
3232 debug_dump Temp switch for debug trace.\n"));
3233 #endif /* DEBUG_AARCH64 */
3234
3235 fprintf (stream, _("\n"));
3236 }