1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "dis-asm.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define ERR_OK 0
30 #define ERR_UND -1
31 #define ERR_UNP -3
32 #define ERR_NYI -5
33
34 #define INSNLEN 4
35
36 /* Cached mapping symbol state. */
37 enum map_type
38 {
39 MAP_INSN,
40 MAP_DATA
41 };
42
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
46
47 /* Other options.  */
48 static int no_aliases = 0;	/* If set, disassemble as the most general inst.  */
49 \f
50
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
61 {
62 no_aliases = 1;
63 return;
64 }
65
66 if (CONST_STRNEQ (option, "aliases"))
67 {
68 no_aliases = 0;
69 return;
70 }
71
72 #ifdef DEBUG_AARCH64
73 if (CONST_STRNEQ (option, "debug_dump"))
74 {
75 debug_dump = 1;
76 return;
77 }
78 #endif /* DEBUG_AARCH64 */
79
80 /* Invalid option. */
81 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
82 }
83
84 static void
85 parse_aarch64_dis_options (const char *options)
86 {
87 const char *option_end;
88
89 if (options == NULL)
90 return;
91
92 while (*options != '\0')
93 {
94 /* Skip empty options. */
95 if (*options == ',')
96 {
97 options++;
98 continue;
99 }
100
101 /* We know that *options is neither NUL nor a comma. */
102 option_end = options + 1;
103 while (*option_end != ',' && *option_end != '\0')
104 option_end++;
105
106 parse_aarch64_dis_option (options, option_end - options);
107
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
111 }
112 }
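/* Illustrative usage sketch, not part of the original source: the option
   string is the comma-separated list that objdump passes down from its -M
   switch, so a call such as

     parse_aarch64_dis_options ("no-aliases");

   sets no_aliases to 1 and makes the disassembler print each instruction
   in its most general form instead of as an alias.  */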
113 \f
114 /* Functions doing the instruction disassembling. */
115
116 /* The unnamed arguments consist of the number of fields and information about
117 these fields where the VALUE will be extracted from CODE and returned.
118 MASK can be zero or the base mask of the opcode.
119
120 N.B. the fields are required to be in such an order that the most significant
121 field for VALUE comes first, e.g. the <index> in
122 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123 is encoded in H:L:M in some cases; the fields H:L:M should then be passed in
124 the order of H, L, M. */
125
126 aarch64_insn
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
128 {
129 uint32_t num;
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
132 va_list va;
133
134 va_start (va, mask);
135 num = va_arg (va, uint32_t);
136 assert (num <= 5);
137 aarch64_insn value = 0x0;
138 while (num--)
139 {
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
144 }
145 return value;
146 }
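/* Illustrative example: for an element index encoded in H:L:M, a caller
   such as aarch64_ext_reglane below does

     index = extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);

   Each iteration shifts the accumulated value left by the next field's
   width before ORing in that field, so H ends up as the most significant
   bit of the returned index.  */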
147
148 /* Extract the value of all fields in SELF->fields from instruction CODE.
149 The least significant bit comes from the final field. */
150
151 static aarch64_insn
152 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
153 {
154 aarch64_insn value;
155 unsigned int i;
156 enum aarch64_field_kind kind;
157
158 value = 0;
159 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
160 {
161 kind = self->fields[i];
162 value <<= fields[kind].width;
163 value |= extract_field (kind, code, 0);
164 }
165 return value;
166 }
167
168 /* Sign-extend bit I of VALUE. */
169 static inline int32_t
170 sign_extend (aarch64_insn value, unsigned i)
171 {
172 uint32_t ret = value;
173
174 assert (i < 32);
175 if ((value >> i) & 0x1)
176 {
177 uint32_t val = (uint32_t)(-1) << i;
178 ret = ret | val;
179 }
180 return (int32_t) ret;
181 }
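/* Worked example (illustrative): sign_extend (0x100, 8) treats 0x100 as a
   9-bit two's-complement value; bit 8 is set, so all bits above it are
   filled with ones and the result is -256.  A value with bit I clear is
   returned unchanged, e.g. sign_extend (0x0ff, 8) == 255.  */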
182
183 /* N.B. the following inline helper functions create a dependency on the
184 order of operand qualifier enumerators. */
185
186 /* Given VALUE, return qualifier for a general purpose register. */
187 static inline enum aarch64_opnd_qualifier
188 get_greg_qualifier_from_value (aarch64_insn value)
189 {
190 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
191 assert (value <= 0x1
192 && aarch64_get_qualifier_standard_value (qualifier) == value);
193 return qualifier;
194 }
195
196 /* Given VALUE, return qualifier for a vector register. This does not support
197 decoding instructions that accept the 2H vector type. */
198
199 static inline enum aarch64_opnd_qualifier
200 get_vreg_qualifier_from_value (aarch64_insn value)
201 {
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
203
204 /* Instructions using vector type 2H should not call this function. Skip over
205 the 2H qualifier. */
206 if (qualifier >= AARCH64_OPND_QLF_V_2H)
207 qualifier += 1;
208
209 assert (value <= 0x8
210 && aarch64_get_qualifier_standard_value (qualifier) == value);
211 return qualifier;
212 }
213
214 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
215 static inline enum aarch64_opnd_qualifier
216 get_sreg_qualifier_from_value (aarch64_insn value)
217 {
218 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
219
220 assert (value <= 0x4
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
222 return qualifier;
223 }
224
225 /* Given the instruction in *INST, which is probably half way through
226 decoding, return the qualifier that the caller should expect for
227 operand I if we can establish it; otherwise return
228 AARCH64_OPND_QLF_NIL. */
229
230 static aarch64_opnd_qualifier_t
231 get_expected_qualifier (const aarch64_inst *inst, int i)
232 {
233 aarch64_opnd_qualifier_seq_t qualifiers;
234 /* Should not be called if the qualifier is known. */
235 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
236 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
237 i, qualifiers))
238 return qualifiers[i];
239 else
240 return AARCH64_OPND_QLF_NIL;
241 }
242
243 /* Operand extractors. */
244
245 int
246 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
247 const aarch64_insn code,
248 const aarch64_inst *inst ATTRIBUTE_UNUSED)
249 {
250 info->reg.regno = extract_field (self->fields[0], code, 0);
251 return 1;
252 }
253
254 int
255 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
256 const aarch64_insn code ATTRIBUTE_UNUSED,
257 const aarch64_inst *inst ATTRIBUTE_UNUSED)
258 {
259 assert (info->idx == 1
260 || info->idx == 3);
261 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
262 return 1;
263 }
264
265 /* e.g. IC <ic_op>{, <Xt>}. */
266 int
267 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
268 const aarch64_insn code,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED)
270 {
271 info->reg.regno = extract_field (self->fields[0], code, 0);
272 assert (info->idx == 1
273 && (aarch64_get_operand_class (inst->operands[0].type)
274 == AARCH64_OPND_CLASS_SYSTEM));
275 /* This will make the constraint checking happy and more importantly will
276 help the disassembler determine whether this operand is optional or
277 not. */
278 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
279
280 return 1;
281 }
282
283 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
284 int
285 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
286 const aarch64_insn code,
287 const aarch64_inst *inst ATTRIBUTE_UNUSED)
288 {
289 /* regno */
290 info->reglane.regno = extract_field (self->fields[0], code,
291 inst->opcode->mask);
292
293 /* Index and/or type. */
294 if (inst->opcode->iclass == asisdone
295 || inst->opcode->iclass == asimdins)
296 {
297 if (info->type == AARCH64_OPND_En
298 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
299 {
300 unsigned shift;
301 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
302 assert (info->idx == 1); /* Vn */
303 aarch64_insn value = extract_field (FLD_imm4, code, 0);
304 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
305 info->qualifier = get_expected_qualifier (inst, info->idx);
306 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
307 info->reglane.index = value >> shift;
308 }
309 else
310 {
311 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
312 imm5<3:0> <V>
313 0000 RESERVED
314 xxx1 B
315 xx10 H
316 x100 S
317 1000 D */
318 int pos = -1;
319 aarch64_insn value = extract_field (FLD_imm5, code, 0);
320 while (++pos <= 3 && (value & 0x1) == 0)
321 value >>= 1;
322 if (pos > 3)
323 return 0;
324 info->qualifier = get_sreg_qualifier_from_value (pos);
325 info->reglane.index = (unsigned) (value >> 1);
326 }
327 }
328 else
329 {
330 /* Index only for e.g.
331 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
332
333 /* Need information in other operand(s) to help decoding. */
334 info->qualifier = get_expected_qualifier (inst, info->idx);
335 switch (info->qualifier)
336 {
337 case AARCH64_OPND_QLF_S_H:
338 /* h:l:m */
339 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
340 FLD_M);
341 info->reglane.regno &= 0xf;
342 break;
343 case AARCH64_OPND_QLF_S_S:
344 /* h:l */
345 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
346 break;
347 case AARCH64_OPND_QLF_S_D:
348 /* H */
349 info->reglane.index = extract_field (FLD_H, code, 0);
350 break;
351 default:
352 return 0;
353 }
354
355 if (inst->opcode->op == OP_FCMLA_ELEM)
356 {
357 /* Complex operand takes two elements. */
358 if (info->reglane.index & 1)
359 return 0;
360 info->reglane.index /= 2;
361 }
362 }
363
364 return 1;
365 }
366
367 int
368 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
369 const aarch64_insn code,
370 const aarch64_inst *inst ATTRIBUTE_UNUSED)
371 {
372 /* R */
373 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
374 /* len */
375 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
376 return 1;
377 }
378
379 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
380 int
381 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
382 aarch64_opnd_info *info, const aarch64_insn code,
383 const aarch64_inst *inst)
384 {
385 aarch64_insn value;
386 /* Number of elements in each structure to be loaded/stored. */
387 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
388
389 struct
390 {
391 unsigned is_reserved;
392 unsigned num_regs;
393 unsigned num_elements;
394 } data [] =
395 { {0, 4, 4},
396 {1, 4, 4},
397 {0, 4, 1},
398 {0, 4, 2},
399 {0, 3, 3},
400 {1, 3, 3},
401 {0, 3, 1},
402 {0, 1, 1},
403 {0, 2, 2},
404 {1, 2, 2},
405 {0, 2, 1},
406 };
407
408 /* Rt */
409 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
410 /* opcode */
411 value = extract_field (FLD_opcode, code, 0);
412 if (expected_num != data[value].num_elements || data[value].is_reserved)
413 return 0;
414 info->reglist.num_regs = data[value].num_regs;
415
416 return 1;
417 }
418
419 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
420 lanes instructions. */
421 int
422 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
423 aarch64_opnd_info *info, const aarch64_insn code,
424 const aarch64_inst *inst)
425 {
426 aarch64_insn value;
427
428 /* Rt */
429 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
430 /* S */
431 value = extract_field (FLD_S, code, 0);
432
433 /* Number of registers is equal to the number of elements in
434 each structure to be loaded/stored. */
435 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
436 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
437
438 /* Except when it is LD1R. */
439 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
440 info->reglist.num_regs = 2;
441
442 return 1;
443 }
444
445 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
446 load/store single element instructions. */
447 int
448 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
449 aarch64_opnd_info *info, const aarch64_insn code,
450 const aarch64_inst *inst ATTRIBUTE_UNUSED)
451 {
452 aarch64_field field = {0, 0};
453 aarch64_insn QSsize; /* fields Q:S:size. */
454 aarch64_insn opcodeh2; /* opcode<2:1> */
455
456 /* Rt */
457 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
458
459 /* Decode the index, opcode<2:1> and size. */
460 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
461 opcodeh2 = extract_field_2 (&field, code, 0);
462 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
463 switch (opcodeh2)
464 {
465 case 0x0:
466 info->qualifier = AARCH64_OPND_QLF_S_B;
467 /* Index encoded in "Q:S:size". */
468 info->reglist.index = QSsize;
469 break;
470 case 0x1:
471 if (QSsize & 0x1)
472 /* UND. */
473 return 0;
474 info->qualifier = AARCH64_OPND_QLF_S_H;
475 /* Index encoded in "Q:S:size<1>". */
476 info->reglist.index = QSsize >> 1;
477 break;
478 case 0x2:
479 if ((QSsize >> 1) & 0x1)
480 /* UND. */
481 return 0;
482 if ((QSsize & 0x1) == 0)
483 {
484 info->qualifier = AARCH64_OPND_QLF_S_S;
485 /* Index encoded in "Q:S". */
486 info->reglist.index = QSsize >> 2;
487 }
488 else
489 {
490 if (extract_field (FLD_S, code, 0))
491 /* UND */
492 return 0;
493 info->qualifier = AARCH64_OPND_QLF_S_D;
494 /* Index encoded in "Q". */
495 info->reglist.index = QSsize >> 3;
496 }
497 break;
498 default:
499 return 0;
500 }
501
502 info->reglist.has_index = 1;
503 info->reglist.num_regs = 0;
504 /* Number of registers is equal to the number of elements in
505 each structure to be loaded/stored. */
506 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
507 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
508
509 return 1;
510 }
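/* Worked example (illustrative, values chosen for the sketch): for a
   single-element form with opcode<2:1> = 0x1 (H elements), the index
   lives in Q:S:size<1>.  With Q = 1, S = 0 and size = 0b10, QSsize is
   0b1010, so the decoded qualifier is S_H and the index is
   0b1010 >> 1 = 5.  */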
511
512 /* Decode fields immh:immb and/or Q for e.g.
513 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
514 or SSHR <V><d>, <V><n>, #<shift>. */
515
516 int
517 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
518 aarch64_opnd_info *info, const aarch64_insn code,
519 const aarch64_inst *inst)
520 {
521 int pos;
522 aarch64_insn Q, imm, immh;
523 enum aarch64_insn_class iclass = inst->opcode->iclass;
524
525 immh = extract_field (FLD_immh, code, 0);
526 if (immh == 0)
527 return 0;
528 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
529 pos = 4;
530 /* Get highest set bit in immh. */
531 while (--pos >= 0 && (immh & 0x8) == 0)
532 immh <<= 1;
533
534 assert ((iclass == asimdshf || iclass == asisdshf)
535 && (info->type == AARCH64_OPND_IMM_VLSR
536 || info->type == AARCH64_OPND_IMM_VLSL));
537
538 if (iclass == asimdshf)
539 {
540 Q = extract_field (FLD_Q, code, 0);
541 /* immh Q <T>
542 0000 x SEE AdvSIMD modified immediate
543 0001 0 8B
544 0001 1 16B
545 001x 0 4H
546 001x 1 8H
547 01xx 0 2S
548 01xx 1 4S
549 1xxx 0 RESERVED
550 1xxx 1 2D */
551 info->qualifier =
552 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
553 }
554 else
555 info->qualifier = get_sreg_qualifier_from_value (pos);
556
557 if (info->type == AARCH64_OPND_IMM_VLSR)
558 /* immh <shift>
559 0000 SEE AdvSIMD modified immediate
560 0001 (16-UInt(immh:immb))
561 001x (32-UInt(immh:immb))
562 01xx (64-UInt(immh:immb))
563 1xxx (128-UInt(immh:immb)) */
564 info->imm.value = (16 << pos) - imm;
565 else
566 /* immh:immb
567 immh <shift>
568 0000 SEE AdvSIMD modified immediate
569 0001 (UInt(immh:immb)-8)
570 001x (UInt(immh:immb)-16)
571 01xx (UInt(immh:immb)-32)
572 1xxx (UInt(immh:immb)-64) */
573 info->imm.value = imm - (8 << pos);
574
575 return 1;
576 }
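/* Worked example (illustrative): for a right-shift form such as
   SSHR <Vd>.8B, <Vn>.8B, #<shift> with immh = 0b0001 and immb = 0b010,
   UInt(immh:immb) is 10, the highest set bit of immh gives pos = 0, and
   the decoded shift amount is (16 << 0) - 10 = 6.  */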
577
578 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
579 int
580 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
581 aarch64_opnd_info *info, const aarch64_insn code,
582 const aarch64_inst *inst ATTRIBUTE_UNUSED)
583 {
584 int64_t imm;
585 aarch64_insn val;
586 val = extract_field (FLD_size, code, 0);
587 switch (val)
588 {
589 case 0: imm = 8; break;
590 case 1: imm = 16; break;
591 case 2: imm = 32; break;
592 default: return 0;
593 }
594 info->imm.value = imm;
595 return 1;
596 }
597
598 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
599 The value in the field(s) will be extracted as an unsigned immediate value. */
600 int
601 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
602 const aarch64_insn code,
603 const aarch64_inst *inst ATTRIBUTE_UNUSED)
604 {
605 int64_t imm;
606
607 imm = extract_all_fields (self, code);
608
609 if (operand_need_sign_extension (self))
610 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
611
612 if (operand_need_shift_by_two (self))
613 imm <<= 2;
614
615 if (info->type == AARCH64_OPND_ADDR_ADRP)
616 imm <<= 12;
617
618 info->imm.value = imm;
619 return 1;
620 }
621
622 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
623 int
624 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
625 const aarch64_insn code,
626 const aarch64_inst *inst ATTRIBUTE_UNUSED)
627 {
628 aarch64_ext_imm (self, info, code, inst);
629 info->shifter.kind = AARCH64_MOD_LSL;
630 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
631 return 1;
632 }
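/* Illustrative example: for MOVZ <Xd>, #<imm16>, LSL #<shift>, the hw
   field selects the shift in units of 16, so hw = 0b10 decodes to
   shifter.amount = 2 << 4 = 32.  */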
633
634 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
635 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
636 int
637 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
638 aarch64_opnd_info *info,
639 const aarch64_insn code,
640 const aarch64_inst *inst ATTRIBUTE_UNUSED)
641 {
642 uint64_t imm;
643 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
644 aarch64_field field = {0, 0};
645
646 assert (info->idx == 1);
647
648 if (info->type == AARCH64_OPND_SIMD_FPIMM)
649 info->imm.is_fp = 1;
650
651 /* a:b:c:d:e:f:g:h */
652 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
653 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
654 {
655 /* Either MOVI <Dd>, #<imm>
656 or MOVI <Vd>.2D, #<imm>.
657 <imm> is a 64-bit immediate
658 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
659 encoded in "a:b:c:d:e:f:g:h". */
660 int i;
661 unsigned abcdefgh = imm;
662 for (imm = 0ull, i = 0; i < 8; i++)
663 if (((abcdefgh >> i) & 0x1) != 0)
664 imm |= 0xffull << (8 * i);
665 }
666 info->imm.value = imm;
667
668 /* cmode */
669 info->qualifier = get_expected_qualifier (inst, info->idx);
670 switch (info->qualifier)
671 {
672 case AARCH64_OPND_QLF_NIL:
673 /* no shift */
674 info->shifter.kind = AARCH64_MOD_NONE;
675 return 1;
676 case AARCH64_OPND_QLF_LSL:
677 /* shift zeros */
678 info->shifter.kind = AARCH64_MOD_LSL;
679 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
680 {
681 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
682 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
683 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
684 default: assert (0); return 0;
685 }
686 /* 00: 0; 01: 8; 10:16; 11:24. */
687 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
688 break;
689 case AARCH64_OPND_QLF_MSL:
690 /* shift ones */
691 info->shifter.kind = AARCH64_MOD_MSL;
692 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
693 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
694 break;
695 default:
696 assert (0);
697 return 0;
698 }
699
700 return 1;
701 }
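/* Worked example (illustrative): for MOVI <Vd>.2D, #<imm>, each of the
   eight a..h bits expands to a whole byte, so abcdefgh = 0b10000001
   produces the 64-bit immediate 0xff000000000000ff.  */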
702
703 /* Decode an 8-bit floating-point immediate. */
704 int
705 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
706 const aarch64_insn code,
707 const aarch64_inst *inst ATTRIBUTE_UNUSED)
708 {
709 info->imm.value = extract_all_fields (self, code);
710 info->imm.is_fp = 1;
711 return 1;
712 }
713
714 /* Decode a 1-bit rotate immediate (#90 or #270). */
715 int
716 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
717 const aarch64_insn code,
718 const aarch64_inst *inst ATTRIBUTE_UNUSED)
719 {
720 uint64_t rot = extract_field (self->fields[0], code, 0);
721 assert (rot < 2U);
722 info->imm.value = rot * 180 + 90;
723 return 1;
724 }
725
726 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
727 int
728 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
729 const aarch64_insn code,
730 const aarch64_inst *inst ATTRIBUTE_UNUSED)
731 {
732 uint64_t rot = extract_field (self->fields[0], code, 0);
733 assert (rot < 4U);
734 info->imm.value = rot * 90;
735 return 1;
736 }
737
738 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
739 int
740 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
741 aarch64_opnd_info *info, const aarch64_insn code,
742 const aarch64_inst *inst ATTRIBUTE_UNUSED)
743 {
744 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
745 return 1;
746 }
747
748 /* Decode arithmetic immediate for e.g.
749 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
750 int
751 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
752 aarch64_opnd_info *info, const aarch64_insn code,
753 const aarch64_inst *inst ATTRIBUTE_UNUSED)
754 {
755 aarch64_insn value;
756
757 info->shifter.kind = AARCH64_MOD_LSL;
758 /* shift */
759 value = extract_field (FLD_shift, code, 0);
760 if (value >= 2)
761 return 0;
762 info->shifter.amount = value ? 12 : 0;
763 /* imm12 (unsigned) */
764 info->imm.value = extract_field (FLD_imm12, code, 0);
765
766 return 1;
767 }
768
769 /* Return true if VALUE is a valid logical immediate encoding, storing the
770 decoded value in *RESULT if so. ESIZE is the number of bytes in the
771 decoded immediate. */
772 static int
773 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
774 {
775 uint64_t imm, mask;
776 uint32_t N, R, S;
777 unsigned simd_size;
778
779 /* value is N:immr:imms. */
780 S = value & 0x3f;
781 R = (value >> 6) & 0x3f;
782 N = (value >> 12) & 0x1;
783
784 /* The immediate value is S+1 consecutive bits set to 1, left rotated by
785 SIMDsize - R (in other words, right rotated by R), then replicated. */
786 if (N != 0)
787 {
788 simd_size = 64;
789 mask = 0xffffffffffffffffull;
790 }
791 else
792 {
793 switch (S)
794 {
795 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
796 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
797 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
798 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
799 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
800 default: return 0;
801 }
802 mask = (1ull << simd_size) - 1;
803 /* Top bits are IGNORED. */
804 R &= simd_size - 1;
805 }
806
807 if (simd_size > esize * 8)
808 return 0;
809
810 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
811 if (S == simd_size - 1)
812 return 0;
813 /* S+1 consecutive bits to 1. */
814 /* NOTE: S can't be 63 due to detection above. */
815 imm = (1ull << (S + 1)) - 1;
816 /* Rotate to the left by simd_size - R. */
817 if (R != 0)
818 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
819 /* Replicate the value according to SIMD size. */
820 switch (simd_size)
821 {
822 case 2: imm = (imm << 2) | imm;
823 /* Fall through. */
824 case 4: imm = (imm << 4) | imm;
825 /* Fall through. */
826 case 8: imm = (imm << 8) | imm;
827 /* Fall through. */
828 case 16: imm = (imm << 16) | imm;
829 /* Fall through. */
830 case 32: imm = (imm << 32) | imm;
831 /* Fall through. */
832 case 64: break;
833 default: assert (0); return 0;
834 }
835
836 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
837
838 return 1;
839 }
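/* Worked example (illustrative): for a 32-bit ORR immediate with N = 0,
   immr = 0 and imms = 0b000111 (i.e. VALUE == 0x007), S is 7, so the
   pattern is eight consecutive ones replicated across a 32-bit element;
   decode_limm (4, 0x007, &result) therefore yields 0xff.  */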
840
841 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
842 int
843 aarch64_ext_limm (const aarch64_operand *self,
844 aarch64_opnd_info *info, const aarch64_insn code,
845 const aarch64_inst *inst)
846 {
847 uint32_t esize;
848 aarch64_insn value;
849
850 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
851 self->fields[2]);
852 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
853 return decode_limm (esize, value, &info->imm.value);
854 }
855
856 /* Decode a logical immediate for the BIC alias of AND (etc.). */
857 int
858 aarch64_ext_inv_limm (const aarch64_operand *self,
859 aarch64_opnd_info *info, const aarch64_insn code,
860 const aarch64_inst *inst)
861 {
862 if (!aarch64_ext_limm (self, info, code, inst))
863 return 0;
864 info->imm.value = ~info->imm.value;
865 return 1;
866 }
867
868 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
869 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
870 int
871 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
872 aarch64_opnd_info *info,
873 const aarch64_insn code, const aarch64_inst *inst)
874 {
875 aarch64_insn value;
876
877 /* Rt */
878 info->reg.regno = extract_field (FLD_Rt, code, 0);
879
880 /* size */
881 value = extract_field (FLD_ldst_size, code, 0);
882 if (inst->opcode->iclass == ldstpair_indexed
883 || inst->opcode->iclass == ldstnapair_offs
884 || inst->opcode->iclass == ldstpair_off
885 || inst->opcode->iclass == loadlit)
886 {
887 enum aarch64_opnd_qualifier qualifier;
888 switch (value)
889 {
890 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
891 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
892 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
893 default: return 0;
894 }
895 info->qualifier = qualifier;
896 }
897 else
898 {
899 /* opc1:size */
900 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
901 if (value > 0x4)
902 return 0;
903 info->qualifier = get_sreg_qualifier_from_value (value);
904 }
905
906 return 1;
907 }
908
909 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
910 int
911 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
912 aarch64_opnd_info *info,
913 aarch64_insn code,
914 const aarch64_inst *inst ATTRIBUTE_UNUSED)
915 {
916 /* Rn */
917 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
918 return 1;
919 }
920
921 /* Decode the address operand for e.g.
922 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
923 int
924 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
925 aarch64_opnd_info *info,
926 aarch64_insn code, const aarch64_inst *inst)
927 {
928 aarch64_insn S, value;
929
930 /* Rn */
931 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
932 /* Rm */
933 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
934 /* option */
935 value = extract_field (FLD_option, code, 0);
936 info->shifter.kind =
937 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
938 /* Fix-up the shifter kind; although the table-driven approach is
939 efficient, it is slightly inflexible, thus needing this fix-up. */
940 if (info->shifter.kind == AARCH64_MOD_UXTX)
941 info->shifter.kind = AARCH64_MOD_LSL;
942 /* S */
943 S = extract_field (FLD_S, code, 0);
944 if (S == 0)
945 {
946 info->shifter.amount = 0;
947 info->shifter.amount_present = 0;
948 }
949 else
950 {
951 int size;
952 /* Need information in other operand(s) to help achieve the decoding
953 from 'S' field. */
954 info->qualifier = get_expected_qualifier (inst, info->idx);
955 /* Get the size of the data element that is accessed, which may be
956 different from that of the source register size, e.g. in strb/ldrb. */
957 size = aarch64_get_qualifier_esize (info->qualifier);
958 info->shifter.amount = get_logsz (size);
959 info->shifter.amount_present = 1;
960 }
961
962 return 1;
963 }
964
965 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
966 int
967 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
968 aarch64_insn code, const aarch64_inst *inst)
969 {
970 aarch64_insn imm;
971 info->qualifier = get_expected_qualifier (inst, info->idx);
972
973 /* Rn */
974 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
975 /* simm (imm9 or imm7) */
976 imm = extract_field (self->fields[0], code, 0);
977 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
978 if (self->fields[0] == FLD_imm7)
979 /* scaled immediate in ld/st pair instructions. */
980 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
981 /* qualifier */
982 if (inst->opcode->iclass == ldst_unscaled
983 || inst->opcode->iclass == ldstnapair_offs
984 || inst->opcode->iclass == ldstpair_off
985 || inst->opcode->iclass == ldst_unpriv)
986 info->addr.writeback = 0;
987 else
988 {
989 /* pre/post- index */
990 info->addr.writeback = 1;
991 if (extract_field (self->fields[1], code, 0) == 1)
992 info->addr.preind = 1;
993 else
994 info->addr.postind = 1;
995 }
996
997 return 1;
998 }
999
1000 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1001 int
1002 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1003 aarch64_insn code,
1004 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1005 {
1006 int shift;
1007 info->qualifier = get_expected_qualifier (inst, info->idx);
1008 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1009 /* Rn */
1010 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1011 /* uimm12 */
1012 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1013 return 1;
1014 }
1015
1016 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1017 int
1018 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1019 aarch64_insn code,
1020 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1021 {
1022 aarch64_insn imm;
1023
1024 info->qualifier = get_expected_qualifier (inst, info->idx);
1025 /* Rn */
1026 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1027 /* simm10 */
1028 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1029 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1030 if (extract_field (self->fields[3], code, 0) == 1) {
1031 info->addr.writeback = 1;
1032 info->addr.preind = 1;
1033 }
1034 return 1;
1035 }
1036
1037 /* Decode the address operand for e.g.
1038 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1039 int
1040 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1041 aarch64_opnd_info *info,
1042 aarch64_insn code, const aarch64_inst *inst)
1043 {
1044 /* The opcode dependent area stores the number of elements in
1045 each structure to be loaded/stored. */
1046 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1047
1048 /* Rn */
1049 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1050 /* Rm | #<amount> */
1051 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1052 if (info->addr.offset.regno == 31)
1053 {
1054 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1055 /* Special handling of loading a single structure to all lanes. */
1056 info->addr.offset.imm = (is_ld1r ? 1
1057 : inst->operands[0].reglist.num_regs)
1058 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1059 else
1060 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1061 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1062 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1063 }
1064 else
1065 info->addr.offset.is_reg = 1;
1066 info->addr.writeback = 1;
1067
1068 return 1;
1069 }
1070
1071 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1072 int
1073 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1074 aarch64_opnd_info *info,
1075 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1076 {
1077 aarch64_insn value;
1078 /* cond */
1079 value = extract_field (FLD_cond, code, 0);
1080 info->cond = get_cond_from_value (value);
1081 return 1;
1082 }
1083
1084 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1085 int
1086 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1087 aarch64_opnd_info *info,
1088 aarch64_insn code,
1089 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1090 {
1091 /* op0:op1:CRn:CRm:op2 */
1092 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1093 FLD_CRm, FLD_op2);
1094 return 1;
1095 }
1096
1097 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1098 int
1099 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1100 aarch64_opnd_info *info, aarch64_insn code,
1101 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1102 {
1103 int i;
1104 /* op1:op2 */
1105 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1106 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1107 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1108 return 1;
1109 /* Reserved value in <pstatefield>. */
1110 return 0;
1111 }
1112
1113 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1114 int
1115 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1116 aarch64_opnd_info *info,
1117 aarch64_insn code,
1118 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1119 {
1120 int i;
1121 aarch64_insn value;
1122 const aarch64_sys_ins_reg *sysins_ops;
1123 /* op0:op1:CRn:CRm:op2 */
1124 value = extract_fields (code, 0, 5,
1125 FLD_op0, FLD_op1, FLD_CRn,
1126 FLD_CRm, FLD_op2);
1127
1128 switch (info->type)
1129 {
1130 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1131 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1132 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1133 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1134 default: assert (0); return 0;
1135 }
1136
1137 for (i = 0; sysins_ops[i].name != NULL; ++i)
1138 if (sysins_ops[i].value == value)
1139 {
1140 info->sysins_op = sysins_ops + i;
1141 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1142 info->sysins_op->name,
1143 (unsigned)info->sysins_op->value,
1144 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1145 return 1;
1146 }
1147
1148 return 0;
1149 }
1150
1151 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1152
1153 int
1154 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1155 aarch64_opnd_info *info,
1156 aarch64_insn code,
1157 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1158 {
1159 /* CRm */
1160 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1161 return 1;
1162 }
1163
1164 /* Decode the prefetch operation option operand for e.g.
1165 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1166
1167 int
1168 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1169 aarch64_opnd_info *info,
1170 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1171 {
1172 /* prfop in Rt */
1173 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1174 return 1;
1175 }
1176
1177 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1178 to the matching name/value pair in aarch64_hint_options. */
1179
1180 int
1181 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1182 aarch64_opnd_info *info,
1183 aarch64_insn code,
1184 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1185 {
1186 /* CRm:op2. */
1187 unsigned hint_number;
1188 int i;
1189
1190 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1191
1192 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1193 {
1194 if (hint_number == aarch64_hint_options[i].value)
1195 {
1196 info->hint_option = &(aarch64_hint_options[i]);
1197 return 1;
1198 }
1199 }
1200
1201 return 0;
1202 }
1203
1204 /* Decode the extended register operand for e.g.
1205 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1206 int
1207 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1208 aarch64_opnd_info *info,
1209 aarch64_insn code,
1210 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1211 {
1212 aarch64_insn value;
1213
1214 /* Rm */
1215 info->reg.regno = extract_field (FLD_Rm, code, 0);
1216 /* option */
1217 value = extract_field (FLD_option, code, 0);
1218 info->shifter.kind =
1219 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1220 /* imm3 */
1221 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1222
1223 /* This makes the constraint checking happy. */
1224 info->shifter.operator_present = 1;
1225
1226 /* Assume inst->operands[0].qualifier has been resolved. */
1227 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1228 info->qualifier = AARCH64_OPND_QLF_W;
1229 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1230 && (info->shifter.kind == AARCH64_MOD_UXTX
1231 || info->shifter.kind == AARCH64_MOD_SXTX))
1232 info->qualifier = AARCH64_OPND_QLF_X;
1233
1234 return 1;
1235 }
1236
1237 /* Decode the shifted register operand for e.g.
1238 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1239 int
1240 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1241 aarch64_opnd_info *info,
1242 aarch64_insn code,
1243 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1244 {
1245 aarch64_insn value;
1246
1247 /* Rm */
1248 info->reg.regno = extract_field (FLD_Rm, code, 0);
1249 /* shift */
1250 value = extract_field (FLD_shift, code, 0);
1251 info->shifter.kind =
1252 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1253 if (info->shifter.kind == AARCH64_MOD_ROR
1254 && inst->opcode->iclass != log_shift)
1255 /* ROR is not available for the shifted register operand in arithmetic
1256 instructions. */
1257 return 0;
1258 /* imm6 */
1259 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1260
1261 /* This makes the constraint checking happy. */
1262 info->shifter.operator_present = 1;
1263
1264 return 1;
1265 }
1266
1267 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1268 where <offset> is given by the OFFSET parameter and where <factor> is
1269 1 plus SELF's operand-dependent value. fields[0] specifies the field
1270 that holds <base>. */
1271 static int
1272 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1273 aarch64_opnd_info *info, aarch64_insn code,
1274 int64_t offset)
1275 {
1276 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1277 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1278 info->addr.offset.is_reg = FALSE;
1279 info->addr.writeback = FALSE;
1280 info->addr.preind = TRUE;
1281 if (offset != 0)
1282 info->shifter.kind = AARCH64_MOD_MUL_VL;
1283 info->shifter.amount = 1;
1284 info->shifter.operator_present = (info->addr.offset.imm != 0);
1285 info->shifter.amount_present = FALSE;
1286 return 1;
1287 }
1288
1289 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1290 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1291 SELF's operand-dependent value. fields[0] specifies the field that
1292 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1293 int
1294 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1295 aarch64_opnd_info *info, aarch64_insn code,
1296 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1297 {
1298 int offset;
1299
1300 offset = extract_field (FLD_SVE_imm4, code, 0);
1301 offset = ((offset + 8) & 15) - 8;
1302 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1303 }
1304
1305 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1306 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1307 SELF's operand-dependent value. fields[0] specifies the field that
1308 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1309 int
1310 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1311 aarch64_opnd_info *info, aarch64_insn code,
1312 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1313 {
1314 int offset;
1315
1316 offset = extract_field (FLD_SVE_imm6, code, 0);
1317 offset = (((offset + 32) & 63) - 32);
1318 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1319 }
1320
1321 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1322 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1323 SELF's operand-dependent value. fields[0] specifies the field that
1324 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1325 and imm3 fields, with imm3 being the less-significant part. */
1326 int
1327 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1328 aarch64_opnd_info *info,
1329 aarch64_insn code,
1330 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1331 {
1332 int offset;
1333
1334 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1335 offset = (((offset + 256) & 511) - 256);
1336 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1337 }
1338
1339 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1340 is given by the OFFSET parameter and where <shift> is SELF's operand-
1341 dependent value. fields[0] specifies the base register field <base>. */
1342 static int
1343 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1344 aarch64_opnd_info *info, aarch64_insn code,
1345 int64_t offset)
1346 {
1347 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1348 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1349 info->addr.offset.is_reg = FALSE;
1350 info->addr.writeback = FALSE;
1351 info->addr.preind = TRUE;
1352 info->shifter.operator_present = FALSE;
1353 info->shifter.amount_present = FALSE;
1354 return 1;
1355 }
1356
1357 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1358 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1359 value. fields[0] specifies the base register field. */
1360 int
1361 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1362 aarch64_opnd_info *info, aarch64_insn code,
1363 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1364 {
1365 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1366 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1367 }
1368
1369 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1370 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1371 value. fields[0] specifies the base register field. */
1372 int
1373 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1374 aarch64_opnd_info *info, aarch64_insn code,
1375 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1376 {
1377 int offset = extract_field (FLD_SVE_imm6, code, 0);
1378 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1379 }
1380
1381 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1382 is SELF's operand-dependent value. fields[0] specifies the base
1383 register field and fields[1] specifies the offset register field. */
1384 int
1385 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1386 aarch64_opnd_info *info, aarch64_insn code,
1387 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1388 {
1389 int index_regno;
1390
1391 index_regno = extract_field (self->fields[1], code, 0);
1392 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1393 return 0;
1394
1395 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1396 info->addr.offset.regno = index_regno;
1397 info->addr.offset.is_reg = TRUE;
1398 info->addr.writeback = FALSE;
1399 info->addr.preind = TRUE;
1400 info->shifter.kind = AARCH64_MOD_LSL;
1401 info->shifter.amount = get_operand_specific_data (self);
1402 info->shifter.operator_present = (info->shifter.amount != 0);
1403 info->shifter.amount_present = (info->shifter.amount != 0);
1404 return 1;
1405 }
1406
1407 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1408 <shift> is SELF's operand-dependent value. fields[0] specifies the
1409 base register field, fields[1] specifies the offset register field and
1410 fields[2] is a single-bit field that selects SXTW over UXTW. */
1411 int
1412 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1413 aarch64_opnd_info *info, aarch64_insn code,
1414 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1415 {
1416 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1417 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1418 info->addr.offset.is_reg = TRUE;
1419 info->addr.writeback = FALSE;
1420 info->addr.preind = TRUE;
1421 if (extract_field (self->fields[2], code, 0))
1422 info->shifter.kind = AARCH64_MOD_SXTW;
1423 else
1424 info->shifter.kind = AARCH64_MOD_UXTW;
1425 info->shifter.amount = get_operand_specific_data (self);
1426 info->shifter.operator_present = TRUE;
1427 info->shifter.amount_present = (info->shifter.amount != 0);
1428 return 1;
1429 }
1430
1431 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1432 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1433 fields[0] specifies the base register field. */
1434 int
1435 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1436 aarch64_opnd_info *info, aarch64_insn code,
1437 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1438 {
1439 int offset = extract_field (FLD_imm5, code, 0);
1440 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1441 }
1442
1443 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1444 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1445 number. fields[0] specifies the base register field and fields[1]
1446 specifies the offset register field. */
1447 static int
1448 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1449 aarch64_insn code, enum aarch64_modifier_kind kind)
1450 {
1451 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1452 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1453 info->addr.offset.is_reg = TRUE;
1454 info->addr.writeback = FALSE;
1455 info->addr.preind = TRUE;
1456 info->shifter.kind = kind;
1457 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1458 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1459 || info->shifter.amount != 0);
1460 info->shifter.amount_present = (info->shifter.amount != 0);
1461 return 1;
1462 }
1463
1464 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1465 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1466 field and fields[1] specifies the offset register field. */
1467 int
1468 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1469 aarch64_opnd_info *info, aarch64_insn code,
1470 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1471 {
1472 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1473 }
1474
1475 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1476 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1477 field and fields[1] specifies the offset register field. */
1478 int
1479 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1480 aarch64_opnd_info *info, aarch64_insn code,
1481 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1482 {
1483 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1484 }
1485
1486 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1487 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1488 field and fields[1] specifies the offset register field. */
1489 int
1490 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1491 aarch64_opnd_info *info, aarch64_insn code,
1492 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1493 {
1494 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1495 }
1496
1497 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1498 has the raw field value and that the low 8 bits decode to VALUE. */
1499 static int
1500 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1501 {
1502 info->shifter.kind = AARCH64_MOD_LSL;
1503 info->shifter.amount = 0;
1504 if (info->imm.value & 0x100)
1505 {
1506 if (value == 0)
1507 /* Decode 0x100 as #0, LSL #8. */
1508 info->shifter.amount = 8;
1509 else
1510 value *= 256;
1511 }
1512 info->shifter.operator_present = (info->shifter.amount != 0);
1513 info->shifter.amount_present = (info->shifter.amount != 0);
1514 info->imm.value = value;
1515 return 1;
1516 }
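/* Illustrative examples: a raw 9-bit field of 0x101 has bit 8 set and a
   low byte of 1, so the shift is folded in and the operand decodes to the
   value 256, while 0x100 decodes to the canonical #0, LSL #8 form.  */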
1517
1518 /* Decode an SVE ADD/SUB immediate. */
1519 int
1520 aarch64_ext_sve_aimm (const aarch64_operand *self,
1521 aarch64_opnd_info *info, const aarch64_insn code,
1522 const aarch64_inst *inst)
1523 {
1524 return (aarch64_ext_imm (self, info, code, inst)
1525 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1526 }
1527
1528 /* Decode an SVE CPY/DUP immediate. */
1529 int
1530 aarch64_ext_sve_asimm (const aarch64_operand *self,
1531 aarch64_opnd_info *info, const aarch64_insn code,
1532 const aarch64_inst *inst)
1533 {
1534 return (aarch64_ext_imm (self, info, code, inst)
1535 && decode_sve_aimm (info, (int8_t) info->imm.value));
1536 }
1537
1538 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1539 The fields array specifies which field to use. */
1540 int
1541 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1542 aarch64_opnd_info *info, aarch64_insn code,
1543 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1544 {
1545 if (extract_field (self->fields[0], code, 0))
1546 info->imm.value = 0x3f800000;
1547 else
1548 info->imm.value = 0x3f000000;
1549 info->imm.is_fp = TRUE;
1550 return 1;
1551 }
1552
1553 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1554 The fields array specifies which field to use. */
1555 int
1556 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1557 aarch64_opnd_info *info, aarch64_insn code,
1558 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1559 {
1560 if (extract_field (self->fields[0], code, 0))
1561 info->imm.value = 0x40000000;
1562 else
1563 info->imm.value = 0x3f000000;
1564 info->imm.is_fp = TRUE;
1565 return 1;
1566 }
1567
1568 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1569 The fields array specifies which field to use. */
1570 int
1571 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1572 aarch64_opnd_info *info, aarch64_insn code,
1573 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1574 {
1575 if (extract_field (self->fields[0], code, 0))
1576 info->imm.value = 0x3f800000;
1577 else
1578 info->imm.value = 0x0;
1579 info->imm.is_fp = TRUE;
1580 return 1;
1581 }
1582
1583 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1584 array specifies which field to use for Zn. MM is encoded in the
1585 concatenation of imm5 and SVE_tszh, with imm5 being the less
1586 significant part. */
1587 int
1588 aarch64_ext_sve_index (const aarch64_operand *self,
1589 aarch64_opnd_info *info, aarch64_insn code,
1590 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1591 {
1592 int val;
1593
1594 info->reglane.regno = extract_field (self->fields[0], code, 0);
1595 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1596 if ((val & 31) == 0)
1597 return 0;
1598 while ((val & 1) == 0)
1599 val /= 2;
1600 info->reglane.index = val / 2;
1601 return 1;
1602 }
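/* Worked example (illustrative): a tszh:imm5 value of 0b0000110 has its
   lowest set bit at position 1, which selects the .H element size; the
   bits above that marker bit give index 1.  */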
1603
1604 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1605 int
1606 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1607 aarch64_opnd_info *info, const aarch64_insn code,
1608 const aarch64_inst *inst)
1609 {
1610 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1611 return (aarch64_ext_limm (self, info, code, inst)
1612 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1613 }
1614
1615 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1616 and where MM occupies the most-significant part. The operand-dependent
1617 value specifies the number of bits in Zn. */
1618 int
1619 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1620 aarch64_opnd_info *info, aarch64_insn code,
1621 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1622 {
1623 unsigned int reg_bits = get_operand_specific_data (self);
1624 unsigned int val = extract_all_fields (self, code);
1625 info->reglane.regno = val & ((1 << reg_bits) - 1);
1626 info->reglane.index = val >> reg_bits;
1627 return 1;
1628 }
1629
1630 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1631 to use for Zn. The opcode-dependent value specifies the number
1632 of registers in the list. */
1633 int
1634 aarch64_ext_sve_reglist (const aarch64_operand *self,
1635 aarch64_opnd_info *info, aarch64_insn code,
1636 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1637 {
1638 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1639 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1640 return 1;
1641 }
1642
1643 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1644 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1645 field. */
1646 int
1647 aarch64_ext_sve_scale (const aarch64_operand *self,
1648 aarch64_opnd_info *info, aarch64_insn code,
1649 const aarch64_inst *inst)
1650 {
1651 int val;
1652
1653 if (!aarch64_ext_imm (self, info, code, inst))
1654 return 0;
1655 val = extract_field (FLD_SVE_imm4, code, 0);
1656 info->shifter.kind = AARCH64_MOD_MUL;
1657 info->shifter.amount = val + 1;
1658 info->shifter.operator_present = (val != 0);
1659 info->shifter.amount_present = (val != 0);
1660 return 1;
1661 }
1662
1663 /* Return the top set bit in VALUE, which is expected to be relatively
1664 small. */
1665 static uint64_t
1666 get_top_bit (uint64_t value)
1667 {
1668 while ((value & -value) != value)
1669 value -= value & -value;
1670 return value;
1671 }
1672
1673 /* Decode an SVE shift-left immediate. */
1674 int
1675 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1676 aarch64_opnd_info *info, const aarch64_insn code,
1677 const aarch64_inst *inst)
1678 {
1679 if (!aarch64_ext_imm (self, info, code, inst)
1680 || info->imm.value == 0)
1681 return 0;
1682
1683 info->imm.value -= get_top_bit (info->imm.value);
1684 return 1;
1685 }
1686
1687 /* Decode an SVE shift-right immediate. */
1688 int
1689 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1690 aarch64_opnd_info *info, const aarch64_insn code,
1691 const aarch64_inst *inst)
1692 {
1693 if (!aarch64_ext_imm (self, info, code, inst)
1694 || info->imm.value == 0)
1695 return 0;
1696
1697 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1698 return 1;
1699 }
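/* Worked example (illustrative): the encoded tsz:imm value for a .B
   element is 8 + <shift> in the shift-left case, so a raw value of 11
   decodes via get_top_bit to a left-shift amount of 11 - 8 = 3, while
   the same raw value in the shift-right case decodes to 2 * 8 - 11 = 5.  */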
1700 \f
1701 /* Bitfields that are commonly used to encode certain operands' information
1702 may be partially used as part of the base opcode in some instructions.
1703 For example, the bit 1 of the field 'size' in
1704 FCVTXN <Vb><d>, <Va><n>
1705 is actually part of the base opcode, while only size<0> is available
1706 for encoding the register type. Another example is the AdvSIMD
1707 instruction ORR (register), in which the field 'size' is also used for
1708 the base opcode, leaving only the field 'Q' available to encode the
1709 vector register arrangement specifier '8B' or '16B'.
1710
1711 This function tries to deduce the qualifier from the value of partially
1712 constrained field(s). Given the VALUE of such a field or fields, the
1713 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1714 operand encoding), the function returns the matching qualifier or
1715 AARCH64_OPND_QLF_NIL if nothing matches.
1716
1717 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1718 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1719 may end with AARCH64_OPND_QLF_NIL. */
1720
1721 static enum aarch64_opnd_qualifier
1722 get_qualifier_from_partial_encoding (aarch64_insn value,
1723 const enum aarch64_opnd_qualifier* \
1724 candidates,
1725 aarch64_insn mask)
1726 {
1727 int i;
1728 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1729 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1730 {
1731 aarch64_insn standard_value;
1732 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1733 break;
1734 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1735 if ((standard_value & mask) == (value & mask))
1736 return candidates[i];
1737 }
1738 return AARCH64_OPND_QLF_NIL;
1739 }
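
/* Illustrative example (reusing the ORR (register) case from the comment
   above): only the Q bit is free, so MASK covers just that bit, CANDIDATES
   is the { 8B, 16B, NIL } list taken from the opcode's qualifiers_list,
   and a VALUE with Q set matches the standard value of 16B under the mask,
   so the 16B qualifier is returned.  */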
1740
1741 /* Given a list of qualifier sequences, return all possible valid qualifiers
1742 for operand IDX in QUALIFIERS.
1743 Assume QUALIFIERS is an array whose length is large enough. */
1744
1745 static void
1746 get_operand_possible_qualifiers (int idx,
1747 const aarch64_opnd_qualifier_seq_t *list,
1748 enum aarch64_opnd_qualifier *qualifiers)
1749 {
1750 int i;
1751 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1752 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1753 break;
1754 }
1755
1756 /* Decode the size:Q fields for e.g. SHADD.
1757 We tag one operand with the qualifier according to the code;
1758 whether the qualifier is valid for this opcode or not is the
1759 duty of the semantic checking. */
1760
1761 static int
1762 decode_sizeq (aarch64_inst *inst)
1763 {
1764 int idx;
1765 enum aarch64_opnd_qualifier qualifier;
1766 aarch64_insn code;
1767 aarch64_insn value, mask;
1768 enum aarch64_field_kind fld_sz;
1769 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1770
1771 if (inst->opcode->iclass == asisdlse
1772 || inst->opcode->iclass == asisdlsep
1773 || inst->opcode->iclass == asisdlso
1774 || inst->opcode->iclass == asisdlsop)
1775 fld_sz = FLD_vldst_size;
1776 else
1777 fld_sz = FLD_size;
1778
1779 code = inst->value;
1780 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1781 /* Obtain the info about which bits of the fields Q and size are actually
1782 available for operand encoding. Opcodes like FMAXNM and FMLA have
1783 size[1] unavailable. */
1784 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1785
1786 /* The index of the operand that we are going to tag with a qualifier and
1787 the qualifier itself are deduced from the value of the size and Q fields
1788 and the possible valid qualifier lists. */
1789 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1790 DEBUG_TRACE ("key idx: %d", idx);
1791
1792 /* For most related instructions, size:Q is fully available for operand
1793 encoding. */
1794 if (mask == 0x7)
1795 {
1796 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1797 return 1;
1798 }
1799
1800 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1801 candidates);
1802 #ifdef DEBUG_AARCH64
1803 if (debug_dump)
1804 {
1805 int i;
1806 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM
1807 && candidates[i] != AARCH64_OPND_QLF_NIL; ++i)
1808 DEBUG_TRACE ("qualifier %d: %s", i,
1809 aarch64_get_qualifier_name(candidates[i]));
1810 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1811 }
1812 #endif /* DEBUG_AARCH64 */
1813
1814 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1815
1816 if (qualifier == AARCH64_OPND_QLF_NIL)
1817 return 0;
1818
1819 inst->operands[idx].qualifier = qualifier;
1820 return 1;
1821 }
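
/* Illustrative note: when the opcode reserves size[1] as part of the base
   opcode (as noted above for FMAXNM and FMLA), the computed MASK is 0x3
   rather than 0x7, so instead of mapping size:Q directly to a vector
   qualifier the code falls back to matching VALUE against the opcode's own
   candidate qualifiers under that reduced mask.  */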
1822
1823 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1824 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1825
1826 static int
1827 decode_asimd_fcvt (aarch64_inst *inst)
1828 {
1829 aarch64_field field = {0, 0};
1830 aarch64_insn value;
1831 enum aarch64_opnd_qualifier qualifier;
1832
1833 gen_sub_field (FLD_size, 0, 1, &field);
1834 value = extract_field_2 (&field, inst->value, 0);
1835 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1836 : AARCH64_OPND_QLF_V_2D;
1837 switch (inst->opcode->op)
1838 {
1839 case OP_FCVTN:
1840 case OP_FCVTN2:
1841 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1842 inst->operands[1].qualifier = qualifier;
1843 break;
1844 case OP_FCVTL:
1845 case OP_FCVTL2:
1846 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1847 inst->operands[0].qualifier = qualifier;
1848 break;
1849 default:
1850 assert (0);
1851 return 0;
1852 }
1853
1854 return 1;
1855 }
1856
1857 /* Decode size[0], i.e. bit 22, for
1858 e.g. FCVTXN <Vb><d>, <Va><n>. */
1859
1860 static int
1861 decode_asisd_fcvtxn (aarch64_inst *inst)
1862 {
1863 aarch64_field field = {0, 0};
1864 gen_sub_field (FLD_size, 0, 1, &field);
1865 if (!extract_field_2 (&field, inst->value, 0))
1866 return 0;
1867 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1868 return 1;
1869 }
1870
1871 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1872 static int
1873 decode_fcvt (aarch64_inst *inst)
1874 {
1875 enum aarch64_opnd_qualifier qualifier;
1876 aarch64_insn value;
1877 const aarch64_field field = {15, 2};
1878
1879 /* opc dstsize */
1880 value = extract_field_2 (&field, inst->value, 0);
1881 switch (value)
1882 {
1883 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1884 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1885 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1886 default: return 0;
1887 }
1888 inst->operands[0].qualifier = qualifier;
1889
1890 return 1;
1891 }
1892
1893 /* Do miscellaneous decodings that are not common enough to be driven by
1894 flags. */
1895
1896 static int
1897 do_misc_decoding (aarch64_inst *inst)
1898 {
1899 unsigned int value;
1900 switch (inst->opcode->op)
1901 {
1902 case OP_FCVT:
1903 return decode_fcvt (inst);
1904
1905 case OP_FCVTN:
1906 case OP_FCVTN2:
1907 case OP_FCVTL:
1908 case OP_FCVTL2:
1909 return decode_asimd_fcvt (inst);
1910
1911 case OP_FCVTXN_S:
1912 return decode_asisd_fcvtxn (inst);
1913
1914 case OP_MOV_P_P:
1915 case OP_MOVS_P_P:
1916 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1917 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
1918 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1919
1920 case OP_MOV_Z_P_Z:
1921 return (extract_field (FLD_SVE_Zd, inst->value, 0)
1922 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1923
1924 case OP_MOV_Z_V:
1925 /* Index must be zero. */
1926 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1927 return value > 0 && value <= 16 && value == (value & -value);
1928
1929 case OP_MOV_Z_Z:
1930 return (extract_field (FLD_SVE_Zn, inst->value, 0)
1931 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1932
1933 case OP_MOV_Z_Zi:
1934 /* Index must be nonzero. */
1935 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1936 return value > 0 && value != (value & -value);
1937
1938 case OP_MOVM_P_P_P:
1939 return (extract_field (FLD_SVE_Pd, inst->value, 0)
1940 == extract_field (FLD_SVE_Pm, inst->value, 0));
1941
1942 case OP_MOVZS_P_P_P:
1943 case OP_MOVZ_P_P_P:
1944 return (extract_field (FLD_SVE_Pn, inst->value, 0)
1945 == extract_field (FLD_SVE_Pm, inst->value, 0));
1946
1947 case OP_NOTS_P_P_P_Z:
1948 case OP_NOT_P_P_P_Z:
1949 return (extract_field (FLD_SVE_Pm, inst->value, 0)
1950 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1951
1952 default:
1953 return 0;
1954 }
1955 }
1956
1957 /* Opcodes that have fields shared by multiple operands are usually flagged
1958 with flags. In this function, we detect such flags, decode the related
1959 field(s) and store the information in one of the related operands. The
1960 'one' operand is not an arbitrary operand, but one of the operands that
1961 can accommodate all the information that has been decoded. */
1962
1963 static int
1964 do_special_decoding (aarch64_inst *inst)
1965 {
1966 int idx;
1967 aarch64_insn value;
1968 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1969 if (inst->opcode->flags & F_COND)
1970 {
1971 value = extract_field (FLD_cond2, inst->value, 0);
1972 inst->cond = get_cond_from_value (value);
1973 }
1974 /* 'sf' field. */
1975 if (inst->opcode->flags & F_SF)
1976 {
1977 idx = select_operand_for_sf_field_coding (inst->opcode);
1978 value = extract_field (FLD_sf, inst->value, 0);
1979 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1980 if ((inst->opcode->flags & F_N)
1981 && extract_field (FLD_N, inst->value, 0) != value)
1982 return 0;
1983 }
1984 /* 'sf' field. */
1985 if (inst->opcode->flags & F_LSE_SZ)
1986 {
1987 idx = select_operand_for_sf_field_coding (inst->opcode);
1988 value = extract_field (FLD_lse_sz, inst->value, 0);
1989 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1990 }
1991 /* size:Q fields. */
1992 if (inst->opcode->flags & F_SIZEQ)
1993 return decode_sizeq (inst);
1994
1995 if (inst->opcode->flags & F_FPTYPE)
1996 {
1997 idx = select_operand_for_fptype_field_coding (inst->opcode);
1998 value = extract_field (FLD_type, inst->value, 0);
1999 switch (value)
2000 {
2001 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2002 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2003 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2004 default: return 0;
2005 }
2006 }
2007
2008 if (inst->opcode->flags & F_SSIZE)
2009 {
2010 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have size[1] as part
2011 of the base opcode. */
2012 aarch64_insn mask;
2013 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2014 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2015 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2016 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2017 /* For most related instructions, the 'size' field is fully available for
2018 operand encoding. */
2019 if (mask == 0x3)
2020 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2021 else
2022 {
2023 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2024 candidates);
2025 inst->operands[idx].qualifier
2026 = get_qualifier_from_partial_encoding (value, candidates, mask);
2027 }
2028 }
2029
2030 if (inst->opcode->flags & F_T)
2031 {
2032 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2033 int num = 0;
2034 unsigned val, Q;
2035 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2036 == AARCH64_OPND_CLASS_SIMD_REG);
2037 /* imm5<3:0> q <t>
2038 0000 x reserved
2039 xxx1 0 8b
2040 xxx1 1 16b
2041 xx10 0 4h
2042 xx10 1 8h
2043 x100 0 2s
2044 x100 1 4s
2045 1000 0 reserved
2046 1000 1 2d */
2047 val = extract_field (FLD_imm5, inst->value, 0);
2048 while ((val & 0x1) == 0 && ++num <= 3)
2049 val >>= 1;
2050 if (num > 3)
2051 return 0;
2052 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2053 inst->operands[0].qualifier =
2054 get_vreg_qualifier_from_value ((num << 1) | Q);
2055 }
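
  /* Illustrative example for the F_T decoding above (field values invented
     here): imm5 = 0b0100 has two trailing zeros, so num = 2; with Q = 1 the
     value passed to get_vreg_qualifier_from_value is (2 << 1) | 1 = 5,
     i.e. the 4S arrangement, and with Q = 0 it is 4, i.e. 2S, matching the
     table in the comment.  */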
2056
2057 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2058 {
2059 /* Use Rt to encode in the case of e.g.
2060 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2061 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2062 if (idx == -1)
2063 {
2064 /* Otherwise use the result operand, which has to be an integer
2065 register. */
2066 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2067 == AARCH64_OPND_CLASS_INT_REG);
2068 idx = 0;
2069 }
2070 assert (idx == 0 || idx == 1);
2071 value = extract_field (FLD_Q, inst->value, 0);
2072 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2073 }
2074
2075 if (inst->opcode->flags & F_LDS_SIZE)
2076 {
2077 aarch64_field field = {0, 0};
2078 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2079 == AARCH64_OPND_CLASS_INT_REG);
2080 gen_sub_field (FLD_opc, 0, 1, &field);
2081 value = extract_field_2 (&field, inst->value, 0);
2082 inst->operands[0].qualifier
2083 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2084 }
2085
2086 /* Miscellaneous decoding; done as the last step. */
2087 if (inst->opcode->flags & F_MISC)
2088 return do_misc_decoding (inst);
2089
2090 return 1;
2091 }
2092
2093 /* Converters converting a real opcode instruction to its alias form. */
2094
2095 /* ROR <Wd>, <Ws>, #<shift>
2096 is equivalent to:
2097 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2098 static int
2099 convert_extr_to_ror (aarch64_inst *inst)
2100 {
2101 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2102 {
2103 copy_operand_info (inst, 2, 3);
2104 inst->operands[3].type = AARCH64_OPND_NIL;
2105 return 1;
2106 }
2107 return 0;
2108 }
2109
2110 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2111 is equivalent to:
2112 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2113 static int
2114 convert_shll_to_xtl (aarch64_inst *inst)
2115 {
2116 if (inst->operands[2].imm.value == 0)
2117 {
2118 inst->operands[2].type = AARCH64_OPND_NIL;
2119 return 1;
2120 }
2121 return 0;
2122 }
2123
2124 /* Convert
2125 UBFM <Xd>, <Xn>, #<shift>, #63.
2126 to
2127 LSR <Xd>, <Xn>, #<shift>. */
2128 static int
2129 convert_bfm_to_sr (aarch64_inst *inst)
2130 {
2131 int64_t imms, val;
2132
2133 imms = inst->operands[3].imm.value;
2134 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2135 if (imms == val)
2136 {
2137 inst->operands[3].type = AARCH64_OPND_NIL;
2138 return 1;
2139 }
2140
2141 return 0;
2142 }
2143
2144 /* Convert MOV to ORR. */
2145 static int
2146 convert_orr_to_mov (aarch64_inst *inst)
2147 {
2148 /* MOV <Vd>.<T>, <Vn>.<T>
2149 is equivalent to:
2150 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2151 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2152 {
2153 inst->operands[2].type = AARCH64_OPND_NIL;
2154 return 1;
2155 }
2156 return 0;
2157 }
2158
2159 /* When <imms> >= <immr>, the instruction written:
2160 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2161 is equivalent to:
2162 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2163
2164 static int
2165 convert_bfm_to_bfx (aarch64_inst *inst)
2166 {
2167 int64_t immr, imms;
2168
2169 immr = inst->operands[2].imm.value;
2170 imms = inst->operands[3].imm.value;
2171 if (imms >= immr)
2172 {
2173 int64_t lsb = immr;
2174 inst->operands[2].imm.value = lsb;
2175 inst->operands[3].imm.value = imms + 1 - lsb;
2176 /* The two opcodes have different qualifiers for
2177 the immediate operands; reset to help the checking. */
2178 reset_operand_qualifier (inst, 2);
2179 reset_operand_qualifier (inst, 3);
2180 return 1;
2181 }
2182
2183 return 0;
2184 }
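
/* Illustrative example (register and immediate values invented here):
   SBFM X0, X1, #8, #15 has imms >= immr and so is shown as
   SBFX X0, X1, #8, #8, with lsb = 8 and width = 15 + 1 - 8 = 8.  */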
2185
2186 /* When <imms> < <immr>, the instruction written:
2187 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2188 is equivalent to:
2189 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2190
2191 static int
2192 convert_bfm_to_bfi (aarch64_inst *inst)
2193 {
2194 int64_t immr, imms, val;
2195
2196 immr = inst->operands[2].imm.value;
2197 imms = inst->operands[3].imm.value;
2198 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2199 if (imms < immr)
2200 {
2201 inst->operands[2].imm.value = (val - immr) & (val - 1);
2202 inst->operands[3].imm.value = imms + 1;
2203 /* The two opcodes have different qualifiers for
2204 the immediate operands; reset to help the checking. */
2205 reset_operand_qualifier (inst, 2);
2206 reset_operand_qualifier (inst, 3);
2207 return 1;
2208 }
2209
2210 return 0;
2211 }
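
/* Illustrative example (values invented here): SBFM X0, X1, #60, #3 has
   imms < immr and so is shown as SBFIZ X0, X1, #4, #4, with
   lsb = (64 - 60) & 0x3f = 4 and width = 3 + 1 = 4.  */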
2212
2213 /* The instruction written:
2214 BFC <Xd>, #<lsb>, #<width>
2215 is equivalent to:
2216 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2217
2218 static int
2219 convert_bfm_to_bfc (aarch64_inst *inst)
2220 {
2221 int64_t immr, imms, val;
2222
2223 /* Should have been assured by the base opcode value. */
2224 assert (inst->operands[1].reg.regno == 0x1f);
2225
2226 immr = inst->operands[2].imm.value;
2227 imms = inst->operands[3].imm.value;
2228 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2229 if (imms < immr)
2230 {
2231 /* Drop XZR from the second operand. */
2232 copy_operand_info (inst, 1, 2);
2233 copy_operand_info (inst, 2, 3);
2234 inst->operands[3].type = AARCH64_OPND_NIL;
2235
2236 /* Recalculate the immediates. */
2237 inst->operands[1].imm.value = (val - immr) & (val - 1);
2238 inst->operands[2].imm.value = imms + 1;
2239
2240 /* The two opcodes have different qualifiers for the operands; reset to
2241 help the checking. */
2242 reset_operand_qualifier (inst, 1);
2243 reset_operand_qualifier (inst, 2);
2244 reset_operand_qualifier (inst, 3);
2245
2246 return 1;
2247 }
2248
2249 return 0;
2250 }
2251
2252 /* The instruction written:
2253 LSL <Xd>, <Xn>, #<shift>
2254 is equivalent to:
2255 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2256
2257 static int
2258 convert_ubfm_to_lsl (aarch64_inst *inst)
2259 {
2260 int64_t immr = inst->operands[2].imm.value;
2261 int64_t imms = inst->operands[3].imm.value;
2262 int64_t val
2263 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2264
2265 if ((immr == 0 && imms == val) || immr == imms + 1)
2266 {
2267 inst->operands[3].type = AARCH64_OPND_NIL;
2268 inst->operands[2].imm.value = val - imms;
2269 return 1;
2270 }
2271
2272 return 0;
2273 }
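
/* Illustrative example (values invented here): UBFM X0, X1, #60, #59 has
   immr == imms + 1 and so is shown as LSL X0, X1, #4, the shift amount
   being 63 - 59 = 4.  */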
2274
2275 /* CINC <Wd>, <Wn>, <cond>
2276 is equivalent to:
2277 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2278 where <cond> is not AL or NV. */
2279
2280 static int
2281 convert_from_csel (aarch64_inst *inst)
2282 {
2283 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2284 && (inst->operands[3].cond->value & 0xe) != 0xe)
2285 {
2286 copy_operand_info (inst, 2, 3);
2287 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2288 inst->operands[3].type = AARCH64_OPND_NIL;
2289 return 1;
2290 }
2291 return 0;
2292 }
2293
2294 /* CSET <Wd>, <cond>
2295 is equivalent to:
2296 CSINC <Wd>, WZR, WZR, invert(<cond>)
2297 where <cond> is not AL or NV. */
2298
2299 static int
2300 convert_csinc_to_cset (aarch64_inst *inst)
2301 {
2302 if (inst->operands[1].reg.regno == 0x1f
2303 && inst->operands[2].reg.regno == 0x1f
2304 && (inst->operands[3].cond->value & 0xe) != 0xe)
2305 {
2306 copy_operand_info (inst, 1, 3);
2307 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2308 inst->operands[3].type = AARCH64_OPND_NIL;
2309 inst->operands[2].type = AARCH64_OPND_NIL;
2310 return 1;
2311 }
2312 return 0;
2313 }
2314
2315 /* MOV <Wd>, #<imm>
2316 is equivalent to:
2317 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2318
2319 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2320 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2321 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2322 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2323 machine-instruction mnemonic must be used. */
2324
2325 static int
2326 convert_movewide_to_mov (aarch64_inst *inst)
2327 {
2328 uint64_t value = inst->operands[1].imm.value;
2329 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2330 if (value == 0 && inst->operands[1].shifter.amount != 0)
2331 return 0;
2332 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2333 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2334 value <<= inst->operands[1].shifter.amount;
2335 /* As an alias converter, note that INST->OPCODE is the opcode of
2336 the real instruction. */
2337 if (inst->opcode->op == OP_MOVN)
2338 {
2339 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2340 value = ~value;
2341 /* A MOVN has an immediate that could be encoded by MOVZ. */
2342 if (aarch64_wide_constant_p (value, is32, NULL))
2343 return 0;
2344 }
2345 inst->operands[1].imm.value = value;
2346 inst->operands[1].shifter.amount = 0;
2347 return 1;
2348 }
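
/* Illustrative example (values invented here): MOVZ X0, #0x1234, LSL #16
   is shown as MOV X0, #0x12340000, whereas MOVZ X0, #0x0, LSL #16 keeps
   the machine mnemonic because a zero immediate with a non-zero shift
   amount is rejected above.  */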
2349
2350 /* MOV <Wd>, #<imm>
2351 is equivalent to:
2352 ORR <Wd>, WZR, #<imm>.
2353
2354 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2355 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2356 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2357 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2358 machine-instruction mnemonic must be used. */
2359
2360 static int
2361 convert_movebitmask_to_mov (aarch64_inst *inst)
2362 {
2363 int is32;
2364 uint64_t value;
2365
2366 /* Should have been assured by the base opcode value. */
2367 assert (inst->operands[1].reg.regno == 0x1f);
2368 copy_operand_info (inst, 1, 2);
2369 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2370 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2371 value = inst->operands[1].imm.value;
2372 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2373 instruction. */
2374 if (inst->operands[0].reg.regno != 0x1f
2375 && (aarch64_wide_constant_p (value, is32, NULL)
2376 || aarch64_wide_constant_p (~value, is32, NULL)))
2377 return 0;
2378
2379 inst->operands[2].type = AARCH64_OPND_NIL;
2380 return 1;
2381 }
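
/* Illustrative example (immediate invented here and assumed to be a valid
   bitmask immediate): ORR W0, WZR, #0xff00ff is shown as MOV W0, #0xff00ff,
   since neither that value nor its inverse fits a single move-wide
   immediate, whereas ORR W0, WZR, #0xff keeps the ORR form because 0xff is
   expressible by MOVZ.  */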
2382
2383 /* Some alias opcodes are disassembled by being converted from their real-form.
2384 N.B. INST->OPCODE is the real opcode rather than the alias. */
2385
2386 static int
2387 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2388 {
2389 switch (alias->op)
2390 {
2391 case OP_ASR_IMM:
2392 case OP_LSR_IMM:
2393 return convert_bfm_to_sr (inst);
2394 case OP_LSL_IMM:
2395 return convert_ubfm_to_lsl (inst);
2396 case OP_CINC:
2397 case OP_CINV:
2398 case OP_CNEG:
2399 return convert_from_csel (inst);
2400 case OP_CSET:
2401 case OP_CSETM:
2402 return convert_csinc_to_cset (inst);
2403 case OP_UBFX:
2404 case OP_BFXIL:
2405 case OP_SBFX:
2406 return convert_bfm_to_bfx (inst);
2407 case OP_SBFIZ:
2408 case OP_BFI:
2409 case OP_UBFIZ:
2410 return convert_bfm_to_bfi (inst);
2411 case OP_BFC:
2412 return convert_bfm_to_bfc (inst);
2413 case OP_MOV_V:
2414 return convert_orr_to_mov (inst);
2415 case OP_MOV_IMM_WIDE:
2416 case OP_MOV_IMM_WIDEN:
2417 return convert_movewide_to_mov (inst);
2418 case OP_MOV_IMM_LOG:
2419 return convert_movebitmask_to_mov (inst);
2420 case OP_ROR_IMM:
2421 return convert_extr_to_ror (inst);
2422 case OP_SXTL:
2423 case OP_SXTL2:
2424 case OP_UXTL:
2425 case OP_UXTL2:
2426 return convert_shll_to_xtl (inst);
2427 default:
2428 return 0;
2429 }
2430 }
2431
2432 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2433 aarch64_inst *, int);
2434
2435 /* Given the instruction information in *INST, check if the instruction has
2436 any alias form that can be used to represent *INST. If the answer is yes,
2437 update *INST to be in the form of the determined alias. */
2438
2439 /* In the opcode description table, the following flags are used in opcode
2440 entries to help establish the relations between the real and alias opcodes:
2441
2442 F_ALIAS: opcode is an alias
2443 F_HAS_ALIAS: opcode has alias(es)
2444 F_P1
2445 F_P2
2446 F_P3: Disassembly preference priority 1-3 (the larger the number,
2447 the higher the priority). If nothing is specified, the priority
2448 is 0 by default, i.e. the lowest priority.
2449
2450 Although the relation between the machine and the alias instructions is not
2451 explicitly described, it can be easily determined from the base opcode
2452 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2453 description entries:
2454
2455 The mask of an alias opcode must be equal to or a super-set (i.e. more
2456 constrained) of that of the aliased opcode; so is the base opcode value.
2457
2458 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2459 && (opcode->mask & real->mask) == real->mask
2460 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2461 then OPCODE is an alias of, and only of, the REAL instruction
2462
2463 The alias relationship is forced to be flat-structured to keep the related
2464 algorithm simple; an opcode entry cannot have both F_ALIAS and F_HAS_ALIAS.
2465
2466 During the disassembling, the decoding decision tree (in
2467 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2468 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2469 not specified), the disassembler will check whether any alias instruction
2470 exists for this real instruction. If there is, the disassembler will try
2471 to disassemble the 32-bit binary again using the alias's rule, or try to
2472 convert the IR to the form of the alias. In the case of multiple aliases,
2473 the aliases are tried one by one from the highest priority (currently the
2474 flag F_P3) to the lowest priority (no priority flag), and the first one
2475 that succeeds is adopted.
2476
2477 You may ask why there is a need to convert the IR from one form to
2478 another when handling certain aliases. This is because, on the one hand,
2479 it avoids adding more operand code to handle unusual encoding/decoding; on
2480 the other hand, during the disassembling, the conversion is an effective
2481 way to check the condition of an alias (as an alias may be adopted only if
2482 certain conditions are met).
2483
2484 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2485 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2486 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
2487
2488 static void
2489 determine_disassembling_preference (struct aarch64_inst *inst)
2490 {
2491 const aarch64_opcode *opcode;
2492 const aarch64_opcode *alias;
2493
2494 opcode = inst->opcode;
2495
2496 /* This opcode does not have an alias, so use itself. */
2497 if (!opcode_has_alias (opcode))
2498 return;
2499
2500 alias = aarch64_find_alias_opcode (opcode);
2501 assert (alias);
2502
2503 #ifdef DEBUG_AARCH64
2504 if (debug_dump)
2505 {
2506 const aarch64_opcode *tmp = alias;
2507 printf ("#### LIST orderd: ");
2508 while (tmp)
2509 {
2510 printf ("%s, ", tmp->name);
2511 tmp = aarch64_find_next_alias_opcode (tmp);
2512 }
2513 printf ("\n");
2514 }
2515 #endif /* DEBUG_AARCH64 */
2516
2517 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2518 {
2519 DEBUG_TRACE ("try %s", alias->name);
2520 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2521
2522 /* An alias can be a pseudo opcode which will never be used in the
2523 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2524 aliasing AND. */
2525 if (pseudo_opcode_p (alias))
2526 {
2527 DEBUG_TRACE ("skip pseudo %s", alias->name);
2528 continue;
2529 }
2530
2531 if ((inst->value & alias->mask) != alias->opcode)
2532 {
2533 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2534 continue;
2535 }
2536 /* No need to do any complicated transformation on operands, if the alias
2537 opcode does not have any operand. */
2538 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2539 {
2540 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2541 aarch64_replace_opcode (inst, alias);
2542 return;
2543 }
2544 if (alias->flags & F_CONV)
2545 {
2546 aarch64_inst copy;
2547 memcpy (&copy, inst, sizeof (aarch64_inst));
2548 /* ALIAS is the preference as long as the instruction can be
2549 successfully converted to the form of ALIAS. */
2550 if (convert_to_alias (&copy, alias) == 1)
2551 {
2552 aarch64_replace_opcode (&copy, alias);
2553 assert (aarch64_match_operands_constraint (&copy, NULL));
2554 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2555 memcpy (inst, &copy, sizeof (aarch64_inst));
2556 return;
2557 }
2558 }
2559 else
2560 {
2561 /* Directly decode the alias opcode. */
2562 aarch64_inst temp;
2563 memset (&temp, '\0', sizeof (aarch64_inst));
2564 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
2565 {
2566 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2567 memcpy (inst, &temp, sizeof (aarch64_inst));
2568 return;
2569 }
2570 }
2571 }
2572 }
2573
2574 /* Some instructions (including all SVE ones) use the instruction class
2575 to describe how a qualifiers_list index is represented in the instruction
2576 encoding. If INST is such an instruction, decode the appropriate fields
2577 and fill in the operand qualifiers accordingly. Return true if no
2578 problems are found. */
2579
2580 static bfd_boolean
2581 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2582 {
2583 int i, variant;
2584
2585 variant = 0;
2586 switch (inst->opcode->iclass)
2587 {
2588 case sve_cpy:
2589 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2590 break;
2591
2592 case sve_index:
2593 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2594 if ((i & 31) == 0)
2595 return FALSE;
2596 while ((i & 1) == 0)
2597 {
2598 i >>= 1;
2599 variant += 1;
2600 }
2601 break;
2602
2603 case sve_limm:
2604 /* Pick the smallest applicable element size. */
2605 if ((inst->value & 0x20600) == 0x600)
2606 variant = 0;
2607 else if ((inst->value & 0x20400) == 0x400)
2608 variant = 1;
2609 else if ((inst->value & 0x20000) == 0)
2610 variant = 2;
2611 else
2612 variant = 3;
2613 break;
2614
2615 case sve_misc:
2616 /* sve_misc instructions have only a single variant. */
2617 break;
2618
2619 case sve_movprfx:
2620 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2621 break;
2622
2623 case sve_pred_zm:
2624 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2625 break;
2626
2627 case sve_shift_pred:
2628 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2629 sve_shift:
2630 if (i == 0)
2631 return FALSE;
2632 while (i != 1)
2633 {
2634 i >>= 1;
2635 variant += 1;
2636 }
2637 break;
2638
2639 case sve_shift_unpred:
2640 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2641 goto sve_shift;
2642
2643 case sve_size_bhs:
2644 variant = extract_field (FLD_size, inst->value, 0);
2645 if (variant >= 3)
2646 return FALSE;
2647 break;
2648
2649 case sve_size_bhsd:
2650 variant = extract_field (FLD_size, inst->value, 0);
2651 break;
2652
2653 case sve_size_hsd:
2654 i = extract_field (FLD_size, inst->value, 0);
2655 if (i < 1)
2656 return FALSE;
2657 variant = i - 1;
2658 break;
2659
2660 case sve_size_sd:
2661 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2662 break;
2663
2664 default:
2665 /* No mapping between instruction class and qualifiers. */
2666 return TRUE;
2667 }
2668
2669 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2670 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2671 return TRUE;
2672 }
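
/* Illustrative example (field values invented here): for an sve_shift_pred
   instruction with tszh:tszl_8 = 0b0010, the loop above shifts the value
   down to 1 in a single step, so variant 1 is selected and the second row
   of the opcode's qualifiers_list supplies the operand qualifiers.  */
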
2673 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2674 fails, which means that CODE is not an instruction of OPCODE; otherwise
2675 return 1.
2676
2677 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2678 determined and used to disassemble CODE; this is done just before the
2679 return. */
2680
2681 static int
2682 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2683 aarch64_inst *inst, int noaliases_p)
2684 {
2685 int i;
2686
2687 DEBUG_TRACE ("enter with %s", opcode->name);
2688
2689 assert (opcode && inst);
2690
2691 /* Check the base opcode. */
2692 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2693 {
2694 DEBUG_TRACE ("base opcode match FAIL");
2695 goto decode_fail;
2696 }
2697
2698 /* Clear inst. */
2699 memset (inst, '\0', sizeof (aarch64_inst));
2700
2701 inst->opcode = opcode;
2702 inst->value = code;
2703
2704 /* Assign operand codes and indexes. */
2705 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2706 {
2707 if (opcode->operands[i] == AARCH64_OPND_NIL)
2708 break;
2709 inst->operands[i].type = opcode->operands[i];
2710 inst->operands[i].idx = i;
2711 }
2712
2713 /* Call the opcode decoder indicated by flags. */
2714 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2715 {
2716 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2717 goto decode_fail;
2718 }
2719
2720 /* Possibly use the instruction class to determine the correct
2721 qualifier. */
2722 if (!aarch64_decode_variant_using_iclass (inst))
2723 {
2724 DEBUG_TRACE ("iclass-based decoder FAIL");
2725 goto decode_fail;
2726 }
2727
2728 /* Call operand decoders. */
2729 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2730 {
2731 const aarch64_operand *opnd;
2732 enum aarch64_opnd type;
2733
2734 type = opcode->operands[i];
2735 if (type == AARCH64_OPND_NIL)
2736 break;
2737 opnd = &aarch64_operands[type];
2738 if (operand_has_extractor (opnd)
2739 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
2740 {
2741 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2742 goto decode_fail;
2743 }
2744 }
2745
2746 /* If the opcode has a verifier, then check it now. */
2747 if (opcode->verifier && ! opcode->verifier (opcode, code))
2748 {
2749 DEBUG_TRACE ("operand verifier FAIL");
2750 goto decode_fail;
2751 }
2752
2753 /* Match the qualifiers. */
2754 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2755 {
2756 /* Arriving here, the CODE has been determined as a valid instruction
2757 of OPCODE and *INST has been filled with information of this OPCODE
2758 instruction. Before the return, check if the instruction has any
2759 alias and should be disassembled in the form of its alias instead.
2760 If the answer is yes, *INST will be updated. */
2761 if (!noaliases_p)
2762 determine_disassembling_preference (inst);
2763 DEBUG_TRACE ("SUCCESS");
2764 return 1;
2765 }
2766 else
2767 {
2768 DEBUG_TRACE ("constraint matching FAIL");
2769 }
2770
2771 decode_fail:
2772 return 0;
2773 }
2774 \f
2775 /* This does some user-friendly fix-up to *INST. It currently focuses on
2776 the adjustment of qualifiers to help the printed instruction be
2777 recognized/understood more easily. */
2778
2779 static void
2780 user_friendly_fixup (aarch64_inst *inst)
2781 {
2782 switch (inst->opcode->iclass)
2783 {
2784 case testbranch:
2785 /* TBNZ Xn|Wn, #uimm6, label
2786 Test and Branch Not Zero: conditionally jumps to label if bit number
2787 uimm6 in register Xn is not zero. The bit number implies the width of
2788 the register, which may be written and should be disassembled as Wn if
2789 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2790 */
2791 if (inst->operands[1].imm.value < 32)
2792 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2793 break;
2794 default: break;
2795 }
2796 }
2797
2798 /* Decode INSN and fill *INST with the instruction information. An alias
2799 opcode may be filled in *INST if NOALIASES_P is FALSE. Return zero on
2800 success. */
2801
2802 int
2803 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2804 bfd_boolean noaliases_p)
2805 {
2806 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2807
2808 #ifdef DEBUG_AARCH64
2809 if (debug_dump)
2810 {
2811 const aarch64_opcode *tmp = opcode;
2812 printf ("\n");
2813 DEBUG_TRACE ("opcode lookup:");
2814 while (tmp != NULL)
2815 {
2816 aarch64_verbose (" %s", tmp->name);
2817 tmp = aarch64_find_next_opcode (tmp);
2818 }
2819 }
2820 #endif /* DEBUG_AARCH64 */
2821
2822 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2823 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2824 opcode field and value, apart from the difference that one of them has an
2825 extra field as part of the opcode, but such a field is used for operand
2826 encoding in other opcode(s) ('immh' in the case of the example). */
2827 while (opcode != NULL)
2828 {
2829 /* But only one opcode can be decoded successfully, as the
2830 decoding routine will check the constraints carefully. */
2831 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p) == 1)
2832 return ERR_OK;
2833 opcode = aarch64_find_next_opcode (opcode);
2834 }
2835
2836 return ERR_UND;
2837 }
2838
2839 /* Print operands. */
2840
2841 static void
2842 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2843 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2844 {
2845 int i, pcrel_p, num_printed;
2846 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2847 {
2848 char str[128];
2849 /* We mainly rely on the opcode operand info; however, we also look into
2850 the inst->operands to support the disassembling of the optional
2851 operand.
2852 The two operand codes should be the same in all cases, apart from
2853 when the operand can be optional. */
2854 if (opcode->operands[i] == AARCH64_OPND_NIL
2855 || opnds[i].type == AARCH64_OPND_NIL)
2856 break;
2857
2858 /* Generate the operand string in STR. */
2859 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
2860 &info->target);
2861
2862 /* Print the delimiter (taking account of omitted operand(s)). */
2863 if (str[0] != '\0')
2864 (*info->fprintf_func) (info->stream, "%s",
2865 num_printed++ == 0 ? "\t" : ", ");
2866
2867 /* Print the operand. */
2868 if (pcrel_p)
2869 (*info->print_address_func) (info->target, info);
2870 else
2871 (*info->fprintf_func) (info->stream, "%s", str);
2872 }
2873 }
2874
2875 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
2876
2877 static void
2878 remove_dot_suffix (char *name, const aarch64_inst *inst)
2879 {
2880 char *ptr;
2881 size_t len;
2882
2883 ptr = strchr (inst->opcode->name, '.');
2884 assert (ptr && inst->cond);
2885 len = ptr - inst->opcode->name;
2886 assert (len < 8);
2887 strncpy (name, inst->opcode->name, len);
2888 name[len] = '\0';
2889 }
2890
2891 /* Print the instruction mnemonic name. */
2892
2893 static void
2894 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2895 {
2896 if (inst->opcode->flags & F_COND)
2897 {
2898 /* For instructions that are truly conditionally executed, e.g. b.cond,
2899 prepare the full mnemonic name with the corresponding condition
2900 suffix. */
2901 char name[8];
2902
2903 remove_dot_suffix (name, inst);
2904 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2905 }
2906 else
2907 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2908 }
2909
2910 /* Decide whether we need to print a comment after the operands of
2911 instruction INST. */
2912
2913 static void
2914 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
2915 {
2916 if (inst->opcode->flags & F_COND)
2917 {
2918 char name[8];
2919 unsigned int i, num_conds;
2920
2921 remove_dot_suffix (name, inst);
2922 num_conds = ARRAY_SIZE (inst->cond->names);
2923 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
2924 (*info->fprintf_func) (info->stream, "%s %s.%s",
2925 i == 1 ? " //" : ",",
2926 name, inst->cond->names[i]);
2927 }
2928 }
2929
2930 /* Print the instruction according to *INST. */
2931
2932 static void
2933 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2934 struct disassemble_info *info)
2935 {
2936 print_mnemonic_name (inst, info);
2937 print_operands (pc, inst->opcode, inst->operands, info);
2938 print_comment (inst, info);
2939 }
2940
2941 /* Entry-point of the instruction disassembler and printer. */
2942
2943 static void
2944 print_insn_aarch64_word (bfd_vma pc,
2945 uint32_t word,
2946 struct disassemble_info *info)
2947 {
2948 static const char *err_msg[6] =
2949 {
2950 [ERR_OK] = "_",
2951 [-ERR_UND] = "undefined",
2952 [-ERR_UNP] = "unpredictable",
2953 [-ERR_NYI] = "NYI"
2954 };
2955
2956 int ret;
2957 aarch64_inst inst;
2958
2959 info->insn_info_valid = 1;
2960 info->branch_delay_insns = 0;
2961 info->data_size = 0;
2962 info->target = 0;
2963 info->target2 = 0;
2964
2965 if (info->flags & INSN_HAS_RELOC)
2966 /* If the instruction has a reloc associated with it, then
2967 the offset field in the instruction will actually be the
2968 addend for the reloc. (If we are using REL type relocs).
2969 In such cases, we can ignore the pc when computing
2970 addresses, since the addend is not currently pc-relative. */
2971 pc = 0;
2972
2973 ret = aarch64_decode_insn (word, &inst, no_aliases);
2974
2975 if (((word >> 21) & 0x3ff) == 1)
2976 {
2977 /* RESERVED for ALES. */
2978 assert (ret != ERR_OK);
2979 ret = ERR_NYI;
2980 }
2981
2982 switch (ret)
2983 {
2984 case ERR_UND:
2985 case ERR_UNP:
2986 case ERR_NYI:
2987 /* Handle undefined instructions. */
2988 info->insn_type = dis_noninsn;
2989 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
2990 word, err_msg[-ret]);
2991 break;
2992 case ERR_OK:
2993 user_friendly_fixup (&inst);
2994 print_aarch64_insn (pc, &inst, info);
2995 break;
2996 default:
2997 abort ();
2998 }
2999 }
3000
3001 /* Disallow mapping symbols ($x, $d etc) from
3002 being displayed in symbol relative addresses. */
3003
3004 bfd_boolean
3005 aarch64_symbol_is_valid (asymbol * sym,
3006 struct disassemble_info * info ATTRIBUTE_UNUSED)
3007 {
3008 const char * name;
3009
3010 if (sym == NULL)
3011 return FALSE;
3012
3013 name = bfd_asymbol_name (sym);
3014
3015 return name
3016 && (name[0] != '$'
3017 || (name[1] != 'x' && name[1] != 'd')
3018 || (name[2] != '\0' && name[2] != '.'));
3019 }
3020
3021 /* Print data bytes on INFO->STREAM. */
3022
3023 static void
3024 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3025 uint32_t word,
3026 struct disassemble_info *info)
3027 {
3028 switch (info->bytes_per_chunk)
3029 {
3030 case 1:
3031 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3032 break;
3033 case 2:
3034 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3035 break;
3036 case 4:
3037 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3038 break;
3039 default:
3040 abort ();
3041 }
3042 }
3043
3044 /* Try to infer the code or data type from a symbol.
3045 Returns nonzero if *MAP_TYPE was set. */
3046
3047 static int
3048 get_sym_code_type (struct disassemble_info *info, int n,
3049 enum map_type *map_type)
3050 {
3051 elf_symbol_type *es;
3052 unsigned int type;
3053 const char *name;
3054
3055 es = *(elf_symbol_type **)(info->symtab + n);
3056 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3057
3058 /* If the symbol has function type then use that. */
3059 if (type == STT_FUNC)
3060 {
3061 *map_type = MAP_INSN;
3062 return TRUE;
3063 }
3064
3065 /* Check for mapping symbols. */
3066 name = bfd_asymbol_name(info->symtab[n]);
3067 if (name[0] == '$'
3068 && (name[1] == 'x' || name[1] == 'd')
3069 && (name[2] == '\0' || name[2] == '.'))
3070 {
3071 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3072 return TRUE;
3073 }
3074
3075 return FALSE;
3076 }
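
/* For example, a mapping symbol named "$x" or "$x.foo" marks the start of
   A64 code (MAP_INSN), while "$d" or "$d.bar" marks the start of a data
   region (MAP_DATA); a symbol with STT_FUNC type is always treated as
   code.  */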
3077
3078 /* Entry-point of the AArch64 disassembler. */
3079
3080 int
3081 print_insn_aarch64 (bfd_vma pc,
3082 struct disassemble_info *info)
3083 {
3084 bfd_byte buffer[INSNLEN];
3085 int status;
3086 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
3087 bfd_boolean found = FALSE;
3088 unsigned int size = 4;
3089 unsigned long data;
3090
3091 if (info->disassembler_options)
3092 {
3093 set_default_aarch64_dis_options (info);
3094
3095 parse_aarch64_dis_options (info->disassembler_options);
3096
3097 /* To avoid repeated parsing of these options, we remove them here. */
3098 info->disassembler_options = NULL;
3099 }
3100
3101 /* AArch64 instructions are always little-endian. */
3102 info->endian_code = BFD_ENDIAN_LITTLE;
3103
3104 /* First check the full symtab for a mapping symbol, even if there
3105 are no usable non-mapping symbols for this address. */
3106 if (info->symtab_size != 0
3107 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3108 {
3109 enum map_type type = MAP_INSN;
3110 int last_sym = -1;
3111 bfd_vma addr;
3112 int n;
3113
3114 if (pc <= last_mapping_addr)
3115 last_mapping_sym = -1;
3116
3117 /* Start scanning at the start of the function, or wherever
3118 we finished last time. */
3119 n = info->symtab_pos + 1;
3120 if (n < last_mapping_sym)
3121 n = last_mapping_sym;
3122
3123 /* Scan up to the location being disassembled. */
3124 for (; n < info->symtab_size; n++)
3125 {
3126 addr = bfd_asymbol_value (info->symtab[n]);
3127 if (addr > pc)
3128 break;
3129 if ((info->section == NULL
3130 || info->section == info->symtab[n]->section)
3131 && get_sym_code_type (info, n, &type))
3132 {
3133 last_sym = n;
3134 found = TRUE;
3135 }
3136 }
3137
3138 if (!found)
3139 {
3140 n = info->symtab_pos;
3141 if (n < last_mapping_sym)
3142 n = last_mapping_sym;
3143
3144 /* No mapping symbol found at this address. Look backwards
3145 for a preceding one. */
3146 for (; n >= 0; n--)
3147 {
3148 if (get_sym_code_type (info, n, &type))
3149 {
3150 last_sym = n;
3151 found = TRUE;
3152 break;
3153 }
3154 }
3155 }
3156
3157 last_mapping_sym = last_sym;
3158 last_type = type;
3159
3160 /* Look a little bit ahead to see if we should print out
3161 less than four bytes of data. If there's a symbol,
3162 mapping or otherwise, after two bytes then don't
3163 print more. */
3164 if (last_type == MAP_DATA)
3165 {
3166 size = 4 - (pc & 3);
3167 for (n = last_sym + 1; n < info->symtab_size; n++)
3168 {
3169 addr = bfd_asymbol_value (info->symtab[n]);
3170 if (addr > pc)
3171 {
3172 if (addr - pc < size)
3173 size = addr - pc;
3174 break;
3175 }
3176 }
3177 /* If the next symbol is after three bytes, we need to
3178 print only part of the data, so that we can use either
3179 .byte or .short. */
3180 if (size == 3)
3181 size = (pc & 1) ? 1 : 2;
3182 }
3183 }
3184
3185 if (last_type == MAP_DATA)
3186 {
3187 /* size was set above. */
3188 info->bytes_per_chunk = size;
3189 info->display_endian = info->endian;
3190 printer = print_insn_data;
3191 }
3192 else
3193 {
3194 info->bytes_per_chunk = size = INSNLEN;
3195 info->display_endian = info->endian_code;
3196 printer = print_insn_aarch64_word;
3197 }
3198
3199 status = (*info->read_memory_func) (pc, buffer, size, info);
3200 if (status != 0)
3201 {
3202 (*info->memory_error_func) (status, pc, info);
3203 return -1;
3204 }
3205
3206 data = bfd_get_bits (buffer, size * 8,
3207 info->display_endian == BFD_ENDIAN_BIG);
3208
3209 (*printer) (pc, data, info);
3210
3211 return size;
3212 }
3213 \f
3214 void
3215 print_aarch64_disassembler_options (FILE *stream)
3216 {
3217 fprintf (stream, _("\n\
3218 The following AARCH64 specific disassembler options are supported for use\n\
3219 with the -M switch (multiple options should be separated by commas):\n"));
3220
3221 fprintf (stream, _("\n\
3222 no-aliases Don't print instruction aliases.\n"));
3223
3224 fprintf (stream, _("\n\
3225 aliases Do print instruction aliases.\n"));
3226
3227 #ifdef DEBUG_AARCH64
3228 fprintf (stream, _("\n\
3229 debug_dump Temp switch for debug trace.\n"));
3230 #endif /* DEBUG_AARCH64 */
3231
3232 fprintf (stream, _("\n"));
3233 }