[AArch64][Patch 4/5] Support HINT aliases taking operands.
1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "dis-asm.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define ERR_OK 0
30 #define ERR_UND -1
31 #define ERR_UNP -3
32 #define ERR_NYI -5
33
34 #define INSNLEN 4
35
36 /* Cached mapping symbol state. */
37 enum map_type
38 {
39 MAP_INSN,
40 MAP_DATA
41 };
42
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
46
47 /* Other options */
 48 static int no_aliases = 0;	/* If set, disassemble as the most general instruction.  */
49 \f
50
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
61 {
62 no_aliases = 1;
63 return;
64 }
65
66 if (CONST_STRNEQ (option, "aliases"))
67 {
68 no_aliases = 0;
69 return;
70 }
71
72 #ifdef DEBUG_AARCH64
73 if (CONST_STRNEQ (option, "debug_dump"))
74 {
75 debug_dump = 1;
76 return;
77 }
78 #endif /* DEBUG_AARCH64 */
79
80 /* Invalid option. */
81 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
82 }
83
84 static void
85 parse_aarch64_dis_options (const char *options)
86 {
87 const char *option_end;
88
89 if (options == NULL)
90 return;
91
92 while (*options != '\0')
93 {
94 /* Skip empty options. */
95 if (*options == ',')
96 {
97 options++;
98 continue;
99 }
100
 101       /* We know that *options is neither NUL nor a comma.  */
102 option_end = options + 1;
103 while (*option_end != ',' && *option_end != '\0')
104 option_end++;
105
106 parse_aarch64_dis_option (options, option_end - options);
107
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
111 }
112 }
113 \f
114 /* Functions doing the instruction disassembling. */
115
 116 /* The unnamed arguments consist of the number of fields, followed by the
 117    fields themselves; the VALUE will be extracted from CODE and returned.
 118    MASK can be zero or the base mask of the opcode.
 119
 120    N.B. the fields are required to be in such an order that the most significant
 121    field for VALUE comes first, e.g. when the <index> in
 122    SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
 123    is encoded in H:L:M, the fields should be passed in
 124    the order H, L, M.  */
125
126 static inline aarch64_insn
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
128 {
129 uint32_t num;
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
132 va_list va;
133
134 va_start (va, mask);
135 num = va_arg (va, uint32_t);
136 assert (num <= 5);
137 aarch64_insn value = 0x0;
138 while (num--)
139 {
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
144 }
145 return value;
146 }
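/* For illustration, the H:L:M index of a by-element instruction such as
   SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>] could be pulled out with a
   call of the following shape (a sketch; the real call sites appear in the
   operand extractors further down):

     index = extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);

   i.e. the field count first, then the fields from most to least
   significant, so that H ends up in the top bit of the result.  */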
147
 148 /* Sign-extend VALUE, treating bit I as the sign bit.  */
149 static inline int32_t
150 sign_extend (aarch64_insn value, unsigned i)
151 {
152 uint32_t ret = value;
153
154 assert (i < 32);
155 if ((value >> i) & 0x1)
156 {
157 uint32_t val = (uint32_t)(-1) << i;
158 ret = ret | val;
159 }
160 return (int32_t) ret;
161 }
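/* Worked example: a 9-bit two's complement field holding 0x1f0 (binary
   1 1111 0000) represents -16; sign_extend (0x1f0, 8) treats bit 8 as the
   sign bit, ORs in 0xffffff00 and returns (int32_t) 0xfffffff0, i.e. -16.  */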
162
 163 /* N.B. the following inline helper functions create a dependency on the
 164    order of operand qualifier enumerators.  */
165
166 /* Given VALUE, return qualifier for a general purpose register. */
167 static inline enum aarch64_opnd_qualifier
168 get_greg_qualifier_from_value (aarch64_insn value)
169 {
170 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
171 assert (value <= 0x1
172 && aarch64_get_qualifier_standard_value (qualifier) == value);
173 return qualifier;
174 }
175
176 /* Given VALUE, return qualifier for a vector register. */
177 static inline enum aarch64_opnd_qualifier
178 get_vreg_qualifier_from_value (aarch64_insn value)
179 {
180 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
181
182 assert (value <= 0x8
183 && aarch64_get_qualifier_standard_value (qualifier) == value);
184 return qualifier;
185 }
186
187 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
188 static inline enum aarch64_opnd_qualifier
189 get_sreg_qualifier_from_value (aarch64_insn value)
190 {
191 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
192
193 assert (value <= 0x4
194 && aarch64_get_qualifier_standard_value (qualifier) == value);
195 return qualifier;
196 }
197
 198 /* Given the instruction in *INST, which is probably half way through the
 199    decoding, return the qualifier expected for operand I if we can
 200    establish it; otherwise return
 201    AARCH64_OPND_QLF_NIL.  */
202
203 static aarch64_opnd_qualifier_t
204 get_expected_qualifier (const aarch64_inst *inst, int i)
205 {
206 aarch64_opnd_qualifier_seq_t qualifiers;
207 /* Should not be called if the qualifier is known. */
208 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
209 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
210 i, qualifiers))
211 return qualifiers[i];
212 else
213 return AARCH64_OPND_QLF_NIL;
214 }
215
216 /* Operand extractors. */
217
218 int
219 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
220 const aarch64_insn code,
221 const aarch64_inst *inst ATTRIBUTE_UNUSED)
222 {
223 info->reg.regno = extract_field (self->fields[0], code, 0);
224 return 1;
225 }
226
227 int
228 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
229 const aarch64_insn code ATTRIBUTE_UNUSED,
230 const aarch64_inst *inst ATTRIBUTE_UNUSED)
231 {
232 assert (info->idx == 1
 233 	  || info->idx == 3);
234 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
235 return 1;
236 }
237
238 /* e.g. IC <ic_op>{, <Xt>}. */
239 int
240 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
241 const aarch64_insn code,
242 const aarch64_inst *inst ATTRIBUTE_UNUSED)
243 {
244 info->reg.regno = extract_field (self->fields[0], code, 0);
245 assert (info->idx == 1
246 && (aarch64_get_operand_class (inst->operands[0].type)
247 == AARCH64_OPND_CLASS_SYSTEM));
248 /* This will make the constraint checking happy and more importantly will
249 help the disassembler determine whether this operand is optional or
250 not. */
251 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
252
253 return 1;
254 }
255
256 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
257 int
258 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
259 const aarch64_insn code,
260 const aarch64_inst *inst ATTRIBUTE_UNUSED)
261 {
262 /* regno */
263 info->reglane.regno = extract_field (self->fields[0], code,
264 inst->opcode->mask);
265
266 /* Index and/or type. */
267 if (inst->opcode->iclass == asisdone
268 || inst->opcode->iclass == asimdins)
269 {
270 if (info->type == AARCH64_OPND_En
271 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
272 {
273 unsigned shift;
274 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
275 assert (info->idx == 1); /* Vn */
276 aarch64_insn value = extract_field (FLD_imm4, code, 0);
277 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
278 info->qualifier = get_expected_qualifier (inst, info->idx);
279 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
280 info->reglane.index = value >> shift;
281 }
282 else
283 {
284 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
285 imm5<3:0> <V>
286 0000 RESERVED
287 xxx1 B
288 xx10 H
289 x100 S
290 1000 D */
291 int pos = -1;
292 aarch64_insn value = extract_field (FLD_imm5, code, 0);
293 while (++pos <= 3 && (value & 0x1) == 0)
294 value >>= 1;
295 if (pos > 3)
296 return 0;
297 info->qualifier = get_sreg_qualifier_from_value (pos);
298 info->reglane.index = (unsigned) (value >> 1);
299 }
300 }
301 else
302 {
 303       /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
 304          or MUL <Vd>.<T>, <Vn>.<T>, <Vm>.<Ts>[<index>].  */
305
306 /* Need information in other operand(s) to help decoding. */
307 info->qualifier = get_expected_qualifier (inst, info->idx);
308 switch (info->qualifier)
309 {
310 case AARCH64_OPND_QLF_S_H:
311 /* h:l:m */
312 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
313 FLD_M);
314 info->reglane.regno &= 0xf;
315 break;
316 case AARCH64_OPND_QLF_S_S:
317 /* h:l */
318 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
319 break;
320 case AARCH64_OPND_QLF_S_D:
321 /* H */
322 info->reglane.index = extract_field (FLD_H, code, 0);
323 break;
324 default:
325 return 0;
326 }
327 }
328
329 return 1;
330 }
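/* Worked example for the imm5 decoding above (illustrative values): for
   DUP <V><d>, <Vn>.<T>[<index>] with imm5 == 0b00110, the lowest set bit
   is found at position 1, giving the S_H qualifier, and the remaining bits
   0b0011 >> 1 == 1 give index 1, i.e. DUP Hd, Vn.H[1].  */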
331
332 int
333 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
334 const aarch64_insn code,
335 const aarch64_inst *inst ATTRIBUTE_UNUSED)
336 {
337 /* R */
338 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
339 /* len */
340 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
341 return 1;
342 }
343
344 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
345 int
346 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
347 aarch64_opnd_info *info, const aarch64_insn code,
348 const aarch64_inst *inst)
349 {
350 aarch64_insn value;
351 /* Number of elements in each structure to be loaded/stored. */
352 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
353
354 struct
355 {
356 unsigned is_reserved;
357 unsigned num_regs;
358 unsigned num_elements;
359 } data [] =
360 { {0, 4, 4},
361 {1, 4, 4},
362 {0, 4, 1},
363 {0, 4, 2},
364 {0, 3, 3},
365 {1, 3, 3},
366 {0, 3, 1},
367 {0, 1, 1},
368 {0, 2, 2},
369 {1, 2, 2},
370 {0, 2, 1},
371 };
372
373 /* Rt */
374 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
375 /* opcode */
376 value = extract_field (FLD_opcode, code, 0);
377 if (expected_num != data[value].num_elements || data[value].is_reserved)
378 return 0;
379 info->reglist.num_regs = data[value].num_regs;
380
381 return 1;
382 }
383
384 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
385 lanes instructions. */
386 int
387 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
388 aarch64_opnd_info *info, const aarch64_insn code,
389 const aarch64_inst *inst)
390 {
391 aarch64_insn value;
392
393 /* Rt */
394 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
395 /* S */
396 value = extract_field (FLD_S, code, 0);
397
398 /* Number of registers is equal to the number of elements in
399 each structure to be loaded/stored. */
400 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
401 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
402
403 /* Except when it is LD1R. */
404 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
405 info->reglist.num_regs = 2;
406
407 return 1;
408 }
409
410 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
411 load/store single element instructions. */
412 int
413 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
414 aarch64_opnd_info *info, const aarch64_insn code,
415 const aarch64_inst *inst ATTRIBUTE_UNUSED)
416 {
417 aarch64_field field = {0, 0};
418 aarch64_insn QSsize; /* fields Q:S:size. */
419 aarch64_insn opcodeh2; /* opcode<2:1> */
420
421 /* Rt */
422 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
423
424 /* Decode the index, opcode<2:1> and size. */
425 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
426 opcodeh2 = extract_field_2 (&field, code, 0);
427 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
428 switch (opcodeh2)
429 {
430 case 0x0:
431 info->qualifier = AARCH64_OPND_QLF_S_B;
432 /* Index encoded in "Q:S:size". */
433 info->reglist.index = QSsize;
434 break;
435 case 0x1:
436 if (QSsize & 0x1)
437 /* UND. */
438 return 0;
439 info->qualifier = AARCH64_OPND_QLF_S_H;
440 /* Index encoded in "Q:S:size<1>". */
441 info->reglist.index = QSsize >> 1;
442 break;
443 case 0x2:
444 if ((QSsize >> 1) & 0x1)
445 /* UND. */
446 return 0;
447 if ((QSsize & 0x1) == 0)
448 {
449 info->qualifier = AARCH64_OPND_QLF_S_S;
450 /* Index encoded in "Q:S". */
451 info->reglist.index = QSsize >> 2;
452 }
453 else
454 {
455 if (extract_field (FLD_S, code, 0))
456 /* UND */
457 return 0;
458 info->qualifier = AARCH64_OPND_QLF_S_D;
459 /* Index encoded in "Q". */
460 info->reglist.index = QSsize >> 3;
461 }
462 break;
463 default:
464 return 0;
465 }
466
467 info->reglist.has_index = 1;
468 info->reglist.num_regs = 0;
469 /* Number of registers is equal to the number of elements in
470 each structure to be loaded/stored. */
471 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
472 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
473
474 return 1;
475 }
476
477 /* Decode fields immh:immb and/or Q for e.g.
478 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
479 or SSHR <V><d>, <V><n>, #<shift>. */
480
481 int
482 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
483 aarch64_opnd_info *info, const aarch64_insn code,
484 const aarch64_inst *inst)
485 {
486 int pos;
487 aarch64_insn Q, imm, immh;
488 enum aarch64_insn_class iclass = inst->opcode->iclass;
489
490 immh = extract_field (FLD_immh, code, 0);
491 if (immh == 0)
492 return 0;
493 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
494 pos = 4;
495 /* Get highest set bit in immh. */
496 while (--pos >= 0 && (immh & 0x8) == 0)
497 immh <<= 1;
498
499 assert ((iclass == asimdshf || iclass == asisdshf)
500 && (info->type == AARCH64_OPND_IMM_VLSR
501 || info->type == AARCH64_OPND_IMM_VLSL));
502
503 if (iclass == asimdshf)
504 {
505 Q = extract_field (FLD_Q, code, 0);
506 /* immh Q <T>
507 0000 x SEE AdvSIMD modified immediate
508 0001 0 8B
509 0001 1 16B
510 001x 0 4H
511 001x 1 8H
512 01xx 0 2S
513 01xx 1 4S
514 1xxx 0 RESERVED
515 1xxx 1 2D */
516 info->qualifier =
517 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
518 }
519 else
520 info->qualifier = get_sreg_qualifier_from_value (pos);
521
522 if (info->type == AARCH64_OPND_IMM_VLSR)
523 /* immh <shift>
524 0000 SEE AdvSIMD modified immediate
525 0001 (16-UInt(immh:immb))
526 001x (32-UInt(immh:immb))
527 01xx (64-UInt(immh:immb))
528 1xxx (128-UInt(immh:immb)) */
529 info->imm.value = (16 << pos) - imm;
530 else
531 /* immh:immb
532 immh <shift>
533 0000 SEE AdvSIMD modified immediate
534 0001 (UInt(immh:immb)-8)
535 001x (UInt(immh:immb)-16)
536 01xx (UInt(immh:immb)-32)
537 1xxx (UInt(immh:immb)-64) */
538 info->imm.value = imm - (8 << pos);
539
540 return 1;
541 }
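/* Worked example (illustrative): for an asimdshf SSHR with Q == 0 and
   immh:immb == 0b0010010 (decimal 18), the highest set bit of immh is at
   position 1, so the arrangement is get_vreg_qualifier_from_value (2),
   i.e. 4H, and the right-shift amount is (16 << 1) - 18 == 14, giving
   SSHR <Vd>.4H, <Vn>.4H, #14.  */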
542
 543 /* Decode the shift amount (8, 16 or 32, from the size field) for
     e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>.  */
544 int
545 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
546 aarch64_opnd_info *info, const aarch64_insn code,
547 const aarch64_inst *inst ATTRIBUTE_UNUSED)
548 {
549 int64_t imm;
550 aarch64_insn val;
551 val = extract_field (FLD_size, code, 0);
552 switch (val)
553 {
554 case 0: imm = 8; break;
555 case 1: imm = 16; break;
556 case 2: imm = 32; break;
557 default: return 0;
558 }
559 info->imm.value = imm;
560 return 1;
561 }
562
 563 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
 564    The value in the field(s) will be extracted as an unsigned immediate value.  */
565 int
566 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
567 const aarch64_insn code,
568 const aarch64_inst *inst ATTRIBUTE_UNUSED)
569 {
570 int64_t imm;
571 /* Maximum of two fields to extract. */
572 assert (self->fields[2] == FLD_NIL);
573
574 if (self->fields[1] == FLD_NIL)
575 imm = extract_field (self->fields[0], code, 0);
576 else
577 /* e.g. TBZ b5:b40. */
578 imm = extract_fields (code, 0, 2, self->fields[0], self->fields[1]);
579
580 if (info->type == AARCH64_OPND_FPIMM)
581 info->imm.is_fp = 1;
582
583 if (operand_need_sign_extension (self))
584 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
585
586 if (operand_need_shift_by_two (self))
587 imm <<= 2;
588
589 if (info->type == AARCH64_OPND_ADDR_ADRP)
590 imm <<= 12;
591
592 info->imm.value = imm;
593 return 1;
594 }
595
596 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
597 int
598 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
599 const aarch64_insn code,
600 const aarch64_inst *inst ATTRIBUTE_UNUSED)
601 {
602 aarch64_ext_imm (self, info, code, inst);
603 info->shifter.kind = AARCH64_MOD_LSL;
604 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
605 return 1;
606 }
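/* For example, a MOVZ with hw == 0b10 is decoded as LSL #32, since the
   shift amount is simply the hw field scaled by 16.  */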
607
608 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
609 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
610 int
611 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
612 aarch64_opnd_info *info,
613 const aarch64_insn code,
614 const aarch64_inst *inst ATTRIBUTE_UNUSED)
615 {
616 uint64_t imm;
617 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
618 aarch64_field field = {0, 0};
619
620 assert (info->idx == 1);
621
622 if (info->type == AARCH64_OPND_SIMD_FPIMM)
623 info->imm.is_fp = 1;
624
625 /* a:b:c:d:e:f:g:h */
626 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
627 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
628 {
629 /* Either MOVI <Dd>, #<imm>
630 or MOVI <Vd>.2D, #<imm>.
631 <imm> is a 64-bit immediate
632 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
633 encoded in "a:b:c:d:e:f:g:h". */
634 int i;
635 unsigned abcdefgh = imm;
636 for (imm = 0ull, i = 0; i < 8; i++)
637 if (((abcdefgh >> i) & 0x1) != 0)
638 imm |= 0xffull << (8 * i);
639 }
640 info->imm.value = imm;
641
642 /* cmode */
643 info->qualifier = get_expected_qualifier (inst, info->idx);
644 switch (info->qualifier)
645 {
646 case AARCH64_OPND_QLF_NIL:
647 /* no shift */
648 info->shifter.kind = AARCH64_MOD_NONE;
649 return 1;
650 case AARCH64_OPND_QLF_LSL:
651 /* shift zeros */
652 info->shifter.kind = AARCH64_MOD_LSL;
653 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
654 {
655 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
656 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
657 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
658 default: assert (0); return 0;
659 }
660 /* 00: 0; 01: 8; 10:16; 11:24. */
661 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
662 break;
663 case AARCH64_OPND_QLF_MSL:
664 /* shift ones */
665 info->shifter.kind = AARCH64_MOD_MSL;
666 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
667 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
668 break;
669 default:
670 assert (0);
671 return 0;
672 }
673
674 return 1;
675 }
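/* Worked example for the 64-bit replication above (illustrative): with the
   extracted "a:b:c:d:e:f:g:h" value equal to 0b00000101, bits 0 and 2 are
   set, so bytes 0 and 2 of the result are filled with 0xff, giving the
   immediate 0x0000000000ff00ff for MOVI <Dd>, #<imm> or MOVI <Vd>.2D, #<imm>.  */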
676
677 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
678 int
679 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
680 aarch64_opnd_info *info, const aarch64_insn code,
681 const aarch64_inst *inst ATTRIBUTE_UNUSED)
682 {
 683   info->imm.value = 64 - extract_field (FLD_scale, code, 0);
684 return 1;
685 }
686
687 /* Decode arithmetic immediate for e.g.
688 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
689 int
690 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
691 aarch64_opnd_info *info, const aarch64_insn code,
692 const aarch64_inst *inst ATTRIBUTE_UNUSED)
693 {
694 aarch64_insn value;
695
696 info->shifter.kind = AARCH64_MOD_LSL;
697 /* shift */
698 value = extract_field (FLD_shift, code, 0);
699 if (value >= 2)
700 return 0;
701 info->shifter.amount = value ? 12 : 0;
702 /* imm12 (unsigned) */
703 info->imm.value = extract_field (FLD_imm12, code, 0);
704
705 return 1;
706 }
707
708 /* Decode logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
709
710 int
711 aarch64_ext_limm (const aarch64_operand *self ATTRIBUTE_UNUSED,
712 aarch64_opnd_info *info, const aarch64_insn code,
713 const aarch64_inst *inst ATTRIBUTE_UNUSED)
714 {
715 uint64_t imm, mask;
716 uint32_t sf;
717 uint32_t N, R, S;
718 unsigned simd_size;
719 aarch64_insn value;
720
721 value = extract_fields (code, 0, 3, FLD_N, FLD_immr, FLD_imms);
722 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_W
723 || inst->operands[0].qualifier == AARCH64_OPND_QLF_X);
724 sf = aarch64_get_qualifier_esize (inst->operands[0].qualifier) != 4;
725
726 /* value is N:immr:imms. */
727 S = value & 0x3f;
728 R = (value >> 6) & 0x3f;
729 N = (value >> 12) & 0x1;
730
731 if (sf == 0 && N == 1)
732 return 0;
733
 734   /* The immediate value is S+1 consecutive bits set to 1, left-rotated by
 735      SIMDsize - R (in other words, right-rotated by R), then replicated.  */
736 if (N != 0)
737 {
738 simd_size = 64;
739 mask = 0xffffffffffffffffull;
740 }
741 else
742 {
743 switch (S)
744 {
745 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
746 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
747 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
748 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
749 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
750 default: return 0;
751 }
752 mask = (1ull << simd_size) - 1;
753 /* Top bits are IGNORED. */
754 R &= simd_size - 1;
755 }
756 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
757 if (S == simd_size - 1)
758 return 0;
759 /* S+1 consecutive bits to 1. */
760 /* NOTE: S can't be 63 due to detection above. */
761 imm = (1ull << (S + 1)) - 1;
762 /* Rotate to the left by simd_size - R. */
763 if (R != 0)
764 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
765 /* Replicate the value according to SIMD size. */
766 switch (simd_size)
767 {
 768     case  2: imm = (imm <<  2) | imm; /* Fall through.  */
 769     case  4: imm = (imm <<  4) | imm; /* Fall through.  */
 770     case  8: imm = (imm <<  8) | imm; /* Fall through.  */
 771     case 16: imm = (imm << 16) | imm; /* Fall through.  */
 772     case 32: imm = (imm << 32) | imm; /* Fall through.  */
773 case 64: break;
774 default: assert (0); return 0;
775 }
776
777 info->imm.value = sf ? imm : imm & 0xffffffff;
778
779 return 1;
780 }
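/* Worked example (illustrative): N == 0, immr == 0 and imms == 0b111100
   select a 2-bit element with S == 0, i.e. the pattern 0b01; replicating it
   across 64 bits yields 0x5555555555555555 (or 0x55555555 when the
   destination is a W register).  */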
781
782 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
783 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
784 int
785 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
786 aarch64_opnd_info *info,
787 const aarch64_insn code, const aarch64_inst *inst)
788 {
789 aarch64_insn value;
790
791 /* Rt */
792 info->reg.regno = extract_field (FLD_Rt, code, 0);
793
794 /* size */
795 value = extract_field (FLD_ldst_size, code, 0);
796 if (inst->opcode->iclass == ldstpair_indexed
797 || inst->opcode->iclass == ldstnapair_offs
798 || inst->opcode->iclass == ldstpair_off
799 || inst->opcode->iclass == loadlit)
800 {
801 enum aarch64_opnd_qualifier qualifier;
802 switch (value)
803 {
804 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
805 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
806 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
807 default: return 0;
808 }
809 info->qualifier = qualifier;
810 }
811 else
812 {
813 /* opc1:size */
814 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
815 if (value > 0x4)
816 return 0;
817 info->qualifier = get_sreg_qualifier_from_value (value);
818 }
819
820 return 1;
821 }
822
823 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
824 int
825 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
826 aarch64_opnd_info *info,
827 aarch64_insn code,
828 const aarch64_inst *inst ATTRIBUTE_UNUSED)
829 {
830 /* Rn */
831 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
832 return 1;
833 }
834
835 /* Decode the address operand for e.g.
836 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
837 int
838 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
839 aarch64_opnd_info *info,
840 aarch64_insn code, const aarch64_inst *inst)
841 {
842 aarch64_insn S, value;
843
844 /* Rn */
845 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
846 /* Rm */
847 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
848 /* option */
849 value = extract_field (FLD_option, code, 0);
850 info->shifter.kind =
851 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
852 /* Fix-up the shifter kind; although the table-driven approach is
853 efficient, it is slightly inflexible, thus needing this fix-up. */
854 if (info->shifter.kind == AARCH64_MOD_UXTX)
855 info->shifter.kind = AARCH64_MOD_LSL;
856 /* S */
857 S = extract_field (FLD_S, code, 0);
858 if (S == 0)
859 {
860 info->shifter.amount = 0;
861 info->shifter.amount_present = 0;
862 }
863 else
864 {
865 int size;
866 /* Need information in other operand(s) to help achieve the decoding
867 from 'S' field. */
868 info->qualifier = get_expected_qualifier (inst, info->idx);
869 /* Get the size of the data element that is accessed, which may be
870 different from that of the source register size, e.g. in strb/ldrb. */
871 size = aarch64_get_qualifier_esize (info->qualifier);
872 info->shifter.amount = get_logsz (size);
873 info->shifter.amount_present = 1;
874 }
875
876 return 1;
877 }
878
879 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
880 int
881 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
882 aarch64_insn code, const aarch64_inst *inst)
883 {
884 aarch64_insn imm;
885 info->qualifier = get_expected_qualifier (inst, info->idx);
886
887 /* Rn */
888 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
889 /* simm (imm9 or imm7) */
890 imm = extract_field (self->fields[0], code, 0);
891 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
892 if (self->fields[0] == FLD_imm7)
893 /* scaled immediate in ld/st pair instructions. */
894 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
895 /* qualifier */
896 if (inst->opcode->iclass == ldst_unscaled
897 || inst->opcode->iclass == ldstnapair_offs
898 || inst->opcode->iclass == ldstpair_off
899 || inst->opcode->iclass == ldst_unpriv)
900 info->addr.writeback = 0;
901 else
902 {
903 /* pre/post- index */
904 info->addr.writeback = 1;
905 if (extract_field (self->fields[1], code, 0) == 1)
906 info->addr.preind = 1;
907 else
908 info->addr.postind = 1;
909 }
910
911 return 1;
912 }
913
914 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
915 int
916 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
917 aarch64_insn code,
918 const aarch64_inst *inst ATTRIBUTE_UNUSED)
919 {
920 int shift;
921 info->qualifier = get_expected_qualifier (inst, info->idx);
922 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
923 /* Rn */
924 info->addr.base_regno = extract_field (self->fields[0], code, 0);
925 /* uimm12 */
926 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
927 return 1;
928 }
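/* For example, LDR <Xt>, [<Xn|SP>{, #<pimm>}] has an X qualifier, so the
   element size is 8 and the unsigned 12-bit field is scaled by 8: a uimm12
   of 3 is printed as an offset of #24.  */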
929
930 /* Decode the address operand for e.g.
931 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
932 int
933 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
934 aarch64_opnd_info *info,
935 aarch64_insn code, const aarch64_inst *inst)
936 {
937 /* The opcode dependent area stores the number of elements in
938 each structure to be loaded/stored. */
939 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
940
941 /* Rn */
942 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
943 /* Rm | #<amount> */
944 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
945 if (info->addr.offset.regno == 31)
946 {
947 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
948 /* Special handling of loading single structure to all lane. */
949 info->addr.offset.imm = (is_ld1r ? 1
950 : inst->operands[0].reglist.num_regs)
951 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
952 else
953 info->addr.offset.imm = inst->operands[0].reglist.num_regs
954 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
955 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
956 }
957 else
958 info->addr.offset.is_reg = 1;
959 info->addr.writeback = 1;
960
961 return 1;
962 }
963
964 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
965 int
966 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
967 aarch64_opnd_info *info,
968 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
969 {
970 aarch64_insn value;
971 /* cond */
972 value = extract_field (FLD_cond, code, 0);
973 info->cond = get_cond_from_value (value);
974 return 1;
975 }
976
977 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
978 int
979 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
980 aarch64_opnd_info *info,
981 aarch64_insn code,
982 const aarch64_inst *inst ATTRIBUTE_UNUSED)
983 {
984 /* op0:op1:CRn:CRm:op2 */
985 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
986 FLD_CRm, FLD_op2);
987 return 1;
988 }
989
990 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
991 int
992 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
993 aarch64_opnd_info *info, aarch64_insn code,
994 const aarch64_inst *inst ATTRIBUTE_UNUSED)
995 {
996 int i;
997 /* op1:op2 */
998 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
999 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1000 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1001 return 1;
1002 /* Reserved value in <pstatefield>. */
1003 return 0;
1004 }
1005
1006 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1007 int
1008 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1009 aarch64_opnd_info *info,
1010 aarch64_insn code,
1011 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1012 {
1013 int i;
1014 aarch64_insn value;
1015 const aarch64_sys_ins_reg *sysins_ops;
1016 /* op0:op1:CRn:CRm:op2 */
1017 value = extract_fields (code, 0, 5,
1018 FLD_op0, FLD_op1, FLD_CRn,
1019 FLD_CRm, FLD_op2);
1020
1021 switch (info->type)
1022 {
1023 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1024 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1025 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1026 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1027 default: assert (0); return 0;
1028 }
1029
1030 for (i = 0; sysins_ops[i].name != NULL; ++i)
1031 if (sysins_ops[i].value == value)
1032 {
1033 info->sysins_op = sysins_ops + i;
1034 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1035 info->sysins_op->name,
1036 (unsigned)info->sysins_op->value,
1037 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1038 return 1;
1039 }
1040
1041 return 0;
1042 }
1043
1044 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1045
1046 int
1047 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1048 aarch64_opnd_info *info,
1049 aarch64_insn code,
1050 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1051 {
1052 /* CRm */
1053 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1054 return 1;
1055 }
1056
1057 /* Decode the prefetch operation option operand for e.g.
1058 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1059
1060 int
1061 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1062 aarch64_opnd_info *info,
1063 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1064 {
1065 /* prfop in Rt */
1066 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1067 return 1;
1068 }
1069
1070 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1071 to the matching name/value pair in aarch64_hint_options. */
1072
1073 int
1074 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1075 aarch64_opnd_info *info,
1076 aarch64_insn code,
1077 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1078 {
1079 /* CRm:op2. */
1080 unsigned hint_number;
1081 int i;
1082
1083 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1084
1085 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1086 {
1087 if (hint_number == aarch64_hint_options[i].value)
1088 {
1089 info->hint_option = &(aarch64_hint_options[i]);
1090 return 1;
1091 }
1092 }
1093
1094 return 0;
1095 }
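/* For example, a HINT with CRm == 0b0010 and op2 == 0b001 yields hint
   number 17, which (assuming the corresponding entry is present in
   aarch64_hint_options) is printed using its named alias, PSB CSYNC,
   rather than as HINT #0x11.  */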
1096
1097 /* Decode the extended register operand for e.g.
1098 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1099 int
1100 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1101 aarch64_opnd_info *info,
1102 aarch64_insn code,
1103 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1104 {
1105 aarch64_insn value;
1106
1107 /* Rm */
1108 info->reg.regno = extract_field (FLD_Rm, code, 0);
1109 /* option */
1110 value = extract_field (FLD_option, code, 0);
1111 info->shifter.kind =
1112 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1113 /* imm3 */
1114 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1115
1116 /* This makes the constraint checking happy. */
1117 info->shifter.operator_present = 1;
1118
1119 /* Assume inst->operands[0].qualifier has been resolved. */
1120 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1121 info->qualifier = AARCH64_OPND_QLF_W;
1122 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1123 && (info->shifter.kind == AARCH64_MOD_UXTX
1124 || info->shifter.kind == AARCH64_MOD_SXTX))
1125 info->qualifier = AARCH64_OPND_QLF_X;
1126
1127 return 1;
1128 }
1129
1130 /* Decode the shifted register operand for e.g.
1131 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1132 int
1133 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1134 aarch64_opnd_info *info,
1135 aarch64_insn code,
1136 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1137 {
1138 aarch64_insn value;
1139
1140 /* Rm */
1141 info->reg.regno = extract_field (FLD_Rm, code, 0);
1142 /* shift */
1143 value = extract_field (FLD_shift, code, 0);
1144 info->shifter.kind =
1145 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1146 if (info->shifter.kind == AARCH64_MOD_ROR
1147 && inst->opcode->iclass != log_shift)
1148 /* ROR is not available for the shifted register operand in arithmetic
1149 instructions. */
1150 return 0;
1151 /* imm6 */
1152 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1153
1154 /* This makes the constraint checking happy. */
1155 info->shifter.operator_present = 1;
1156
1157 return 1;
1158 }
1159 \f
1160 /* Bitfields that are commonly used to encode certain operands' information
1161 may be partially used as part of the base opcode in some instructions.
1162 For example, the bit 1 of the field 'size' in
1163 FCVTXN <Vb><d>, <Va><n>
1164 is actually part of the base opcode, while only size<0> is available
1165 for encoding the register type. Another example is the AdvSIMD
1166 instruction ORR (register), in which the field 'size' is also used for
1167 the base opcode, leaving only the field 'Q' available to encode the
1168 vector register arrangement specifier '8B' or '16B'.
1169
1170 This function tries to deduce the qualifier from the value of partially
1171 constrained field(s). Given the VALUE of such a field or fields, the
1172 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1173 operand encoding), the function returns the matching qualifier or
1174 AARCH64_OPND_QLF_NIL if nothing matches.
1175
1176 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1177 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1178 may end with AARCH64_OPND_QLF_NIL. */
1179
1180 static enum aarch64_opnd_qualifier
1181 get_qualifier_from_partial_encoding (aarch64_insn value,
1182 const enum aarch64_opnd_qualifier* \
1183 candidates,
1184 aarch64_insn mask)
1185 {
1186 int i;
1187 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1188 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1189 {
1190 aarch64_insn standard_value;
1191 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1192 break;
1193 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1194 if ((standard_value & mask) == (value & mask))
1195 return candidates[i];
1196 }
1197 return AARCH64_OPND_QLF_NIL;
1198 }
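/* For example, for the AdvSIMD ORR (register) mentioned above only the Q
   bit is free, so MASK covers just Q; with candidates {8B, 16B} a VALUE
   whose Q bit is 0 matches 8B and a VALUE whose Q bit is 1 matches 16B,
   because only the masked bits take part in the comparison.  */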
1199
1200 /* Given a list of qualifier sequences, return all possible valid qualifiers
1201 for operand IDX in QUALIFIERS.
1202 Assume QUALIFIERS is an array whose length is large enough. */
1203
1204 static void
1205 get_operand_possible_qualifiers (int idx,
1206 const aarch64_opnd_qualifier_seq_t *list,
1207 enum aarch64_opnd_qualifier *qualifiers)
1208 {
1209 int i;
1210 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1211 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1212 break;
1213 }
1214
 1215 /* Decode the size:Q fields for e.g. SHADD.
 1216    We tag one operand with the qualifier according to the code;
 1217    whether the qualifier is valid for this opcode or not is the
 1218    duty of the semantic checking.  */
1219
1220 static int
1221 decode_sizeq (aarch64_inst *inst)
1222 {
1223 int idx;
1224 enum aarch64_opnd_qualifier qualifier;
1225 aarch64_insn code;
1226 aarch64_insn value, mask;
1227 enum aarch64_field_kind fld_sz;
1228 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1229
1230 if (inst->opcode->iclass == asisdlse
1231 || inst->opcode->iclass == asisdlsep
1232 || inst->opcode->iclass == asisdlso
1233 || inst->opcode->iclass == asisdlsop)
1234 fld_sz = FLD_vldst_size;
1235 else
1236 fld_sz = FLD_size;
1237
1238 code = inst->value;
1239 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
 1240   /* Work out which bits of the fields Q and size are actually
 1241      available for operand encoding.  Opcodes like FMAXNM and FMLA have
1242 size[1] unavailable. */
1243 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1244
 1245   /* The index of the operand that we are going to tag with a qualifier, and
 1246      the qualifier itself, are deduced from the value of the size and Q fields
 1247      and the possible valid qualifier lists.  */
1248 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1249 DEBUG_TRACE ("key idx: %d", idx);
1250
 1251   /* For most related instructions, size:Q is fully available for operand
 1252      encoding.  */
1253 if (mask == 0x7)
1254 {
1255 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1256 return 1;
1257 }
1258
1259 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1260 candidates);
1261 #ifdef DEBUG_AARCH64
1262 if (debug_dump)
1263 {
1264 int i;
1265 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1266 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1267 DEBUG_TRACE ("qualifier %d: %s", i,
1268 aarch64_get_qualifier_name(candidates[i]));
1269 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1270 }
1271 #endif /* DEBUG_AARCH64 */
1272
1273 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1274
1275 if (qualifier == AARCH64_OPND_QLF_NIL)
1276 return 0;
1277
1278 inst->operands[idx].qualifier = qualifier;
1279 return 1;
1280 }
1281
1282 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1283 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1284
1285 static int
1286 decode_asimd_fcvt (aarch64_inst *inst)
1287 {
1288 aarch64_field field = {0, 0};
1289 aarch64_insn value;
1290 enum aarch64_opnd_qualifier qualifier;
1291
1292 gen_sub_field (FLD_size, 0, 1, &field);
1293 value = extract_field_2 (&field, inst->value, 0);
1294 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1295 : AARCH64_OPND_QLF_V_2D;
1296 switch (inst->opcode->op)
1297 {
1298 case OP_FCVTN:
1299 case OP_FCVTN2:
1300 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1301 inst->operands[1].qualifier = qualifier;
1302 break;
1303 case OP_FCVTL:
1304 case OP_FCVTL2:
1305 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1306 inst->operands[0].qualifier = qualifier;
1307 break;
1308 default:
1309 assert (0);
1310 return 0;
1311 }
1312
1313 return 1;
1314 }
1315
1316 /* Decode size[0], i.e. bit 22, for
1317 e.g. FCVTXN <Vb><d>, <Va><n>. */
1318
1319 static int
1320 decode_asisd_fcvtxn (aarch64_inst *inst)
1321 {
1322 aarch64_field field = {0, 0};
1323 gen_sub_field (FLD_size, 0, 1, &field);
1324 if (!extract_field_2 (&field, inst->value, 0))
1325 return 0;
1326 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1327 return 1;
1328 }
1329
1330 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1331 static int
1332 decode_fcvt (aarch64_inst *inst)
1333 {
1334 enum aarch64_opnd_qualifier qualifier;
1335 aarch64_insn value;
1336 const aarch64_field field = {15, 2};
1337
1338 /* opc dstsize */
1339 value = extract_field_2 (&field, inst->value, 0);
1340 switch (value)
1341 {
1342 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1343 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1344 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1345 default: return 0;
1346 }
1347 inst->operands[0].qualifier = qualifier;
1348
1349 return 1;
1350 }
1351
1352 /* Do miscellaneous decodings that are not common enough to be driven by
1353 flags. */
1354
1355 static int
1356 do_misc_decoding (aarch64_inst *inst)
1357 {
1358 switch (inst->opcode->op)
1359 {
1360 case OP_FCVT:
1361 return decode_fcvt (inst);
1362 case OP_FCVTN:
1363 case OP_FCVTN2:
1364 case OP_FCVTL:
1365 case OP_FCVTL2:
1366 return decode_asimd_fcvt (inst);
1367 case OP_FCVTXN_S:
1368 return decode_asisd_fcvtxn (inst);
1369 default:
1370 return 0;
1371 }
1372 }
1373
 1374 /* Opcodes that have fields shared by multiple operands are usually flagged
 1375    with flags.  In this function, we detect such flags, decode the related
 1376    field(s) and store the information in one of the related operands.  This
 1377    'one' operand is not an arbitrary operand, but one of the operands that
 1378    can accommodate all the information that has been decoded.  */
1379
1380 static int
1381 do_special_decoding (aarch64_inst *inst)
1382 {
1383 int idx;
1384 aarch64_insn value;
 1385   /* Condition for truly conditionally-executed instructions, e.g. b.cond.  */
1386 if (inst->opcode->flags & F_COND)
1387 {
1388 value = extract_field (FLD_cond2, inst->value, 0);
1389 inst->cond = get_cond_from_value (value);
1390 }
1391 /* 'sf' field. */
1392 if (inst->opcode->flags & F_SF)
1393 {
1394 idx = select_operand_for_sf_field_coding (inst->opcode);
1395 value = extract_field (FLD_sf, inst->value, 0);
1396 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1397 if ((inst->opcode->flags & F_N)
1398 && extract_field (FLD_N, inst->value, 0) != value)
1399 return 0;
1400 }
 1401   /* 'lse_sz' field.  */
1402 if (inst->opcode->flags & F_LSE_SZ)
1403 {
1404 idx = select_operand_for_sf_field_coding (inst->opcode);
1405 value = extract_field (FLD_lse_sz, inst->value, 0);
1406 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1407 }
1408 /* size:Q fields. */
1409 if (inst->opcode->flags & F_SIZEQ)
1410 return decode_sizeq (inst);
1411
1412 if (inst->opcode->flags & F_FPTYPE)
1413 {
1414 idx = select_operand_for_fptype_field_coding (inst->opcode);
1415 value = extract_field (FLD_type, inst->value, 0);
1416 switch (value)
1417 {
1418 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
1419 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
1420 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
1421 default: return 0;
1422 }
1423 }
1424
1425 if (inst->opcode->flags & F_SSIZE)
1426 {
1427 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
1428 of the base opcode. */
1429 aarch64_insn mask;
1430 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1431 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1432 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
1433 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
 1434       /* For most related instructions, the 'size' field is fully available for
 1435          operand encoding.  */
1436 if (mask == 0x3)
1437 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
1438 else
1439 {
1440 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1441 candidates);
1442 inst->operands[idx].qualifier
1443 = get_qualifier_from_partial_encoding (value, candidates, mask);
1444 }
1445 }
1446
1447 if (inst->opcode->flags & F_T)
1448 {
1449 /* Num of consecutive '0's on the right side of imm5<3:0>. */
1450 int num = 0;
1451 unsigned val, Q;
1452 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1453 == AARCH64_OPND_CLASS_SIMD_REG);
1454 /* imm5<3:0> q <t>
1455 0000 x reserved
1456 xxx1 0 8b
1457 xxx1 1 16b
1458 xx10 0 4h
1459 xx10 1 8h
1460 x100 0 2s
1461 x100 1 4s
1462 1000 0 reserved
1463 1000 1 2d */
1464 val = extract_field (FLD_imm5, inst->value, 0);
1465 while ((val & 0x1) == 0 && ++num <= 3)
1466 val >>= 1;
1467 if (num > 3)
1468 return 0;
1469 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
1470 inst->operands[0].qualifier =
1471 get_vreg_qualifier_from_value ((num << 1) | Q);
1472 }
1473
1474 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1475 {
1476 /* Use Rt to encode in the case of e.g.
1477 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1478 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1479 if (idx == -1)
1480 {
 1481 	  /* Otherwise use the result operand, which has to be an integer
1482 register. */
1483 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1484 == AARCH64_OPND_CLASS_INT_REG);
1485 idx = 0;
1486 }
1487 assert (idx == 0 || idx == 1);
1488 value = extract_field (FLD_Q, inst->value, 0);
1489 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1490 }
1491
1492 if (inst->opcode->flags & F_LDS_SIZE)
1493 {
1494 aarch64_field field = {0, 0};
1495 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1496 == AARCH64_OPND_CLASS_INT_REG);
1497 gen_sub_field (FLD_opc, 0, 1, &field);
1498 value = extract_field_2 (&field, inst->value, 0);
1499 inst->operands[0].qualifier
1500 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
1501 }
1502
1503 /* Miscellaneous decoding; done as the last step. */
1504 if (inst->opcode->flags & F_MISC)
1505 return do_misc_decoding (inst);
1506
1507 return 1;
1508 }
1509
1510 /* Converters converting a real opcode instruction to its alias form. */
1511
1512 /* ROR <Wd>, <Ws>, #<shift>
1513 is equivalent to:
1514 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1515 static int
1516 convert_extr_to_ror (aarch64_inst *inst)
1517 {
1518 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1519 {
1520 copy_operand_info (inst, 2, 3);
1521 inst->operands[3].type = AARCH64_OPND_NIL;
1522 return 1;
1523 }
1524 return 0;
1525 }
1526
1527 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1528 is equivalent to:
1529 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1530 static int
1531 convert_shll_to_xtl (aarch64_inst *inst)
1532 {
1533 if (inst->operands[2].imm.value == 0)
1534 {
1535 inst->operands[2].type = AARCH64_OPND_NIL;
1536 return 1;
1537 }
1538 return 0;
1539 }
1540
1541 /* Convert
1542 UBFM <Xd>, <Xn>, #<shift>, #63.
1543 to
1544 LSR <Xd>, <Xn>, #<shift>. */
1545 static int
1546 convert_bfm_to_sr (aarch64_inst *inst)
1547 {
1548 int64_t imms, val;
1549
1550 imms = inst->operands[3].imm.value;
1551 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1552 if (imms == val)
1553 {
1554 inst->operands[3].type = AARCH64_OPND_NIL;
1555 return 1;
1556 }
1557
1558 return 0;
1559 }
1560
1561 /* Convert MOV to ORR. */
1562 static int
1563 convert_orr_to_mov (aarch64_inst *inst)
1564 {
1565 /* MOV <Vd>.<T>, <Vn>.<T>
1566 is equivalent to:
1567 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1568 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1569 {
1570 inst->operands[2].type = AARCH64_OPND_NIL;
1571 return 1;
1572 }
1573 return 0;
1574 }
1575
1576 /* When <imms> >= <immr>, the instruction written:
1577 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1578 is equivalent to:
1579 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1580
1581 static int
1582 convert_bfm_to_bfx (aarch64_inst *inst)
1583 {
1584 int64_t immr, imms;
1585
1586 immr = inst->operands[2].imm.value;
1587 imms = inst->operands[3].imm.value;
1588 if (imms >= immr)
1589 {
1590 int64_t lsb = immr;
1591 inst->operands[2].imm.value = lsb;
1592 inst->operands[3].imm.value = imms + 1 - lsb;
1593 /* The two opcodes have different qualifiers for
1594 the immediate operands; reset to help the checking. */
1595 reset_operand_qualifier (inst, 2);
1596 reset_operand_qualifier (inst, 3);
1597 return 1;
1598 }
1599
1600 return 0;
1601 }
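/* Worked example (illustrative): SBFM <Xd>, <Xn>, #4, #11 has
   <imms> >= <immr>, so it converts to SBFX <Xd>, <Xn>, #4, #8,
   i.e. lsb == immr and width == imms + 1 - lsb.  */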
1602
1603 /* When <imms> < <immr>, the instruction written:
1604 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1605 is equivalent to:
1606 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1607
1608 static int
1609 convert_bfm_to_bfi (aarch64_inst *inst)
1610 {
1611 int64_t immr, imms, val;
1612
1613 immr = inst->operands[2].imm.value;
1614 imms = inst->operands[3].imm.value;
1615 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
1616 if (imms < immr)
1617 {
1618 inst->operands[2].imm.value = (val - immr) & (val - 1);
1619 inst->operands[3].imm.value = imms + 1;
1620 /* The two opcodes have different qualifiers for
1621 the immediate operands; reset to help the checking. */
1622 reset_operand_qualifier (inst, 2);
1623 reset_operand_qualifier (inst, 3);
1624 return 1;
1625 }
1626
1627 return 0;
1628 }
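/* Worked example (illustrative): SBFM <Xd>, <Xn>, #60, #3 has
   <imms> < <immr>, so it converts to SBFIZ <Xd>, <Xn>, #4, #4:
   lsb == (64 - immr) & 0x3f == 4 and width == imms + 1 == 4.  */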
1629
1630 /* The instruction written:
1631 BFC <Xd>, #<lsb>, #<width>
1632 is equivalent to:
1633 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1634
1635 static int
1636 convert_bfm_to_bfc (aarch64_inst *inst)
1637 {
1638 int64_t immr, imms, val;
1639
1640 /* Should have been assured by the base opcode value. */
1641 assert (inst->operands[1].reg.regno == 0x1f);
1642
1643 immr = inst->operands[2].imm.value;
1644 imms = inst->operands[3].imm.value;
1645 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
1646 if (imms < immr)
1647 {
1648 /* Drop XZR from the second operand. */
1649 copy_operand_info (inst, 1, 2);
1650 copy_operand_info (inst, 2, 3);
1651 inst->operands[3].type = AARCH64_OPND_NIL;
1652
1653 /* Recalculate the immediates. */
1654 inst->operands[1].imm.value = (val - immr) & (val - 1);
1655 inst->operands[2].imm.value = imms + 1;
1656
1657 /* The two opcodes have different qualifiers for the operands; reset to
1658 help the checking. */
1659 reset_operand_qualifier (inst, 1);
1660 reset_operand_qualifier (inst, 2);
1661 reset_operand_qualifier (inst, 3);
1662
1663 return 1;
1664 }
1665
1666 return 0;
1667 }
1668
1669 /* The instruction written:
1670 LSL <Xd>, <Xn>, #<shift>
1671 is equivalent to:
1672 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1673
1674 static int
1675 convert_ubfm_to_lsl (aarch64_inst *inst)
1676 {
1677 int64_t immr = inst->operands[2].imm.value;
1678 int64_t imms = inst->operands[3].imm.value;
1679 int64_t val
1680 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1681
1682 if ((immr == 0 && imms == val) || immr == imms + 1)
1683 {
1684 inst->operands[3].type = AARCH64_OPND_NIL;
1685 inst->operands[2].imm.value = val - imms;
1686 return 1;
1687 }
1688
1689 return 0;
1690 }
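/* Worked example (illustrative): UBFM <Xd>, <Xn>, #60, #59 satisfies
   immr == imms + 1, so it converts to LSL <Xd>, <Xn>, #4, the shift
   amount being val - imms == 63 - 59.  */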
1691
1692 /* CINC <Wd>, <Wn>, <cond>
1693 is equivalent to:
1694 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
1695 where <cond> is not AL or NV. */
1696
1697 static int
1698 convert_from_csel (aarch64_inst *inst)
1699 {
1700 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
1701 && (inst->operands[3].cond->value & 0xe) != 0xe)
1702 {
1703 copy_operand_info (inst, 2, 3);
1704 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
1705 inst->operands[3].type = AARCH64_OPND_NIL;
1706 return 1;
1707 }
1708 return 0;
1709 }
1710
1711 /* CSET <Wd>, <cond>
1712 is equivalent to:
1713 CSINC <Wd>, WZR, WZR, invert(<cond>)
1714 where <cond> is not AL or NV. */
1715
1716 static int
1717 convert_csinc_to_cset (aarch64_inst *inst)
1718 {
1719 if (inst->operands[1].reg.regno == 0x1f
1720 && inst->operands[2].reg.regno == 0x1f
1721 && (inst->operands[3].cond->value & 0xe) != 0xe)
1722 {
1723 copy_operand_info (inst, 1, 3);
1724 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
1725 inst->operands[3].type = AARCH64_OPND_NIL;
1726 inst->operands[2].type = AARCH64_OPND_NIL;
1727 return 1;
1728 }
1729 return 0;
1730 }
1731
1732 /* MOV <Wd>, #<imm>
1733 is equivalent to:
1734 MOVZ <Wd>, #<imm16>, LSL #<shift>.
1735
1736 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1737 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1738 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1739 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1740 machine-instruction mnemonic must be used. */
1741
1742 static int
1743 convert_movewide_to_mov (aarch64_inst *inst)
1744 {
1745 uint64_t value = inst->operands[1].imm.value;
1746 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
1747 if (value == 0 && inst->operands[1].shifter.amount != 0)
1748 return 0;
1749 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1750 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
1751 value <<= inst->operands[1].shifter.amount;
 1752   /* As this is an alias converter, note that INST->OPCODE
 1753      is the opcode of the real instruction.  */
1754 if (inst->opcode->op == OP_MOVN)
1755 {
1756 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1757 value = ~value;
1758 /* A MOVN has an immediate that could be encoded by MOVZ. */
1759 if (aarch64_wide_constant_p (value, is32, NULL) == TRUE)
1760 return 0;
1761 }
1762 inst->operands[1].imm.value = value;
1763 inst->operands[1].shifter.amount = 0;
1764 return 1;
1765 }
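/* Worked example (illustrative): MOVZ <Xd>, #0x1, LSL #16 becomes
   MOV <Xd>, #0x10000, while MOVZ <Xd>, #0, LSL #16 is kept in its
   machine-instruction form, because a zero immediate with a non-zero
   shift must not be printed as MOV.  */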
1766
1767 /* MOV <Wd>, #<imm>
1768 is equivalent to:
1769 ORR <Wd>, WZR, #<imm>.
1770
1771 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1772 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1773 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1774 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1775 machine-instruction mnemonic must be used. */
1776
1777 static int
1778 convert_movebitmask_to_mov (aarch64_inst *inst)
1779 {
1780 int is32;
1781 uint64_t value;
1782
1783 /* Should have been assured by the base opcode value. */
1784 assert (inst->operands[1].reg.regno == 0x1f);
1785 copy_operand_info (inst, 1, 2);
1786 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1787 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1788 value = inst->operands[1].imm.value;
1789 /* ORR has an immediate that could be generated by a MOVZ or MOVN
1790 instruction. */
1791 if (inst->operands[0].reg.regno != 0x1f
1792 && (aarch64_wide_constant_p (value, is32, NULL) == TRUE
1793 || aarch64_wide_constant_p (~value, is32, NULL) == TRUE))
1794 return 0;
1795
1796 inst->operands[2].type = AARCH64_OPND_NIL;
1797 return 1;
1798 }
1799
1800 /* Some alias opcodes are disassembled by being converted from their real-form.
1801 N.B. INST->OPCODE is the real opcode rather than the alias. */
1802
1803 static int
1804 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
1805 {
1806 switch (alias->op)
1807 {
1808 case OP_ASR_IMM:
1809 case OP_LSR_IMM:
1810 return convert_bfm_to_sr (inst);
1811 case OP_LSL_IMM:
1812 return convert_ubfm_to_lsl (inst);
1813 case OP_CINC:
1814 case OP_CINV:
1815 case OP_CNEG:
1816 return convert_from_csel (inst);
1817 case OP_CSET:
1818 case OP_CSETM:
1819 return convert_csinc_to_cset (inst);
1820 case OP_UBFX:
1821 case OP_BFXIL:
1822 case OP_SBFX:
1823 return convert_bfm_to_bfx (inst);
1824 case OP_SBFIZ:
1825 case OP_BFI:
1826 case OP_UBFIZ:
1827 return convert_bfm_to_bfi (inst);
1828 case OP_BFC:
1829 return convert_bfm_to_bfc (inst);
1830 case OP_MOV_V:
1831 return convert_orr_to_mov (inst);
1832 case OP_MOV_IMM_WIDE:
1833 case OP_MOV_IMM_WIDEN:
1834 return convert_movewide_to_mov (inst);
1835 case OP_MOV_IMM_LOG:
1836 return convert_movebitmask_to_mov (inst);
1837 case OP_ROR_IMM:
1838 return convert_extr_to_ror (inst);
1839 case OP_SXTL:
1840 case OP_SXTL2:
1841 case OP_UXTL:
1842 case OP_UXTL2:
1843 return convert_shll_to_xtl (inst);
1844 default:
1845 return 0;
1846 }
1847 }
1848
1849 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
1850 aarch64_inst *, int);
1851
1852 /* Given the instruction information in *INST, check if the instruction has
1853 any alias form that can be used to represent *INST. If the answer is yes,
1854 update *INST to be in the form of the determined alias. */
1855
1856 /* In the opcode description table, the following flags are used in opcode
1857 entries to help establish the relations between the real and alias opcodes:
1858
1859 F_ALIAS: opcode is an alias
1860 F_HAS_ALIAS: opcode has alias(es)
1861 F_P1
1862 F_P2
1863 F_P3: Disassembly preference priority 1-3 (the larger the
1864 number, the higher the priority). If none is specified,
1865 the priority defaults to 0, i.e. the lowest priority.
1866
1867 Although the relation between the machine and the alias instructions is not
1868 explicitly described, it can be easily determined from the base opcode
1869 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
1870 description entries:
1871
1872 The mask of an alias opcode must be equal to or a super-set (i.e. more
1873 constrained) of that of the aliased opcode; the same applies to the base opcode value.
1874
1875 if (opcode_has_alias (real) && alias_opcode_p (opcode)
1876 && (opcode->mask & real->mask) == real->mask
1877 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
1878 then OPCODE is an alias of, and only of, the REAL instruction
1879
1880 The alias relationship is forced to be flat-structured to keep the related algorithms
1881 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
1882
1883 During disassembly, the decoding decision tree (in
1884 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
1885 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
1886 not specified), the disassembler will check whether any alias
1887 instruction exists for this real instruction. If there is, the disassembler
1888 will try to disassemble the 32-bit binary again using the alias's rule, or
1889 try to convert the IR to the form of the alias. In the case of multiple
1890 aliases, the aliases are tried one by one from the highest priority
1891 (currently the flag F_P3) to the lowest priority (no priority flag), and the
1892 first one that succeeds is adopted.
1893
1894 You may ask why there is a need to convert the IR from one form to
1895 another when handling certain aliases. On the one hand, it avoids
1896 adding more operand code to handle unusual encoding/decoding; on the other
1897 hand, during disassembly, the conversion is an effective approach to
1898 check the condition of an alias (as an alias may be adopted only if certain
1899 conditions are met).
1900
1901 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
1902 aarch64_opcode_table and generated aarch64_find_alias_opcode and
1903 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
1904
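
/* [Editorial illustration -- not part of the real opcodes library.]  The
   alias-relation pseudocode from the comment above, written out as a
   hypothetical predicate.  It uses the opcode_has_alias and alias_opcode_p
   helpers that the surrounding code already relies on; only the name
   example_is_alias_of is invented.  */

static bfd_boolean
example_is_alias_of (const aarch64_opcode *alias, const aarch64_opcode *real)
{
  return (opcode_has_alias (real)
	  && alias_opcode_p (alias)
	  /* The alias mask must be at least as constrained as the real
	     mask...  */
	  && (alias->mask & real->mask) == real->mask
	  /* ...and the two base opcode values must agree on every bit that
	     the real mask covers.  */
	  && (real->mask & alias->opcode) == (real->mask & real->opcode));
}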
1905 static void
1906 determine_disassembling_preference (struct aarch64_inst *inst)
1907 {
1908 const aarch64_opcode *opcode;
1909 const aarch64_opcode *alias;
1910
1911 opcode = inst->opcode;
1912
1913 /* This opcode does not have an alias, so use itself. */
1914 if (opcode_has_alias (opcode) == FALSE)
1915 return;
1916
1917 alias = aarch64_find_alias_opcode (opcode);
1918 assert (alias);
1919
1920 #ifdef DEBUG_AARCH64
1921 if (debug_dump)
1922 {
1923 const aarch64_opcode *tmp = alias;
1924 printf ("#### LIST ordered: ");
1925 while (tmp)
1926 {
1927 printf ("%s, ", tmp->name);
1928 tmp = aarch64_find_next_alias_opcode (tmp);
1929 }
1930 printf ("\n");
1931 }
1932 #endif /* DEBUG_AARCH64 */
1933
1934 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
1935 {
1936 DEBUG_TRACE ("try %s", alias->name);
1937 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
1938
1939 /* An alias can be a pseudo opcode which will never be used in the
1940 disassembly, e.g. BIC logical immediate is such a pseudo opcode
1941 aliasing AND. */
1942 if (pseudo_opcode_p (alias))
1943 {
1944 DEBUG_TRACE ("skip pseudo %s", alias->name);
1945 continue;
1946 }
1947
1948 if ((inst->value & alias->mask) != alias->opcode)
1949 {
1950 DEBUG_TRACE ("skip %s as base opcode does not match", alias->name);
1951 continue;
1952 }
1953 /* No need to do any complicated transformation on operands if the alias
1954 opcode does not have any operands. */
1955 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
1956 {
1957 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
1958 aarch64_replace_opcode (inst, alias);
1959 return;
1960 }
1961 if (alias->flags & F_CONV)
1962 {
1963 aarch64_inst copy;
1964 memcpy (&copy, inst, sizeof (aarch64_inst));
1965 /* ALIAS is the preference as long as the instruction can be
1966 successfully converted to the form of ALIAS. */
1967 if (convert_to_alias (&copy, alias) == 1)
1968 {
1969 aarch64_replace_opcode (&copy, alias);
1970 assert (aarch64_match_operands_constraint (&copy, NULL));
1971 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
1972 memcpy (inst, &copy, sizeof (aarch64_inst));
1973 return;
1974 }
1975 }
1976 else
1977 {
1978 /* Directly decode the alias opcode. */
1979 aarch64_inst temp;
1980 memset (&temp, '\0', sizeof (aarch64_inst));
1981 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
1982 {
1983 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
1984 memcpy (inst, &temp, sizeof (aarch64_inst));
1985 return;
1986 }
1987 }
1988 }
1989 }
1990
1991 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
1992 fails, which means that CODE is not an instruction of OPCODE; otherwise
1993 return 1.
1994
1995 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
1996 determined and used to disassemble CODE; this is done just before the
1997 return. */
1998
1999 static int
2000 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2001 aarch64_inst *inst, int noaliases_p)
2002 {
2003 int i;
2004
2005 DEBUG_TRACE ("enter with %s", opcode->name);
2006
2007 assert (opcode && inst);
2008
2009 /* Check the base opcode. */
2010 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2011 {
2012 DEBUG_TRACE ("base opcode match FAIL");
2013 goto decode_fail;
2014 }
2015
2016 /* Clear inst. */
2017 memset (inst, '\0', sizeof (aarch64_inst));
2018
2019 inst->opcode = opcode;
2020 inst->value = code;
2021
2022 /* Assign operand codes and indexes. */
2023 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2024 {
2025 if (opcode->operands[i] == AARCH64_OPND_NIL)
2026 break;
2027 inst->operands[i].type = opcode->operands[i];
2028 inst->operands[i].idx = i;
2029 }
2030
2031 /* Call the opcode decoder indicated by flags. */
2032 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2033 {
2034 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2035 goto decode_fail;
2036 }
2037
2038 /* Call operand decoders. */
2039 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2040 {
2041 const aarch64_operand *opnd;
2042 enum aarch64_opnd type;
2043 type = opcode->operands[i];
2044 if (type == AARCH64_OPND_NIL)
2045 break;
2046 opnd = &aarch64_operands[type];
2047 if (operand_has_extractor (opnd)
2048 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
2049 {
2050 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2051 goto decode_fail;
2052 }
2053 }
2054
2055 /* Match the qualifiers. */
2056 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2057 {
2058 /* Arriving here, the CODE has been determined as a valid instruction
2059 of OPCODE and *INST has been filled with information of this OPCODE
2060 instruction. Before the return, check if the instruction has any
2061 alias and should be disassembled in the form of its alias instead.
2062 If the answer is yes, *INST will be updated. */
2063 if (!noaliases_p)
2064 determine_disassembling_preference (inst);
2065 DEBUG_TRACE ("SUCCESS");
2066 return 1;
2067 }
2068 else
2069 {
2070 DEBUG_TRACE ("constraint matching FAIL");
2071 }
2072
2073 decode_fail:
2074 return 0;
2075 }
2076 \f
2077 /* This does some user-friendly fix-up to *INST. It currently focuses on
2078 adjusting qualifiers to help the printed instruction be
2079 recognized/understood more easily. */
2080
2081 static void
2082 user_friendly_fixup (aarch64_inst *inst)
2083 {
2084 switch (inst->opcode->iclass)
2085 {
2086 case testbranch:
2087 /* TBNZ Xn|Wn, #uimm6, label
2088 Test and Branch Not Zero: conditionally jumps to label if bit number
2089 uimm6 in register Xn is not zero. The bit number implies the width of
2090 the register, which may be written and should be disassembled as Wn if
2091 uimm6 is less than 32. Limited to a branch offset range of +/- 32KiB.
2092 */
2093 if (inst->operands[1].imm.value < 32)
2094 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2095 break;
2096 default: break;
2097 }
2098 }
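
/* [Editorial illustration -- not part of the real opcodes library.]  A
   standalone sketch of the test-and-branch fix-up above: the tested bit
   number alone decides whether the register operand is printed in its W or
   X form.  The helper name is hypothetical.  */

static char
example_testbranch_reg_prefix (unsigned int bit_number)
{
  /* Bit numbers 0-31 fit in the W view of the register, so an encoding
     that tests bit 3 of X0 is printed as TBNZ W0, #3, <label>, while one
     that tests bit 35 keeps the X0 form.  */
  return bit_number < 32 ? 'w' : 'x';
}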
2099
2100 /* Decode INSN and fill in *INST the instruction information. An alias
2101 opcode may be filled in *INST if NOALIASES_P is FALSE. Return zero on
2102 success. */
2103
2104 int
2105 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2106 bfd_boolean noaliases_p)
2107 {
2108 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2109
2110 #ifdef DEBUG_AARCH64
2111 if (debug_dump)
2112 {
2113 const aarch64_opcode *tmp = opcode;
2114 printf ("\n");
2115 DEBUG_TRACE ("opcode lookup:");
2116 while (tmp != NULL)
2117 {
2118 aarch64_verbose (" %s", tmp->name);
2119 tmp = aarch64_find_next_opcode (tmp);
2120 }
2121 }
2122 #endif /* DEBUG_AARCH64 */
2123
2124 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2125 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2126 opcode field and value, apart from the difference that one of them has an
2127 extra field as part of the opcode, while the same field is used for operand
2128 encoding in the other opcode(s) ('immh' in the case of this example). */
2129 while (opcode != NULL)
2130 {
2131 /* Only one opcode can be decoded successfully, as the
2132 decoding routine checks the constraints carefully. */
2133 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p) == 1)
2134 return ERR_OK;
2135 opcode = aarch64_find_next_opcode (opcode);
2136 }
2137
2138 return ERR_UND;
2139 }
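
/* [Editorial illustration -- not part of the real opcodes library.]  A
   hypothetical caller of aarch64_decode_insn, shown only to illustrate the
   return-value convention (ERR_OK on success, ERR_UND when no opcode
   matches).  0xd503201f is the NOP encoding.  */

static void
example_decode_one_word (void)
{
  aarch64_inst inst;

  if (aarch64_decode_insn (0xd503201f, &inst, FALSE) == ERR_OK)
    /* With aliases enabled, the HINT #0 encoding is expected to come back
       as its NOP alias.  */
    DEBUG_TRACE ("decoded as %s", inst.opcode->name);
  else
    DEBUG_TRACE ("undefined instruction word");
}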
2140
2141 /* Print operands. */
2142
2143 static void
2144 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2145 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2146 {
2147 int i, pcrel_p, num_printed;
2148 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2149 {
2150 const size_t size = 128;
2151 char str[size];
2152 /* We rely primarily on the operand info in the opcode table, but we
2153 also look into inst->operands to support the disassembling of the
2154 optional operand.
2155 The two operand codes should be the same in all cases, apart from
2156 when the operand can be optional. */
2157 if (opcode->operands[i] == AARCH64_OPND_NIL
2158 || opnds[i].type == AARCH64_OPND_NIL)
2159 break;
2160
2161 /* Generate the operand string in STR. */
2162 aarch64_print_operand (str, size, pc, opcode, opnds, i, &pcrel_p,
2163 &info->target);
2164
2165 /* Print the delimiter (taking account of omitted operand(s)). */
2166 if (str[0] != '\0')
2167 (*info->fprintf_func) (info->stream, "%s",
2168 num_printed++ == 0 ? "\t" : ", ");
2169
2170 /* Print the operand. */
2171 if (pcrel_p)
2172 (*info->print_address_func) (info->target, info);
2173 else
2174 (*info->fprintf_func) (info->stream, "%s", str);
2175 }
2176 }
2177
2178 /* Print the instruction mnemonic name. */
2179
2180 static void
2181 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2182 {
2183 if (inst->opcode->flags & F_COND)
2184 {
2185 /* For instructions that are truly conditionally executed, e.g. b.cond,
2186 prepare the full mnemonic name with the corresponding condition
2187 suffix. */
2188 char name[8], *ptr;
2189 size_t len;
2190
2191 ptr = strchr (inst->opcode->name, '.');
2192 assert (ptr && inst->cond);
2193 len = ptr - inst->opcode->name;
2194 assert (len < 8);
2195 strncpy (name, inst->opcode->name, len);
2196 name [len] = '\0';
2197 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2198 }
2199 else
2200 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2201 }
2202
2203 /* Print the instruction according to *INST. */
2204
2205 static void
2206 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2207 struct disassemble_info *info)
2208 {
2209 print_mnemonic_name (inst, info);
2210 print_operands (pc, inst->opcode, inst->operands, info);
2211 }
2212
2213 /* Entry-point of the instruction disassembler and printer. */
2214
2215 static void
2216 print_insn_aarch64_word (bfd_vma pc,
2217 uint32_t word,
2218 struct disassemble_info *info)
2219 {
2220 static const char *err_msg[6] =
2221 {
2222 [ERR_OK] = "_",
2223 [-ERR_UND] = "undefined",
2224 [-ERR_UNP] = "unpredictable",
2225 [-ERR_NYI] = "NYI"
2226 };
2227
2228 int ret;
2229 aarch64_inst inst;
2230
2231 info->insn_info_valid = 1;
2232 info->branch_delay_insns = 0;
2233 info->data_size = 0;
2234 info->target = 0;
2235 info->target2 = 0;
2236
2237 if (info->flags & INSN_HAS_RELOC)
2238 /* If the instruction has a reloc associated with it, then
2239 the offset field in the instruction will actually be the
2240 addend for the reloc. (If we are using REL type relocs).
2241 In such cases, we can ignore the pc when computing
2242 addresses, since the addend is not currently pc-relative. */
2243 pc = 0;
2244
2245 ret = aarch64_decode_insn (word, &inst, no_aliases);
2246
2247 if (((word >> 21) & 0x3ff) == 1)
2248 {
2249 /* RESERVED for ALES. */
2250 assert (ret != ERR_OK);
2251 ret = ERR_NYI;
2252 }
2253
2254 switch (ret)
2255 {
2256 case ERR_UND:
2257 case ERR_UNP:
2258 case ERR_NYI:
2259 /* Handle undefined instructions. */
2260 info->insn_type = dis_noninsn;
2261 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
2262 word, err_msg[-ret]);
2263 break;
2264 case ERR_OK:
2265 user_friendly_fixup (&inst);
2266 print_aarch64_insn (pc, &inst, info);
2267 break;
2268 default:
2269 abort ();
2270 }
2271 }
2272
2273 /* Disallow mapping symbols ($x, $d etc) from
2274 being displayed in symbol relative addresses. */
2275
2276 bfd_boolean
2277 aarch64_symbol_is_valid (asymbol * sym,
2278 struct disassemble_info * info ATTRIBUTE_UNUSED)
2279 {
2280 const char * name;
2281
2282 if (sym == NULL)
2283 return FALSE;
2284
2285 name = bfd_asymbol_name (sym);
2286
2287 return name
2288 && (name[0] != '$'
2289 || (name[1] != 'x' && name[1] != 'd')
2290 || (name[2] != '\0' && name[2] != '.'));
2291 }
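
/* [Editorial illustration -- not part of the real opcodes library.]  The
   same name test as aarch64_symbol_is_valid above, restated over a plain
   string with a few concrete inputs; the helper name is hypothetical.  */

static bfd_boolean
example_name_is_displayable (const char *name)
{
  return name
	 && (name[0] != '$'
	     || (name[1] != 'x' && name[1] != 'd')
	     || (name[2] != '\0' && name[2] != '.'));
}

/* E.g. "$x", "$d" and "$x.42" are filtered out, while "main", "$y" and
   "$xyz" are kept -- "$xyz" passes because its third character is neither
   NUL nor '.'.  */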
2292
2293 /* Print data bytes on INFO->STREAM. */
2294
2295 static void
2296 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
2297 uint32_t word,
2298 struct disassemble_info *info)
2299 {
2300 switch (info->bytes_per_chunk)
2301 {
2302 case 1:
2303 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
2304 break;
2305 case 2:
2306 info->fprintf_func (info->stream, ".short\t0x%04x", word);
2307 break;
2308 case 4:
2309 info->fprintf_func (info->stream, ".word\t0x%08x", word);
2310 break;
2311 default:
2312 abort ();
2313 }
2314 }
2315
2316 /* Try to infer the code or data type from a symbol.
2317 Returns nonzero if *MAP_TYPE was set. */
2318
2319 static int
2320 get_sym_code_type (struct disassemble_info *info, int n,
2321 enum map_type *map_type)
2322 {
2323 elf_symbol_type *es;
2324 unsigned int type;
2325 const char *name;
2326
2327 es = *(elf_symbol_type **)(info->symtab + n);
2328 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
2329
2330 /* If the symbol has function type then use that. */
2331 if (type == STT_FUNC)
2332 {
2333 *map_type = MAP_INSN;
2334 return TRUE;
2335 }
2336
2337 /* Check for mapping symbols. */
2338 name = bfd_asymbol_name(info->symtab[n]);
2339 if (name[0] == '$'
2340 && (name[1] == 'x' || name[1] == 'd')
2341 && (name[2] == '\0' || name[2] == '.'))
2342 {
2343 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
2344 return TRUE;
2345 }
2346
2347 return FALSE;
2348 }
2349
2350 /* Entry-point of the AArch64 disassembler. */
2351
2352 int
2353 print_insn_aarch64 (bfd_vma pc,
2354 struct disassemble_info *info)
2355 {
2356 bfd_byte buffer[INSNLEN];
2357 int status;
2358 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
2359 bfd_boolean found = FALSE;
2360 unsigned int size = 4;
2361 unsigned long data;
2362
2363 if (info->disassembler_options)
2364 {
2365 set_default_aarch64_dis_options (info);
2366
2367 parse_aarch64_dis_options (info->disassembler_options);
2368
2369 /* To avoid repeated parsing of these options, we remove them here. */
2370 info->disassembler_options = NULL;
2371 }
2372
2373 /* AArch64 instructions are always little-endian. */
2374 info->endian_code = BFD_ENDIAN_LITTLE;
2375
2376 /* First check the full symtab for a mapping symbol, even if there
2377 are no usable non-mapping symbols for this address. */
2378 if (info->symtab_size != 0
2379 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
2380 {
2381 enum map_type type = MAP_INSN;
2382 int last_sym = -1;
2383 bfd_vma addr;
2384 int n;
2385
2386 if (pc <= last_mapping_addr)
2387 last_mapping_sym = -1;
2388
2389 /* Start scanning at the start of the function, or wherever
2390 we finished last time. */
2391 n = info->symtab_pos + 1;
2392 if (n < last_mapping_sym)
2393 n = last_mapping_sym;
2394
2395 /* Scan up to the location being disassembled. */
2396 for (; n < info->symtab_size; n++)
2397 {
2398 addr = bfd_asymbol_value (info->symtab[n]);
2399 if (addr > pc)
2400 break;
2401 if ((info->section == NULL
2402 || info->section == info->symtab[n]->section)
2403 && get_sym_code_type (info, n, &type))
2404 {
2405 last_sym = n;
2406 found = TRUE;
2407 }
2408 }
2409
2410 if (!found)
2411 {
2412 n = info->symtab_pos;
2413 if (n < last_mapping_sym)
2414 n = last_mapping_sym;
2415
2416 /* No mapping symbol found at this address. Look backwards
2417 for a preceding one. */
2418 for (; n >= 0; n--)
2419 {
2420 if (get_sym_code_type (info, n, &type))
2421 {
2422 last_sym = n;
2423 found = TRUE;
2424 break;
2425 }
2426 }
2427 }
2428
2429 last_mapping_sym = last_sym;
2430 last_type = type;
2431
2432 /* Look a little bit ahead to see if we should print out
2433 less than four bytes of data. If there is a symbol,
2434 mapping or otherwise, within the next couple of bytes then do not
2435 print beyond it (a small worked example follows this function). */
2436 if (last_type == MAP_DATA)
2437 {
2438 size = 4 - (pc & 3);
2439 for (n = last_sym + 1; n < info->symtab_size; n++)
2440 {
2441 addr = bfd_asymbol_value (info->symtab[n]);
2442 if (addr > pc)
2443 {
2444 if (addr - pc < size)
2445 size = addr - pc;
2446 break;
2447 }
2448 }
2449 /* If the next symbol is after three bytes, we need to
2450 print only part of the data, so that we can use either
2451 .byte or .short. */
2452 if (size == 3)
2453 size = (pc & 1) ? 1 : 2;
2454 }
2455 }
2456
2457 if (last_type == MAP_DATA)
2458 {
2459 /* size was set above. */
2460 info->bytes_per_chunk = size;
2461 info->display_endian = info->endian;
2462 printer = print_insn_data;
2463 }
2464 else
2465 {
2466 info->bytes_per_chunk = size = INSNLEN;
2467 info->display_endian = info->endian_code;
2468 printer = print_insn_aarch64_word;
2469 }
2470
2471 status = (*info->read_memory_func) (pc, buffer, size, info);
2472 if (status != 0)
2473 {
2474 (*info->memory_error_func) (status, pc, info);
2475 return -1;
2476 }
2477
2478 data = bfd_get_bits (buffer, size * 8,
2479 info->display_endian == BFD_ENDIAN_BIG);
2480
2481 (*printer) (pc, data, info);
2482
2483 return size;
2484 }
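
/* [Editorial illustration -- not part of the real opcodes library.]  The
   data-chunk sizing performed inside print_insn_aarch64 above, condensed
   into a hypothetical helper with two worked examples.  NEXT_SYM_ADDR
   stands for the address of the first symbol found after PC, or 0 if there
   is none.  */

static unsigned int
example_data_chunk_size (bfd_vma pc, bfd_vma next_sym_addr)
{
  unsigned int size = 4 - (pc & 3);	/* Stay within the aligned word.  */

  if (next_sym_addr > pc && next_sym_addr - pc < size)
    size = next_sym_addr - pc;		/* Stop at the next symbol.  */
  if (size == 3)
    size = (pc & 1) ? 1 : 2;		/* Three bytes cannot be emitted as a
					   single .byte/.short/.word.  */
  return size;
}

/* E.g. pc == 0x1002 with the next symbol at 0x1008 gives 2 (printed with
   .short), while pc == 0x1001 with no nearby symbol gives 3, reduced to 1
   (printed with .byte).  */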
2485 \f
2486 void
2487 print_aarch64_disassembler_options (FILE *stream)
2488 {
2489 fprintf (stream, _("\n\
2490 The following AARCH64 specific disassembler options are supported for use\n\
2491 with the -M switch (multiple options should be separated by commas):\n"));
2492
2493 fprintf (stream, _("\n\
2494 no-aliases Don't print instruction aliases.\n"));
2495
2496 fprintf (stream, _("\n\
2497 aliases Do print instruction aliases.\n"));
2498
2499 #ifdef DEBUG_AARCH64
2500 fprintf (stream, _("\n\
2501 debug_dump Temp switch for debug trace.\n"));
2502 #endif /* DEBUG_AARCH64 */
2503
2504 fprintf (stream, _("\n"));
2505 }