1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "dis-asm.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27
28 #if !defined(EMBEDDED_ENV)
29 #define SYMTAB_AVAILABLE 1
30 #include "elf-bfd.h"
31 #include "elf/aarch64.h"
32 #endif
33
34 #define ERR_OK 0
35 #define ERR_UND -1
36 #define ERR_UNP -3
37 #define ERR_NYI -5
38
39 #define INSNLEN 4
40
41 /* Cached mapping symbol state. */
42 enum map_type
43 {
44 MAP_INSN,
45 MAP_DATA
46 };
47
48 static enum map_type last_type;
49 static int last_mapping_sym = -1;
50 static bfd_vma last_mapping_addr = 0;
51
52 /* Other options */
 53 static int no_aliases = 0;	/* If set, disassemble as the most general inst.  */
54 \f
55
56 static void
57 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
58 {
59 }
60
61 static void
62 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
63 {
64 /* Try to match options that are simple flags */
65 if (CONST_STRNEQ (option, "no-aliases"))
66 {
67 no_aliases = 1;
68 return;
69 }
70
71 if (CONST_STRNEQ (option, "aliases"))
72 {
73 no_aliases = 0;
74 return;
75 }
76
77 #ifdef DEBUG_AARCH64
78 if (CONST_STRNEQ (option, "debug_dump"))
79 {
80 debug_dump = 1;
81 return;
82 }
83 #endif /* DEBUG_AARCH64 */
84
85 /* Invalid option. */
86 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
87 }
88
89 static void
90 parse_aarch64_dis_options (const char *options)
91 {
92 const char *option_end;
93
94 if (options == NULL)
95 return;
96
97 while (*options != '\0')
98 {
99 /* Skip empty options. */
100 if (*options == ',')
101 {
102 options++;
103 continue;
104 }
105
 106 /* We know that *options is neither NUL nor a comma. */
107 option_end = options + 1;
108 while (*option_end != ',' && *option_end != '\0')
109 option_end++;
110
111 parse_aarch64_dis_option (options, option_end - options);
112
113 /* Go on to the next one. If option_end points to a comma, it
114 will be skipped above. */
115 options = option_end;
116 }
117 }
118 \f
119 /* Functions doing the instruction disassembling. */
120
121 /* The unnamed arguments consist of the number of fields and information about
122 these fields where the VALUE will be extracted from CODE and returned.
123 MASK can be zero or the base mask of the opcode.
124
 125 N.B. the fields are required to be in such an order that the most significant
 126 field for VALUE comes first, e.g. the <index> in
 127 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
 128 is encoded in H:L:M in some cases; the fields H:L:M should then be passed in
 129 the order of H, L, M. */
130
131 static inline aarch64_insn
132 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
133 {
134 uint32_t num;
135 const aarch64_field *field;
136 enum aarch64_field_kind kind;
137 va_list va;
138
139 va_start (va, mask);
140 num = va_arg (va, uint32_t);
141 assert (num <= 5);
142 aarch64_insn value = 0x0;
143 while (num--)
144 {
145 kind = va_arg (va, enum aarch64_field_kind);
146 field = &fields[kind];
147 value <<= field->width;
148 value |= extract_field (kind, code, mask);
149 }
150 return value;
151 }
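
/* Illustrative sketch, not part of the original file: a minimal use of the
   varargs extractor above.  It decodes a lane index that is spread over the
   H, L and M bits, listing the fields from the most significant downwards as
   the comment above requires.  The helper name is hypothetical.  */

static aarch64_insn ATTRIBUTE_UNUSED
example_extract_hlm_index (aarch64_insn code)
{
  /* H:L:M forms a 3-bit value; H supplies the top bit and M the bottom bit.  */
  return extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);
}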
152
153 /* Sign-extend bit I of VALUE. */
154 static inline int32_t
155 sign_extend (aarch64_insn value, unsigned i)
156 {
157 uint32_t ret = value;
158
159 assert (i < 32);
160 if ((value >> i) & 0x1)
161 {
162 uint32_t val = (uint32_t)(-1) << i;
163 ret = ret | val;
164 }
165 return (int32_t) ret;
166 }
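
/* Illustrative sketch, not part of the original file: a worked example of
   sign_extend.  With VALUE = 0x100 and I = 8, bit 8 is set, so all the bits
   above it are filled with ones: 0x100 | 0xffffff00 = 0xffffff00, i.e. -256
   once reinterpreted as int32_t.  The helper below applies this to a 9-bit
   load/store offset field; its name and the assumed bit position of imm9
   (bits [20:12]) are for illustration only.  */

static int32_t ATTRIBUTE_UNUSED
example_sign_extend_imm9 (aarch64_insn code)
{
  aarch64_insn imm9 = (code >> 12) & 0x1ff;
  /* The top bit of a 9-bit field is bit 8.  */
  return sign_extend (imm9, 8);
}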
167
 168 /* N.B. the following inline helper functions create a dependency on the
169 order of operand qualifier enumerators. */
170
171 /* Given VALUE, return qualifier for a general purpose register. */
172 static inline enum aarch64_opnd_qualifier
173 get_greg_qualifier_from_value (aarch64_insn value)
174 {
175 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
176 assert (value <= 0x1
177 && aarch64_get_qualifier_standard_value (qualifier) == value);
178 return qualifier;
179 }
180
181 /* Given VALUE, return qualifier for a vector register. */
182 static inline enum aarch64_opnd_qualifier
183 get_vreg_qualifier_from_value (aarch64_insn value)
184 {
185 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
186
187 assert (value <= 0x8
188 && aarch64_get_qualifier_standard_value (qualifier) == value);
189 return qualifier;
190 }
191
192 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
193 static inline enum aarch64_opnd_qualifier
194 get_sreg_qualifier_from_value (aarch64_insn value)
195 {
196 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
197
198 assert (value <= 0x4
199 && aarch64_get_qualifier_standard_value (qualifier) == value);
200 return qualifier;
201 }
202
 203 /* Given the instruction in *INST, which is probably half-way through the
 204 decoding, return the qualifier that the caller should expect for operand
 205 I if we can establish it; otherwise return
 206 AARCH64_OPND_QLF_NIL. */
207
208 static aarch64_opnd_qualifier_t
209 get_expected_qualifier (const aarch64_inst *inst, int i)
210 {
211 aarch64_opnd_qualifier_seq_t qualifiers;
212 /* Should not be called if the qualifier is known. */
213 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
214 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
215 i, qualifiers))
216 return qualifiers[i];
217 else
218 return AARCH64_OPND_QLF_NIL;
219 }
220
221 /* Operand extractors. */
222
223 int
224 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
225 const aarch64_insn code,
226 const aarch64_inst *inst ATTRIBUTE_UNUSED)
227 {
228 info->reg.regno = extract_field (self->fields[0], code, 0);
229 return 1;
230 }
231
232 /* e.g. IC <ic_op>{, <Xt>}. */
233 int
234 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
235 const aarch64_insn code,
236 const aarch64_inst *inst ATTRIBUTE_UNUSED)
237 {
238 info->reg.regno = extract_field (self->fields[0], code, 0);
239 assert (info->idx == 1
240 && (aarch64_get_operand_class (inst->operands[0].type)
241 == AARCH64_OPND_CLASS_SYSTEM));
242 /* This will make the constraint checking happy and more importantly will
243 help the disassembler determine whether this operand is optional or
244 not. */
245 info->present = inst->operands[0].sysins_op->has_xt;
246
247 return 1;
248 }
249
250 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
251 int
252 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
253 const aarch64_insn code,
254 const aarch64_inst *inst ATTRIBUTE_UNUSED)
255 {
256 /* regno */
257 info->reglane.regno = extract_field (self->fields[0], code,
258 inst->opcode->mask);
259
260 /* Index and/or type. */
261 if (inst->opcode->iclass == asisdone
262 || inst->opcode->iclass == asimdins)
263 {
264 if (info->type == AARCH64_OPND_En
265 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
266 {
267 unsigned shift;
268 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
269 assert (info->idx == 1); /* Vn */
270 aarch64_insn value = extract_field (FLD_imm4, code, 0);
271 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
272 info->qualifier = get_expected_qualifier (inst, info->idx);
273 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
274 info->reglane.index = value >> shift;
275 }
276 else
277 {
278 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
279 imm5<3:0> <V>
280 0000 RESERVED
281 xxx1 B
282 xx10 H
283 x100 S
284 1000 D */
285 int pos = -1;
286 aarch64_insn value = extract_field (FLD_imm5, code, 0);
287 while (++pos <= 3 && (value & 0x1) == 0)
288 value >>= 1;
289 if (pos > 3)
290 return 0;
291 info->qualifier = get_sreg_qualifier_from_value (pos);
292 info->reglane.index = (unsigned) (value >> 1);
293 }
294 }
295 else
296 {
297 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
298 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
299
300 /* Need information in other operand(s) to help decoding. */
301 info->qualifier = get_expected_qualifier (inst, info->idx);
302 switch (info->qualifier)
303 {
304 case AARCH64_OPND_QLF_S_H:
305 /* h:l:m */
306 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
307 FLD_M);
308 info->reglane.regno &= 0xf;
309 break;
310 case AARCH64_OPND_QLF_S_S:
311 /* h:l */
312 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
313 break;
314 case AARCH64_OPND_QLF_S_D:
315 /* H */
316 info->reglane.index = extract_field (FLD_H, code, 0);
317 break;
318 default:
319 return 0;
320 }
321 }
322
323 return 1;
324 }
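
/* Illustrative sketch, not part of the original file: the imm5 scan above
   selects the element size from the lowest set bit.  For example
   imm5 = 0b00110 shifts once before hitting a set bit, so pos == 1 (the H
   qualifier) and the remaining bits give index = 0b0011 >> 1 = 1.  The
   helper name is hypothetical.  */

static int ATTRIBUTE_UNUSED
example_decode_imm5_lane (aarch64_insn imm5, unsigned *index)
{
  int pos = -1;
  while (++pos <= 3 && (imm5 & 0x1) == 0)
    imm5 >>= 1;
  if (pos > 3)
    return -1;			/* imm5<3:0> == 0000 is RESERVED.  */
  *index = (unsigned) (imm5 >> 1);
  return pos;			/* 0: B, 1: H, 2: S, 3: D.  */
}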
325
326 int
327 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
328 const aarch64_insn code,
329 const aarch64_inst *inst ATTRIBUTE_UNUSED)
330 {
331 /* R */
332 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
333 /* len */
334 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
335 return 1;
336 }
337
338 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
339 int
340 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
341 aarch64_opnd_info *info, const aarch64_insn code,
342 const aarch64_inst *inst)
343 {
344 aarch64_insn value;
345 /* Number of elements in each structure to be loaded/stored. */
346 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
347
348 struct
349 {
350 unsigned is_reserved;
351 unsigned num_regs;
352 unsigned num_elements;
353 } data [] =
354 { {0, 4, 4},
355 {1, 4, 4},
356 {0, 4, 1},
357 {0, 4, 2},
358 {0, 3, 3},
359 {1, 3, 3},
360 {0, 3, 1},
361 {0, 1, 1},
362 {0, 2, 2},
363 {1, 2, 2},
364 {0, 2, 1},
365 };
366
367 /* Rt */
368 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
369 /* opcode */
370 value = extract_field (FLD_opcode, code, 0);
371 if (expected_num != data[value].num_elements || data[value].is_reserved)
372 return 0;
373 info->reglist.num_regs = data[value].num_regs;
374
375 return 1;
376 }
377
378 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
379 lanes instructions. */
380 int
381 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
382 aarch64_opnd_info *info, const aarch64_insn code,
383 const aarch64_inst *inst)
384 {
385 aarch64_insn value;
386
387 /* Rt */
388 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
389 /* S */
390 value = extract_field (FLD_S, code, 0);
391
392 /* Number of registers is equal to the number of elements in
393 each structure to be loaded/stored. */
394 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
395 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
396
397 /* Except when it is LD1R. */
398 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
399 info->reglist.num_regs = 2;
400
401 return 1;
402 }
403
404 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
405 load/store single element instructions. */
406 int
407 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
408 aarch64_opnd_info *info, const aarch64_insn code,
409 const aarch64_inst *inst ATTRIBUTE_UNUSED)
410 {
411 aarch64_field field = {0, 0};
412 aarch64_insn QSsize; /* fields Q:S:size. */
413 aarch64_insn opcodeh2; /* opcode<2:1> */
414
415 /* Rt */
416 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
417
418 /* Decode the index, opcode<2:1> and size. */
419 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
420 opcodeh2 = extract_field_2 (&field, code, 0);
421 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
422 switch (opcodeh2)
423 {
424 case 0x0:
425 info->qualifier = AARCH64_OPND_QLF_S_B;
426 /* Index encoded in "Q:S:size". */
427 info->reglist.index = QSsize;
428 break;
429 case 0x1:
430 info->qualifier = AARCH64_OPND_QLF_S_H;
431 /* Index encoded in "Q:S:size<1>". */
432 info->reglist.index = QSsize >> 1;
433 break;
434 case 0x2:
435 if ((QSsize & 0x1) == 0)
436 {
437 info->qualifier = AARCH64_OPND_QLF_S_S;
438 /* Index encoded in "Q:S". */
439 info->reglist.index = QSsize >> 2;
440 }
441 else
442 {
443 info->qualifier = AARCH64_OPND_QLF_S_D;
444 /* Index encoded in "Q". */
445 info->reglist.index = QSsize >> 3;
446 if (extract_field (FLD_S, code, 0))
447 /* UND */
448 return 0;
449 }
450 break;
451 default:
452 return 0;
453 }
454
455 info->reglist.has_index = 1;
456 info->reglist.num_regs = 0;
457 /* Number of registers is equal to the number of elements in
458 each structure to be loaded/stored. */
459 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
460 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
461
462 return 1;
463 }
464
465 /* Decode fields immh:immb and/or Q for e.g.
466 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
467 or SSHR <V><d>, <V><n>, #<shift>. */
468
469 int
470 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
471 aarch64_opnd_info *info, const aarch64_insn code,
472 const aarch64_inst *inst)
473 {
474 int pos;
475 aarch64_insn Q, imm, immh;
476 enum aarch64_insn_class iclass = inst->opcode->iclass;
477
478 immh = extract_field (FLD_immh, code, 0);
479 if (immh == 0)
480 return 0;
481 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
482 pos = 4;
483 /* Get highest set bit in immh. */
484 while (--pos >= 0 && (immh & 0x8) == 0)
485 immh <<= 1;
486
487 assert ((iclass == asimdshf || iclass == asisdshf)
488 && (info->type == AARCH64_OPND_IMM_VLSR
489 || info->type == AARCH64_OPND_IMM_VLSL));
490
491 if (iclass == asimdshf)
492 {
493 Q = extract_field (FLD_Q, code, 0);
494 /* immh Q <T>
495 0000 x SEE AdvSIMD modified immediate
496 0001 0 8B
497 0001 1 16B
498 001x 0 4H
499 001x 1 8H
500 01xx 0 2S
501 01xx 1 4S
502 1xxx 0 RESERVED
503 1xxx 1 2D */
504 info->qualifier =
505 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
506 }
507 else
508 info->qualifier = get_sreg_qualifier_from_value (pos);
509
510 if (info->type == AARCH64_OPND_IMM_VLSR)
511 /* immh <shift>
512 0000 SEE AdvSIMD modified immediate
513 0001 (16-UInt(immh:immb))
514 001x (32-UInt(immh:immb))
515 01xx (64-UInt(immh:immb))
516 1xxx (128-UInt(immh:immb)) */
517 info->imm.value = (16 << pos) - imm;
518 else
519 /* immh:immb
520 immh <shift>
521 0000 SEE AdvSIMD modified immediate
522 0001 (UInt(immh:immb)-8)
523 001x (UInt(immh:immb)-16)
524 01xx (UInt(immh:immb)-32)
525 1xxx (UInt(immh:immb)-64) */
526 info->imm.value = imm - (8 << pos);
527
528 return 1;
529 }
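
/* Illustrative sketch, not part of the original file: a worked example of
   the right-shift decoding above.  With immh = 0b0010 and immb = 0b101,
   UInt(immh:immb) = 21 and the highest set bit of immh gives pos = 1, so the
   SSHR shift amount is (16 << 1) - 21 = 11, matching the
   "001x  (32-UInt(immh:immb))" row of the table.  The helper name is
   hypothetical.  */

static int ATTRIBUTE_UNUSED
example_sshr_shift_amount (aarch64_insn immh, aarch64_insn immb)
{
  int pos = 4;
  aarch64_insn imm = (immh << 3) | immb;

  if (immh == 0)
    return -1;			/* SEE AdvSIMD modified immediate.  */
  while (--pos >= 0 && (immh & 0x8) == 0)
    immh <<= 1;
  return (16 << pos) - (int) imm;
}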
530
 531 /* Decode the shift immediate (#8, #16 or #32) for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
532 int
533 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
534 aarch64_opnd_info *info, const aarch64_insn code,
535 const aarch64_inst *inst ATTRIBUTE_UNUSED)
536 {
537 int64_t imm;
538 aarch64_insn val;
539 val = extract_field (FLD_size, code, 0);
540 switch (val)
541 {
542 case 0: imm = 8; break;
543 case 1: imm = 16; break;
544 case 2: imm = 32; break;
545 default: return 0;
546 }
547 info->imm.value = imm;
548 return 1;
549 }
550
551 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
 552 The value in the field(s) will be extracted as an unsigned immediate value. */
553 int
554 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
555 const aarch64_insn code,
556 const aarch64_inst *inst ATTRIBUTE_UNUSED)
557 {
558 int64_t imm;
559 /* Maximum of two fields to extract. */
560 assert (self->fields[2] == FLD_NIL);
561
562 if (self->fields[1] == FLD_NIL)
563 imm = extract_field (self->fields[0], code, 0);
564 else
565 /* e.g. TBZ b5:b40. */
566 imm = extract_fields (code, 0, 2, self->fields[0], self->fields[1]);
567
568 if (info->type == AARCH64_OPND_FPIMM)
569 info->imm.is_fp = 1;
570
571 if (operand_need_sign_extension (self))
572 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
573
574 if (operand_need_shift_by_two (self))
575 imm <<= 2;
576
577 if (info->type == AARCH64_OPND_ADDR_ADRP)
578 imm <<= 12;
579
580 info->imm.value = imm;
581 return 1;
582 }
583
584 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
585 int
586 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
587 const aarch64_insn code,
588 const aarch64_inst *inst ATTRIBUTE_UNUSED)
589 {
590 aarch64_ext_imm (self, info, code, inst);
591 info->shifter.kind = AARCH64_MOD_LSL;
592 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
593 return 1;
594 }
595
596 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
597 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
598 int
599 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
600 aarch64_opnd_info *info,
601 const aarch64_insn code,
602 const aarch64_inst *inst ATTRIBUTE_UNUSED)
603 {
604 uint64_t imm;
605 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
606 aarch64_field field = {0, 0};
607
608 assert (info->idx == 1);
609
610 if (info->type == AARCH64_OPND_SIMD_FPIMM)
611 info->imm.is_fp = 1;
612
613 /* a:b:c:d:e:f:g:h */
614 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
615 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
616 {
617 /* Either MOVI <Dd>, #<imm>
618 or MOVI <Vd>.2D, #<imm>.
619 <imm> is a 64-bit immediate
620 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
621 encoded in "a:b:c:d:e:f:g:h". */
622 int i;
623 unsigned abcdefgh = imm;
624 for (imm = 0ull, i = 0; i < 8; i++)
625 if (((abcdefgh >> i) & 0x1) != 0)
626 imm |= 0xffull << (8 * i);
627 }
628 info->imm.value = imm;
629
630 /* cmode */
631 info->qualifier = get_expected_qualifier (inst, info->idx);
632 switch (info->qualifier)
633 {
634 case AARCH64_OPND_QLF_NIL:
635 /* no shift */
636 info->shifter.kind = AARCH64_MOD_NONE;
637 return 1;
638 case AARCH64_OPND_QLF_LSL:
639 /* shift zeros */
640 info->shifter.kind = AARCH64_MOD_LSL;
641 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
642 {
643 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
644 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
645 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
646 default: assert (0); return 0;
647 }
648 /* 00: 0; 01: 8; 10:16; 11:24. */
649 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
650 break;
651 case AARCH64_OPND_QLF_MSL:
652 /* shift ones */
653 info->shifter.kind = AARCH64_MOD_MSL;
654 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
655 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
656 break;
657 default:
658 assert (0);
659 return 0;
660 }
661
662 return 1;
663 }
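
/* Illustrative sketch, not part of the original file: the 8-bit a:b:c:d:e:f:g:h
   value handled above expands to a 64-bit immediate by replicating each bit
   across one byte.  For example abcdefgh = 0b10100101 becomes
   0xff00ff0000ff00ff.  The helper name is hypothetical.  */

static uint64_t ATTRIBUTE_UNUSED
example_expand_abcdefgh (unsigned int abcdefgh)
{
  int i;
  uint64_t imm = 0;

  for (i = 0; i < 8; i++)
    if ((abcdefgh >> i) & 0x1)
      imm |= 0xffull << (8 * i);
  return imm;
}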
664
665 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
666 int
667 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
668 aarch64_opnd_info *info, const aarch64_insn code,
669 const aarch64_inst *inst ATTRIBUTE_UNUSED)
670 {
 671 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
672 return 1;
673 }
674
675 /* Decode arithmetic immediate for e.g.
676 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
677 int
678 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
679 aarch64_opnd_info *info, const aarch64_insn code,
680 const aarch64_inst *inst ATTRIBUTE_UNUSED)
681 {
682 aarch64_insn value;
683
684 info->shifter.kind = AARCH64_MOD_LSL;
685 /* shift */
686 value = extract_field (FLD_shift, code, 0);
687 if (value >= 2)
688 return 0;
689 info->shifter.amount = value ? 12 : 0;
690 /* imm12 (unsigned) */
691 info->imm.value = extract_field (FLD_imm12, code, 0);
692
693 return 1;
694 }
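
/* Illustrative sketch, not part of the original file: the arithmetic
   immediate above is a 12-bit value optionally shifted left by 12.  For
   example ADD <Xd>, <Xn>, #0x1000 is encoded with imm12 = 1 and shift = 1,
   which the extractor turns into imm.value = 1 with shifter.amount = 12.
   The helper name is hypothetical.  */

static uint64_t ATTRIBUTE_UNUSED
example_aimm_effective_value (aarch64_insn imm12, aarch64_insn shift)
{
  /* Reject the reserved shift encodings, as the extractor above does.  */
  if (shift >= 2)
    return 0;
  return (uint64_t) imm12 << (shift ? 12 : 0);
}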
695
696 /* Decode logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
697
698 int
699 aarch64_ext_limm (const aarch64_operand *self ATTRIBUTE_UNUSED,
700 aarch64_opnd_info *info, const aarch64_insn code,
701 const aarch64_inst *inst ATTRIBUTE_UNUSED)
702 {
703 uint64_t imm, mask;
704 uint32_t sf;
705 uint32_t N, R, S;
706 unsigned simd_size;
707 aarch64_insn value;
708
709 value = extract_fields (code, 0, 3, FLD_N, FLD_immr, FLD_imms);
710 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_W
711 || inst->operands[0].qualifier == AARCH64_OPND_QLF_X);
712 sf = aarch64_get_qualifier_esize (inst->operands[0].qualifier) != 4;
713
714 /* value is N:immr:imms. */
715 S = value & 0x3f;
716 R = (value >> 6) & 0x3f;
717 N = (value >> 12) & 0x1;
718
719 if (sf == 0 && N == 1)
720 return 0;
721
 722 /* The immediate value is S+1 bits set to 1, left rotated by SIMDsize - R
723 (in other words, right rotated by R), then replicated. */
724 if (N != 0)
725 {
726 simd_size = 64;
727 mask = 0xffffffffffffffffull;
728 }
729 else
730 {
731 switch (S)
732 {
733 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
734 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
735 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
736 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
737 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
738 default: return 0;
739 }
740 mask = (1ull << simd_size) - 1;
741 /* Top bits are IGNORED. */
742 R &= simd_size - 1;
743 }
744 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
745 if (S == simd_size - 1)
746 return 0;
747 /* S+1 consecutive bits to 1. */
748 /* NOTE: S can't be 63 due to detection above. */
749 imm = (1ull << (S + 1)) - 1;
750 /* Rotate to the left by simd_size - R. */
751 if (R != 0)
752 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
753 /* Replicate the value according to SIMD size. */
754 switch (simd_size)
755 {
756 case 2: imm = (imm << 2) | imm;
757 case 4: imm = (imm << 4) | imm;
758 case 8: imm = (imm << 8) | imm;
759 case 16: imm = (imm << 16) | imm;
760 case 32: imm = (imm << 32) | imm;
761 case 64: break;
762 default: assert (0); return 0;
763 }
764
765 info->imm.value = sf ? imm : imm & 0xffffffff;
766
767 return 1;
768 }
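
/* Illustrative sketch, not part of the original file: a worked example of
   the logical immediate decoding above.  For a 32-bit ORR immediate with
   N = 0, immr = 24 and imms = 7 (so S = 7, R = 24 and an element size of
   32): S + 1 = 8 consecutive ones give 0xff, rotating left by 32 - 24 = 8
   gives 0xff00, and replication followed by truncation for a W register
   leaves 0xff00.  The helper below restates just the replication step; its
   name is hypothetical.  */

static uint64_t ATTRIBUTE_UNUSED
example_replicate_element (uint64_t element, unsigned int simd_size)
{
  uint64_t imm = element;
  unsigned int width;

  /* Keep doubling the pattern until it fills 64 bits.  */
  for (width = simd_size; width < 64; width *= 2)
    imm |= imm << width;
  return imm;
}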
769
770 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
771 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
772 int
773 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
774 aarch64_opnd_info *info,
775 const aarch64_insn code, const aarch64_inst *inst)
776 {
777 aarch64_insn value;
778
779 /* Rt */
780 info->reg.regno = extract_field (FLD_Rt, code, 0);
781
782 /* size */
783 value = extract_field (FLD_ldst_size, code, 0);
784 if (inst->opcode->iclass == ldstpair_indexed
785 || inst->opcode->iclass == ldstnapair_offs
786 || inst->opcode->iclass == ldstpair_off
787 || inst->opcode->iclass == loadlit)
788 {
789 enum aarch64_opnd_qualifier qualifier;
790 switch (value)
791 {
792 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
793 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
794 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
795 default: return 0;
796 }
797 info->qualifier = qualifier;
798 }
799 else
800 {
801 /* opc1:size */
802 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
803 if (value > 0x4)
804 return 0;
805 info->qualifier = get_sreg_qualifier_from_value (value);
806 }
807
808 return 1;
809 }
810
811 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
812 int
813 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
814 aarch64_opnd_info *info,
815 aarch64_insn code,
816 const aarch64_inst *inst ATTRIBUTE_UNUSED)
817 {
818 /* Rn */
819 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
820 return 1;
821 }
822
823 /* Decode the address operand for e.g.
824 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
825 int
826 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
827 aarch64_opnd_info *info,
828 aarch64_insn code, const aarch64_inst *inst)
829 {
830 aarch64_insn S, value;
831
832 /* Rn */
833 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
834 /* Rm */
835 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
836 /* option */
837 value = extract_field (FLD_option, code, 0);
838 info->shifter.kind =
839 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
840 /* Fix-up the shifter kind; although the table-driven approach is
841 efficient, it is slightly inflexible, thus needing this fix-up. */
842 if (info->shifter.kind == AARCH64_MOD_UXTX)
843 info->shifter.kind = AARCH64_MOD_LSL;
844 /* S */
845 S = extract_field (FLD_S, code, 0);
846 if (S == 0)
847 {
848 info->shifter.amount = 0;
849 info->shifter.amount_present = 0;
850 }
851 else
852 {
853 int size;
854 /* Need information in other operand(s) to help achieve the decoding
855 from 'S' field. */
856 info->qualifier = get_expected_qualifier (inst, info->idx);
857 /* Get the size of the data element that is accessed, which may be
858 different from that of the source register size, e.g. in strb/ldrb. */
859 size = aarch64_get_qualifier_esize (info->qualifier);
860 info->shifter.amount = get_logsz (size);
861 info->shifter.amount_present = 1;
862 }
863
864 return 1;
865 }
866
867 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
868 int
869 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
870 aarch64_insn code, const aarch64_inst *inst)
871 {
872 aarch64_insn imm;
873 info->qualifier = get_expected_qualifier (inst, info->idx);
874
875 /* Rn */
876 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
877 /* simm (imm9 or imm7) */
878 imm = extract_field (self->fields[0], code, 0);
879 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
880 if (self->fields[0] == FLD_imm7)
881 /* scaled immediate in ld/st pair instructions. */
882 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
883 /* qualifier */
884 if (inst->opcode->iclass == ldst_unscaled
885 || inst->opcode->iclass == ldstnapair_offs
886 || inst->opcode->iclass == ldstpair_off
887 || inst->opcode->iclass == ldst_unpriv)
888 info->addr.writeback = 0;
889 else
890 {
891 /* pre/post- index */
892 info->addr.writeback = 1;
893 if (extract_field (self->fields[1], code, 0) == 1)
894 info->addr.preind = 1;
895 else
896 info->addr.postind = 1;
897 }
898
899 return 1;
900 }
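
/* Illustrative sketch, not part of the original file: for the ld/st pair
   forms the 7-bit immediate above is sign-extended and then scaled by the
   element size.  For example LDP <Xt1>, <Xt2>, [<Xn|SP>], #16 stores
   imm7 = 2, which becomes 2 * 8 = 16 after scaling, while imm7 = 0x7e
   (-2 after sign extension from bit 6) becomes -16.  The helper name is
   hypothetical.  */

static int64_t ATTRIBUTE_UNUSED
example_scale_imm7 (aarch64_insn imm7, int esize)
{
  return (int64_t) sign_extend (imm7, 6) * esize;
}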
901
902 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
903 int
904 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
905 aarch64_insn code,
906 const aarch64_inst *inst ATTRIBUTE_UNUSED)
907 {
908 int shift;
909 info->qualifier = get_expected_qualifier (inst, info->idx);
910 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
911 /* Rn */
912 info->addr.base_regno = extract_field (self->fields[0], code, 0);
913 /* uimm12 */
914 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
915 return 1;
916 }
917
918 /* Decode the address operand for e.g.
919 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
920 int
921 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
922 aarch64_opnd_info *info,
923 aarch64_insn code, const aarch64_inst *inst)
924 {
925 /* The opcode dependent area stores the number of elements in
926 each structure to be loaded/stored. */
927 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
928
929 /* Rn */
930 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
931 /* Rm | #<amount> */
932 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
933 if (info->addr.offset.regno == 31)
934 {
935 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
936 /* Special handling of loading single structure to all lane. */
937 info->addr.offset.imm = (is_ld1r ? 1
938 : inst->operands[0].reglist.num_regs)
939 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
940 else
941 info->addr.offset.imm = inst->operands[0].reglist.num_regs
942 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
943 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
944 }
945 else
946 info->addr.offset.is_reg = 1;
947 info->addr.writeback = 1;
948
949 return 1;
950 }
951
952 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
953 int
954 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
955 aarch64_opnd_info *info,
956 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
957 {
958 aarch64_insn value;
959 /* cond */
960 value = extract_field (FLD_cond, code, 0);
961 info->cond = get_cond_from_value (value);
962 return 1;
963 }
964
965 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
966 int
967 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
968 aarch64_opnd_info *info,
969 aarch64_insn code,
970 const aarch64_inst *inst ATTRIBUTE_UNUSED)
971 {
972 /* op0:op1:CRn:CRm:op2 */
973 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
974 FLD_CRm, FLD_op2);
975 return 1;
976 }
977
978 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
979 int
980 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
981 aarch64_opnd_info *info, aarch64_insn code,
982 const aarch64_inst *inst ATTRIBUTE_UNUSED)
983 {
984 int i;
985 /* op1:op2 */
986 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
987 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
988 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
989 return 1;
990 /* Reserved value in <pstatefield>. */
991 return 0;
992 }
993
994 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
995 int
996 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
997 aarch64_opnd_info *info,
998 aarch64_insn code,
999 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1000 {
1001 int i;
1002 aarch64_insn value;
1003 const aarch64_sys_ins_reg *sysins_ops;
1004 /* op0:op1:CRn:CRm:op2 */
1005 value = extract_fields (code, 0, 5,
1006 FLD_op0, FLD_op1, FLD_CRn,
1007 FLD_CRm, FLD_op2);
1008
1009 switch (info->type)
1010 {
1011 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1012 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1013 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1014 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1015 default: assert (0); return 0;
1016 }
1017
1018 for (i = 0; sysins_ops[i].template != NULL; ++i)
1019 if (sysins_ops[i].value == value)
1020 {
1021 info->sysins_op = sysins_ops + i;
1022 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1023 info->sysins_op->template,
1024 (unsigned)info->sysins_op->value,
1025 info->sysins_op->has_xt, i);
1026 return 1;
1027 }
1028
1029 return 0;
1030 }
1031
1032 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1033
1034 int
1035 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1036 aarch64_opnd_info *info,
1037 aarch64_insn code,
1038 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1039 {
1040 /* CRm */
1041 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1042 return 1;
1043 }
1044
1045 /* Decode the prefetch operation option operand for e.g.
1046 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1047
1048 int
1049 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1050 aarch64_opnd_info *info,
1051 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1052 {
1053 /* prfop in Rt */
1054 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1055 return 1;
1056 }
1057
1058 /* Decode the extended register operand for e.g.
1059 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1060 int
1061 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1062 aarch64_opnd_info *info,
1063 aarch64_insn code,
1064 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1065 {
1066 aarch64_insn value;
1067
1068 /* Rm */
1069 info->reg.regno = extract_field (FLD_Rm, code, 0);
1070 /* option */
1071 value = extract_field (FLD_option, code, 0);
1072 info->shifter.kind =
1073 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1074 /* imm3 */
1075 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1076
1077 /* This makes the constraint checking happy. */
1078 info->shifter.operator_present = 1;
1079
1080 /* Assume inst->operands[0].qualifier has been resolved. */
1081 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1082 info->qualifier = AARCH64_OPND_QLF_W;
1083 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1084 && (info->shifter.kind == AARCH64_MOD_UXTX
1085 || info->shifter.kind == AARCH64_MOD_SXTX))
1086 info->qualifier = AARCH64_OPND_QLF_X;
1087
1088 return 1;
1089 }
1090
1091 /* Decode the shifted register operand for e.g.
1092 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1093 int
1094 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1095 aarch64_opnd_info *info,
1096 aarch64_insn code,
1097 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1098 {
1099 aarch64_insn value;
1100
1101 /* Rm */
1102 info->reg.regno = extract_field (FLD_Rm, code, 0);
1103 /* shift */
1104 value = extract_field (FLD_shift, code, 0);
1105 info->shifter.kind =
1106 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1107 if (info->shifter.kind == AARCH64_MOD_ROR
1108 && inst->opcode->iclass != log_shift)
1109 /* ROR is not available for the shifted register operand in arithmetic
1110 instructions. */
1111 return 0;
1112 /* imm6 */
1113 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1114
1115 /* This makes the constraint checking happy. */
1116 info->shifter.operator_present = 1;
1117
1118 return 1;
1119 }
1120 \f
1121 /* Bitfields that are commonly used to encode certain operands' information
1122 may be partially used as part of the base opcode in some instructions.
1123 For example, the bit 1 of the field 'size' in
1124 FCVTXN <Vb><d>, <Va><n>
1125 is actually part of the base opcode, while only size<0> is available
1126 for encoding the register type. Another example is the AdvSIMD
1127 instruction ORR (register), in which the field 'size' is also used for
1128 the base opcode, leaving only the field 'Q' available to encode the
1129 vector register arrangement specifier '8B' or '16B'.
1130
1131 This function tries to deduce the qualifier from the value of partially
1132 constrained field(s). Given the VALUE of such a field or fields, the
1133 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1134 operand encoding), the function returns the matching qualifier or
1135 AARCH64_OPND_QLF_NIL if nothing matches.
1136
1137 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1138 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1139 may end with AARCH64_OPND_QLF_NIL. */
1140
1141 static enum aarch64_opnd_qualifier
1142 get_qualifier_from_partial_encoding (aarch64_insn value,
1143 const enum aarch64_opnd_qualifier* \
1144 candidates,
1145 aarch64_insn mask)
1146 {
1147 int i;
1148 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1149 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1150 {
1151 aarch64_insn standard_value;
1152 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1153 break;
1154 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1155 if ((standard_value & mask) == (value & mask))
1156 return candidates[i];
1157 }
1158 return AARCH64_OPND_QLF_NIL;
1159 }
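
/* Illustrative sketch, not part of the original file: a worked example of
   the masked comparison above.  With MASK = 0x1 only the low bit of the
   field takes part, so a candidate whose standard value is 0b01 matches an
   extracted VALUE of 0b11.  The helper below restates the core test; its
   name is hypothetical.  */

static int ATTRIBUTE_UNUSED
example_partial_encoding_match (aarch64_insn standard_value,
				aarch64_insn value, aarch64_insn mask)
{
  return (standard_value & mask) == (value & mask);
}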
1160
1161 /* Given a list of qualifier sequences, return all possible valid qualifiers
1162 for operand IDX in QUALIFIERS.
1163 Assume QUALIFIERS is an array whose length is large enough. */
1164
1165 static void
1166 get_operand_possible_qualifiers (int idx,
1167 const aarch64_opnd_qualifier_seq_t *list,
1168 enum aarch64_opnd_qualifier *qualifiers)
1169 {
1170 int i;
1171 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1172 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1173 break;
1174 }
1175
1176 /* Decode the size Q field for e.g. SHADD.
 1177 We tag one operand with the qualifier according to the code;
 1178 whether the qualifier is valid for this opcode or not is left
 1179 to the semantic checking. */
1180
1181 static int
1182 decode_sizeq (aarch64_inst *inst)
1183 {
1184 int idx;
1185 enum aarch64_opnd_qualifier qualifier;
1186 aarch64_insn code;
1187 aarch64_insn value, mask;
1188 enum aarch64_field_kind fld_sz;
1189 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1190
1191 if (inst->opcode->iclass == asisdlse
1192 || inst->opcode->iclass == asisdlsep
1193 || inst->opcode->iclass == asisdlso
1194 || inst->opcode->iclass == asisdlsop)
1195 fld_sz = FLD_vldst_size;
1196 else
1197 fld_sz = FLD_size;
1198
1199 code = inst->value;
1200 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
 1201 /* Obtain the info about which bits of fields Q and size are actually
1202 available for operand encoding. Opcodes like FMAXNM and FMLA have
1203 size[1] unavailable. */
1204 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1205
 1206 /* The index of the operand that we are going to tag with a qualifier, and
 1207 the qualifier itself, are deduced from the value of the size and Q fields
 1208 and the possible valid qualifier lists. */
1209 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1210 DEBUG_TRACE ("key idx: %d", idx);
1211
 1212 /* For most related instructions, size:Q is fully available for operand
1213 encoding. */
1214 if (mask == 0x7)
1215 {
1216 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1217 return 1;
1218 }
1219
1220 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1221 candidates);
1222 #ifdef DEBUG_AARCH64
1223 if (debug_dump)
1224 {
1225 int i;
1226 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1227 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1228 DEBUG_TRACE ("qualifier %d: %s", i,
1229 aarch64_get_qualifier_name(candidates[i]));
1230 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1231 }
1232 #endif /* DEBUG_AARCH64 */
1233
1234 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1235
1236 if (qualifier == AARCH64_OPND_QLF_NIL)
1237 return 0;
1238
1239 inst->operands[idx].qualifier = qualifier;
1240 return 1;
1241 }
1242
1243 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1244 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1245
1246 static int
1247 decode_asimd_fcvt (aarch64_inst *inst)
1248 {
1249 aarch64_field field = {0, 0};
1250 aarch64_insn value;
1251 enum aarch64_opnd_qualifier qualifier;
1252
1253 gen_sub_field (FLD_size, 0, 1, &field);
1254 value = extract_field_2 (&field, inst->value, 0);
1255 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1256 : AARCH64_OPND_QLF_V_2D;
1257 switch (inst->opcode->op)
1258 {
1259 case OP_FCVTN:
1260 case OP_FCVTN2:
1261 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1262 inst->operands[1].qualifier = qualifier;
1263 break;
1264 case OP_FCVTL:
1265 case OP_FCVTL2:
1266 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1267 inst->operands[0].qualifier = qualifier;
1268 break;
1269 default:
1270 assert (0);
1271 return 0;
1272 }
1273
1274 return 1;
1275 }
1276
1277 /* Decode size[0], i.e. bit 22, for
1278 e.g. FCVTXN <Vb><d>, <Va><n>. */
1279
1280 static int
1281 decode_asisd_fcvtxn (aarch64_inst *inst)
1282 {
1283 aarch64_field field = {0, 0};
1284 gen_sub_field (FLD_size, 0, 1, &field);
1285 if (!extract_field_2 (&field, inst->value, 0))
1286 return 0;
1287 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1288 return 1;
1289 }
1290
1291 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1292 static int
1293 decode_fcvt (aarch64_inst *inst)
1294 {
1295 enum aarch64_opnd_qualifier qualifier;
1296 aarch64_insn value;
1297 const aarch64_field field = {15, 2};
1298
1299 /* opc dstsize */
1300 value = extract_field_2 (&field, inst->value, 0);
1301 switch (value)
1302 {
1303 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1304 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1305 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1306 default: return 0;
1307 }
1308 inst->operands[0].qualifier = qualifier;
1309
1310 return 1;
1311 }
1312
1313 /* Do miscellaneous decodings that are not common enough to be driven by
1314 flags. */
1315
1316 static int
1317 do_misc_decoding (aarch64_inst *inst)
1318 {
1319 switch (inst->opcode->op)
1320 {
1321 case OP_FCVT:
1322 return decode_fcvt (inst);
1323 case OP_FCVTN:
1324 case OP_FCVTN2:
1325 case OP_FCVTL:
1326 case OP_FCVTL2:
1327 return decode_asimd_fcvt (inst);
1328 case OP_FCVTXN_S:
1329 return decode_asisd_fcvtxn (inst);
1330 default:
1331 return 0;
1332 }
1333 }
1334
 1335 /* Opcodes that have fields shared by multiple operands are usually marked
 1336 with dedicated flags. In this function, we detect such flags, decode the
 1337 related field(s) and store the information in one of the related operands.
 1338 The 'one' operand is not an arbitrary operand, but one of the operands that
 1339 can accommodate all the information that has been decoded. */
1340
1341 static int
1342 do_special_decoding (aarch64_inst *inst)
1343 {
1344 int idx;
1345 aarch64_insn value;
1346 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1347 if (inst->opcode->flags & F_COND)
1348 {
1349 value = extract_field (FLD_cond2, inst->value, 0);
1350 inst->cond = get_cond_from_value (value);
1351 }
1352 /* 'sf' field. */
1353 if (inst->opcode->flags & F_SF)
1354 {
1355 idx = select_operand_for_sf_field_coding (inst->opcode);
1356 value = extract_field (FLD_sf, inst->value, 0);
1357 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1358 if ((inst->opcode->flags & F_N)
1359 && extract_field (FLD_N, inst->value, 0) != value)
1360 return 0;
1361 }
1362 /* size:Q fields. */
1363 if (inst->opcode->flags & F_SIZEQ)
1364 return decode_sizeq (inst);
1365
1366 if (inst->opcode->flags & F_FPTYPE)
1367 {
1368 idx = select_operand_for_fptype_field_coding (inst->opcode);
1369 value = extract_field (FLD_type, inst->value, 0);
1370 switch (value)
1371 {
1372 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
1373 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
1374 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
1375 default: return 0;
1376 }
1377 }
1378
1379 if (inst->opcode->flags & F_SSIZE)
1380 {
1381 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
1382 of the base opcode. */
1383 aarch64_insn mask;
1384 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1385 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1386 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
1387 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
 1388 /* For most related instructions, the 'size' field is fully available for
1389 operand encoding. */
1390 if (mask == 0x3)
1391 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
1392 else
1393 {
1394 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1395 candidates);
1396 inst->operands[idx].qualifier
1397 = get_qualifier_from_partial_encoding (value, candidates, mask);
1398 }
1399 }
1400
1401 if (inst->opcode->flags & F_T)
1402 {
1403 /* Num of consecutive '0's on the right side of imm5<3:0>. */
1404 int num = 0;
1405 unsigned val, Q;
1406 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1407 == AARCH64_OPND_CLASS_SIMD_REG);
1408 /* imm5<3:0> q <t>
1409 0000 x reserved
1410 xxx1 0 8b
1411 xxx1 1 16b
1412 xx10 0 4h
1413 xx10 1 8h
1414 x100 0 2s
1415 x100 1 4s
1416 1000 0 reserved
1417 1000 1 2d */
1418 val = extract_field (FLD_imm5, inst->value, 0);
1419 while ((val & 0x1) == 0 && ++num <= 3)
1420 val >>= 1;
1421 if (num > 3)
1422 return 0;
1423 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
1424 inst->operands[0].qualifier =
1425 get_vreg_qualifier_from_value ((num << 1) | Q);
1426 }
1427
1428 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1429 {
1430 /* Use Rt to encode in the case of e.g.
1431 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1432 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1433 if (idx == -1)
1434 {
 1435 /* Otherwise use the result operand, which has to be an integer
1436 register. */
1437 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1438 == AARCH64_OPND_CLASS_INT_REG);
1439 idx = 0;
1440 }
1441 assert (idx == 0 || idx == 1);
1442 value = extract_field (FLD_Q, inst->value, 0);
1443 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1444 }
1445
1446 if (inst->opcode->flags & F_LDS_SIZE)
1447 {
1448 aarch64_field field = {0, 0};
1449 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1450 == AARCH64_OPND_CLASS_INT_REG);
1451 gen_sub_field (FLD_opc, 0, 1, &field);
1452 value = extract_field_2 (&field, inst->value, 0);
1453 inst->operands[0].qualifier
1454 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
1455 }
1456
1457 /* Miscellaneous decoding; done as the last step. */
1458 if (inst->opcode->flags & F_MISC)
1459 return do_misc_decoding (inst);
1460
1461 return 1;
1462 }
1463
1464 /* Converters converting a real opcode instruction to its alias form. */
1465
1466 /* ROR <Wd>, <Ws>, #<shift>
1467 is equivalent to:
1468 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1469 static int
1470 convert_extr_to_ror (aarch64_inst *inst)
1471 {
1472 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1473 {
1474 copy_operand_info (inst, 2, 3);
1475 inst->operands[3].type = AARCH64_OPND_NIL;
1476 return 1;
1477 }
1478 return 0;
1479 }
1480
1481 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1482 is equivalent to:
1483 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1484 static int
1485 convert_shll_to_xtl (aarch64_inst *inst)
1486 {
1487 if (inst->operands[2].imm.value == 0)
1488 {
1489 inst->operands[2].type = AARCH64_OPND_NIL;
1490 return 1;
1491 }
1492 return 0;
1493 }
1494
1495 /* Convert
1496 UBFM <Xd>, <Xn>, #<shift>, #63.
1497 to
1498 LSR <Xd>, <Xn>, #<shift>. */
1499 static int
1500 convert_bfm_to_sr (aarch64_inst *inst)
1501 {
1502 int64_t imms, val;
1503
1504 imms = inst->operands[3].imm.value;
1505 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1506 if (imms == val)
1507 {
1508 inst->operands[3].type = AARCH64_OPND_NIL;
1509 return 1;
1510 }
1511
1512 return 0;
1513 }
1514
1515 /* Convert MOV to ORR. */
1516 static int
1517 convert_orr_to_mov (aarch64_inst *inst)
1518 {
1519 /* MOV <Vd>.<T>, <Vn>.<T>
1520 is equivalent to:
1521 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1522 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1523 {
1524 inst->operands[2].type = AARCH64_OPND_NIL;
1525 return 1;
1526 }
1527 return 0;
1528 }
1529
1530 /* When <imms> >= <immr>, the instruction written:
1531 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1532 is equivalent to:
1533 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1534
1535 static int
1536 convert_bfm_to_bfx (aarch64_inst *inst)
1537 {
1538 int64_t immr, imms;
1539
1540 immr = inst->operands[2].imm.value;
1541 imms = inst->operands[3].imm.value;
1542 if (imms >= immr)
1543 {
1544 int64_t lsb = immr;
1545 inst->operands[2].imm.value = lsb;
1546 inst->operands[3].imm.value = imms + 1 - lsb;
1547 /* The two opcodes have different qualifiers for
1548 the immediate operands; reset to help the checking. */
1549 reset_operand_qualifier (inst, 2);
1550 reset_operand_qualifier (inst, 3);
1551 return 1;
1552 }
1553
1554 return 0;
1555 }
1556
1557 /* When <imms> < <immr>, the instruction written:
1558 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1559 is equivalent to:
1560 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1561
1562 static int
1563 convert_bfm_to_bfi (aarch64_inst *inst)
1564 {
1565 int64_t immr, imms, val;
1566
1567 immr = inst->operands[2].imm.value;
1568 imms = inst->operands[3].imm.value;
1569 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
1570 if (imms < immr)
1571 {
1572 inst->operands[2].imm.value = (val - immr) & (val - 1);
1573 inst->operands[3].imm.value = imms + 1;
1574 /* The two opcodes have different qualifiers for
1575 the immediate operands; reset to help the checking. */
1576 reset_operand_qualifier (inst, 2);
1577 reset_operand_qualifier (inst, 3);
1578 return 1;
1579 }
1580
1581 return 0;
1582 }
1583
1584 /* The instruction written:
1585 LSL <Xd>, <Xn>, #<shift>
1586 is equivalent to:
1587 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1588
1589 static int
1590 convert_ubfm_to_lsl (aarch64_inst *inst)
1591 {
1592 int64_t immr = inst->operands[2].imm.value;
1593 int64_t imms = inst->operands[3].imm.value;
1594 int64_t val
1595 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1596
1597 if ((immr == 0 && imms == val) || immr == imms + 1)
1598 {
1599 inst->operands[3].type = AARCH64_OPND_NIL;
1600 inst->operands[2].imm.value = val - imms;
1601 return 1;
1602 }
1603
1604 return 0;
1605 }
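
/* Illustrative sketch, not part of the original file: a worked example of
   the conversion above.  LSL <Xd>, <Xn>, #3 is the alias of
   UBFM <Xd>, <Xn>, #61, #60, since immr = (64 - 3) & 0x3f = 61 and
   imms = 63 - 3 = 60; immr == imms + 1 therefore holds and the recovered
   shift is val - imms = 63 - 60 = 3.  The helper name is hypothetical.  */

static int64_t ATTRIBUTE_UNUSED
example_lsl_shift_from_ubfm (int64_t immr, int64_t imms, int is64)
{
  int64_t val = is64 ? 63 : 31;

  if ((immr == 0 && imms == val) || immr == imms + 1)
    return val - imms;
  return -1;			/* Not representable as LSL.  */
}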
1606
1607 /* CINC <Wd>, <Wn>, <cond>
1608 is equivalent to:
1609 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1610
1611 static int
1612 convert_from_csel (aarch64_inst *inst)
1613 {
1614 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1615 {
1616 copy_operand_info (inst, 2, 3);
1617 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
1618 inst->operands[3].type = AARCH64_OPND_NIL;
1619 return 1;
1620 }
1621 return 0;
1622 }
1623
1624 /* CSET <Wd>, <cond>
1625 is equivalent to:
1626 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1627
1628 static int
1629 convert_csinc_to_cset (aarch64_inst *inst)
1630 {
1631 if (inst->operands[1].reg.regno == 0x1f
1632 && inst->operands[2].reg.regno == 0x1f)
1633 {
1634 copy_operand_info (inst, 1, 3);
1635 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
1636 inst->operands[3].type = AARCH64_OPND_NIL;
1637 inst->operands[2].type = AARCH64_OPND_NIL;
1638 return 1;
1639 }
1640 return 0;
1641 }
1642
1643 /* MOV <Wd>, #<imm>
1644 is equivalent to:
1645 MOVZ <Wd>, #<imm16>, LSL #<shift>.
1646
1647 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1648 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1649 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1650 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1651 machine-instruction mnemonic must be used. */
1652
1653 static int
1654 convert_movewide_to_mov (aarch64_inst *inst)
1655 {
1656 uint64_t value = inst->operands[1].imm.value;
1657 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
1658 if (value == 0 && inst->operands[1].shifter.amount != 0)
1659 return 0;
1660 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1661 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
1662 value <<= inst->operands[1].shifter.amount;
 1663 /* As an alias converter, it has to be clear that the INST->OPCODE
1664 is the opcode of the real instruction. */
1665 if (inst->opcode->op == OP_MOVN)
1666 {
1667 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1668 value = ~value;
1669 /* A MOVN has an immediate that could be encoded by MOVZ. */
1670 if (aarch64_wide_constant_p (value, is32, NULL) == TRUE)
1671 return 0;
1672 }
1673 inst->operands[1].imm.value = value;
1674 inst->operands[1].shifter.amount = 0;
1675 return 1;
1676 }
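
/* Illustrative sketch, not part of the original file: a worked example of
   the conversion above.  MOVZ <Wd>, #0x1234, LSL #16 is printed as
   MOV <Wd>, #0x12340000 (the 16-bit chunk shifted into place), whereas
   MOVZ <Wd>, #0, LSL #16 keeps the machine mnemonic because a zero
   immediate with a non-zero shift is rejected above.  The helper name is
   hypothetical; MOVN additionally inverts the result before it is shown.  */

static uint64_t ATTRIBUTE_UNUSED
example_movz_effective_imm (uint64_t imm16, unsigned int lsl_amount)
{
  return imm16 << lsl_amount;
}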
1677
1678 /* MOV <Wd>, #<imm>
1679 is equivalent to:
1680 ORR <Wd>, WZR, #<imm>.
1681
1682 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1683 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1684 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1685 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1686 machine-instruction mnemonic must be used. */
1687
1688 static int
1689 convert_movebitmask_to_mov (aarch64_inst *inst)
1690 {
1691 int is32;
1692 uint64_t value;
1693
1694 /* Should have been assured by the base opcode value. */
1695 assert (inst->operands[1].reg.regno == 0x1f);
1696 copy_operand_info (inst, 1, 2);
1697 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1698 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1699 value = inst->operands[1].imm.value;
1700 /* ORR has an immediate that could be generated by a MOVZ or MOVN
1701 instruction. */
1702 if (inst->operands[0].reg.regno != 0x1f
1703 && (aarch64_wide_constant_p (value, is32, NULL) == TRUE
1704 || aarch64_wide_constant_p (~value, is32, NULL) == TRUE))
1705 return 0;
1706
1707 inst->operands[2].type = AARCH64_OPND_NIL;
1708 return 1;
1709 }
1710
1711 /* Some alias opcodes are disassembled by being converted from their real-form.
1712 N.B. INST->OPCODE is the real opcode rather than the alias. */
1713
1714 static int
1715 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
1716 {
1717 switch (alias->op)
1718 {
1719 case OP_ASR_IMM:
1720 case OP_LSR_IMM:
1721 return convert_bfm_to_sr (inst);
1722 case OP_LSL_IMM:
1723 return convert_ubfm_to_lsl (inst);
1724 case OP_CINC:
1725 case OP_CINV:
1726 case OP_CNEG:
1727 return convert_from_csel (inst);
1728 case OP_CSET:
1729 case OP_CSETM:
1730 return convert_csinc_to_cset (inst);
1731 case OP_UBFX:
1732 case OP_BFXIL:
1733 case OP_SBFX:
1734 return convert_bfm_to_bfx (inst);
1735 case OP_SBFIZ:
1736 case OP_BFI:
1737 case OP_UBFIZ:
1738 return convert_bfm_to_bfi (inst);
1739 case OP_MOV_V:
1740 return convert_orr_to_mov (inst);
1741 case OP_MOV_IMM_WIDE:
1742 case OP_MOV_IMM_WIDEN:
1743 return convert_movewide_to_mov (inst);
1744 case OP_MOV_IMM_LOG:
1745 return convert_movebitmask_to_mov (inst);
1746 case OP_ROR_IMM:
1747 return convert_extr_to_ror (inst);
1748 case OP_SXTL:
1749 case OP_SXTL2:
1750 case OP_UXTL:
1751 case OP_UXTL2:
1752 return convert_shll_to_xtl (inst);
1753 default:
1754 return 0;
1755 }
1756 }
1757
1758 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
1759 aarch64_inst *, int);
1760
1761 /* Given the instruction information in *INST, check if the instruction has
1762 any alias form that can be used to represent *INST. If the answer is yes,
1763 update *INST to be in the form of the determined alias. */
1764
1765 /* In the opcode description table, the following flags are used in opcode
1766 entries to help establish the relations between the real and alias opcodes:
1767
1768 F_ALIAS: opcode is an alias
1769 F_HAS_ALIAS: opcode has alias(es)
1770 F_P1
1771 F_P2
1772 F_P3: Disassembly preference priority 1-3 (the larger the
1773 higher). If nothing is specified, it is the priority
1774 0 by default, i.e. the lowest priority.
1775
 1776 Although the relation between the machine and the alias instructions is not
1777 explicitly described, it can be easily determined from the base opcode
1778 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
1779 description entries:
1780
1781 The mask of an alias opcode must be equal to or a super-set (i.e. more
1782 constrained) of that of the aliased opcode; so is the base opcode value.
1783
1784 if (opcode_has_alias (real) && alias_opcode_p (opcode)
1785 && (opcode->mask & real->mask) == real->mask
1786 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
1787 then OPCODE is an alias of, and only of, the REAL instruction
1788
 1789 The alias relationship is forced to be flat-structured to keep the related algorithm
1790 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
1791
1792 During the disassembling, the decoding decision tree (in
 1793 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
1794 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
 1795 not specified), the disassembler will check whether any alias
 1796 instruction exists for this real instruction. If there is, the disassembler
1797 will try to disassemble the 32-bit binary again using the alias's rule, or
 1798 try to convert the IR to the form of the alias. In the case of multiple
 1799 aliases, the aliases are tried one by one from the highest priority
 1800 (currently the flag F_P3) to the lowest priority (no priority flag), and the
 1801 first one that succeeds is adopted.
1802
1803 You may ask why there is a need for converting the IR from one form to
1804 another when handling certain aliases. On the one hand, it avoids
1805 adding more operand code to handle unusual encoding/decoding; on the other
1806 hand, during disassembly, the conversion is an effective way to
1807 check the condition of an alias (as an alias may be adopted only if certain
1808 conditions are met).
1809
1810 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
1811 aarch64_opcode_table and generated aarch64_find_alias_opcode and
1812 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
1813
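/* Illustrative example (the flag assignment is inferred from the converters
   in this file rather than quoted from the opcode table): LSL <Wd>, <Wn>,
   #<shift> is an alias of UBFM <Wd>, <Wn>, #(-<shift> MOD 32), #(31 - <shift>).
   The UBFM entry carries F_HAS_ALIAS, while the LSL entry is an F_ALIAS
   (F_CONV) alias; determine_disassembling_preference below only adopts the
   LSL form when convert_ubfm_to_lsl succeeds, i.e. when the immr and imms
   fields of the UBFM encoding satisfy the relation above.  */
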
1814 static void
1815 determine_disassembling_preference (struct aarch64_inst *inst)
1816 {
1817 const aarch64_opcode *opcode;
1818 const aarch64_opcode *alias;
1819
1820 opcode = inst->opcode;
1821
1822 /* This opcode does not have an alias, so use itself. */
1823 if (opcode_has_alias (opcode) == FALSE)
1824 return;
1825
1826 alias = aarch64_find_alias_opcode (opcode);
1827 assert (alias);
1828
1829 #ifdef DEBUG_AARCH64
1830 if (debug_dump)
1831 {
1832 const aarch64_opcode *tmp = alias;
1833 printf ("#### LIST ordered: ");
1834 while (tmp)
1835 {
1836 printf ("%s, ", tmp->name);
1837 tmp = aarch64_find_next_alias_opcode (tmp);
1838 }
1839 printf ("\n");
1840 }
1841 #endif /* DEBUG_AARCH64 */
1842
1843 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
1844 {
1845 DEBUG_TRACE ("try %s", alias->name);
1846 assert (alias_opcode_p (alias));
1847
1848 /* An alias can be a pseudo opcode which will never be used in the
1849 disassembly, e.g. BIC logical immediate is such a pseudo opcode
1850 aliasing AND. */
1851 if (pseudo_opcode_p (alias))
1852 {
1853 DEBUG_TRACE ("skip pseudo %s", alias->name);
1854 continue;
1855 }
1856
1857 if ((inst->value & alias->mask) != alias->opcode)
1858 {
1859 DEBUG_TRACE ("skip %s as base opcode does not match", alias->name);
1860 continue;
1861 }
1862 /* No need to do any complicated transformation on operands, if the alias
1863 opcode does not have any operand. */
1864 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
1865 {
1866 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
1867 aarch64_replace_opcode (inst, alias);
1868 return;
1869 }
1870 if (alias->flags & F_CONV)
1871 {
1872 aarch64_inst copy;
1873 memcpy (&copy, inst, sizeof (aarch64_inst));
1874 /* ALIAS is the preference as long as the instruction can be
1875 successfully converted to the form of ALIAS. */
1876 if (convert_to_alias (&copy, alias) == 1)
1877 {
1878 aarch64_replace_opcode (&copy, alias);
1879 assert (aarch64_match_operands_constraint (&copy, NULL));
1880 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
1881 memcpy (inst, &copy, sizeof (aarch64_inst));
1882 return;
1883 }
1884 }
1885 else
1886 {
1887 /* Directly decode the alias opcode. */
1888 aarch64_inst temp;
1889 memset (&temp, '\0', sizeof (aarch64_inst));
1890 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
1891 {
1892 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
1893 memcpy (inst, &temp, sizeof (aarch64_inst));
1894 return;
1895 }
1896 }
1897 }
1898 }
1899
1900 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
1901 fails, which means that CODE is not an instruction of OPCODE; otherwise
1902 return 1.
1903
1904 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
1905 determined and used to disassemble CODE; this is done just before the
1906 return. */
1907
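/* A minimal usage sketch (mirroring disas_aarch64_insn further below;
   CANDIDATE and INSN_WORD are hypothetical placeholders):

     aarch64_inst inst;
     if (aarch64_opcode_decode (candidate, insn_word, &inst, no_aliases) == 1)
       ... INST now describes INSN_WORD, possibly in an alias form ...  */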
1908 static int
1909 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
1910 aarch64_inst *inst, int noaliases_p)
1911 {
1912 int i;
1913
1914 DEBUG_TRACE ("enter with %s", opcode->name);
1915
1916 assert (opcode && inst);
1917
1918 /* Check the base opcode. */
1919 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
1920 {
1921 DEBUG_TRACE ("base opcode match FAIL");
1922 goto decode_fail;
1923 }
1924
1925 /* Clear inst. */
1926 memset (inst, '\0', sizeof (aarch64_inst));
1927
1928 inst->opcode = opcode;
1929 inst->value = code;
1930
1931 /* Assign operand codes and indexes. */
1932 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1933 {
1934 if (opcode->operands[i] == AARCH64_OPND_NIL)
1935 break;
1936 inst->operands[i].type = opcode->operands[i];
1937 inst->operands[i].idx = i;
1938 }
1939
1940 /* Call the opcode decoder indicated by flags. */
1941 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
1942 {
1943 DEBUG_TRACE ("opcode flag-based decoder FAIL");
1944 goto decode_fail;
1945 }
1946
1947 /* Call operand decoders. */
1948 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1949 {
1950 const aarch64_operand *opnd;
1951 enum aarch64_opnd type;
1952 type = opcode->operands[i];
1953 if (type == AARCH64_OPND_NIL)
1954 break;
1955 opnd = &aarch64_operands[type];
1956 if (operand_has_extractor (opnd)
1957 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
1958 {
1959 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
1960 goto decode_fail;
1961 }
1962 }
1963
1964 /* Match the qualifiers. */
1965 if (aarch64_match_operands_constraint (inst, NULL) == 1)
1966 {
1967 /* Arriving here, the CODE has been determined as a valid instruction
1968 of OPCODE and *INST has been filled with information of this OPCODE
1969 instruction. Before the return, check if the instruction has any
1970 alias and should be disassembled in the form of its alias instead.
1971 If the answer is yes, *INST will be updated. */
1972 if (!noaliases_p)
1973 determine_disassembling_preference (inst);
1974 DEBUG_TRACE ("SUCCESS");
1975 return 1;
1976 }
1977 else
1978 {
1979 DEBUG_TRACE ("constraint matching FAIL");
1980 }
1981
1982 decode_fail:
1983 return 0;
1984 }
1985 \f
1986 /* This does some user-friendly fix-up to *INST. It currently focuses on
1987 the adjustment of qualifiers to help the printed instruction be
1988 recognized/understood more easily. */
1989
1990 static void
1991 user_friendly_fixup (aarch64_inst *inst)
1992 {
1993 switch (inst->opcode->iclass)
1994 {
1995 case testbranch:
1996 /* TBNZ Xn|Wn, #uimm6, label
1997 Test and Branch Not Zero: conditionally jumps to label if bit number
1998 uimm6 in register Xn is not zero. The bit number implies the width of
1999 the register, which may be written and should be disassembled as Wn if
2000 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2001 */
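/* For example (register number chosen arbitrarily), an encoding with
   bit number 3 is printed as "tbnz w1, #3, <label>", whereas bit
   numbers of 32 or above keep the X register name.  */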
2002 if (inst->operands[1].imm.value < 32)
2003 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2004 break;
2005 default: break;
2006 }
2007 }
2008
2009 /* Decode INSN and fill in *INST the instruction information. */
2010
2011 static int
2012 disas_aarch64_insn (uint64_t pc ATTRIBUTE_UNUSED, uint32_t insn,
2013 aarch64_inst *inst)
2014 {
2015 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2016
2017 #ifdef DEBUG_AARCH64
2018 if (debug_dump)
2019 {
2020 const aarch64_opcode *tmp = opcode;
2021 printf ("\n");
2022 DEBUG_TRACE ("opcode lookup:");
2023 while (tmp != NULL)
2024 {
2025 aarch64_verbose (" %s", tmp->name);
2026 tmp = aarch64_find_next_opcode (tmp);
2027 }
2028 }
2029 #endif /* DEBUG_AARCH64 */
2030
2031 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2032 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2033 opcode field and value; the difference is that one of them has an extra
2034 field as part of the opcode, while that field is used for operand
2035 encoding in the other opcode(s) ('immh' in the case of the example). */
2036 while (opcode != NULL)
2037 {
2038 /* But only one opcode can be decoded successfully, as the
2039 decoding routine checks the constraints carefully. */
2040 if (aarch64_opcode_decode (opcode, insn, inst, no_aliases) == 1)
2041 return ERR_OK;
2042 opcode = aarch64_find_next_opcode (opcode);
2043 }
2044
2045 return ERR_UND;
2046 }
2047
2048 /* Print operands. */
2049
2050 static void
2051 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2052 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2053 {
2054 int i, pcrel_p, num_printed;
2055 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2056 {
2057 const size_t size = 128;
2058 char str[size];
2059 /* We rely mainly on the operand info from the opcode entry, but we
2060 also look into the inst->operands to support the disassembling of
2061 any optional operand.
2062 The two operand codes should be the same in all cases, apart from
2063 when the operand can be optional. */
2064 if (opcode->operands[i] == AARCH64_OPND_NIL
2065 || opnds[i].type == AARCH64_OPND_NIL)
2066 break;
2067
2068 /* Generate the operand string in STR. */
2069 aarch64_print_operand (str, size, pc, opcode, opnds, i, &pcrel_p,
2070 &info->target);
2071
2072 /* Print the delimiter (taking account of omitted operand(s)). */
2073 if (str[0] != '\0')
2074 (*info->fprintf_func) (info->stream, "%s",
2075 num_printed++ == 0 ? "\t" : ", ");
2076
2077 /* Print the operand. */
2078 if (pcrel_p)
2079 (*info->print_address_func) (info->target, info);
2080 else
2081 (*info->fprintf_func) (info->stream, "%s", str);
2082 }
2083 }
2084
2085 /* Print the instruction mnemonic name. */
2086
2087 static void
2088 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2089 {
2090 if (inst->opcode->flags & F_COND)
2091 {
2092 /* For instructions that are truly conditionally executed, e.g. b.cond,
2093 prepare the full mnemonic name with the corresponding condition
2094 suffix. */
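/* For example, a conditional branch whose condition decodes to NE is
   printed as "b.ne".  */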
2095 char name[8], *ptr;
2096 size_t len;
2097
2098 ptr = strchr (inst->opcode->name, '.');
2099 assert (ptr && inst->cond);
2100 len = ptr - inst->opcode->name;
2101 assert (len < 8);
2102 strncpy (name, inst->opcode->name, len);
2103 name [len] = '\0';
2104 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2105 }
2106 else
2107 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2108 }
2109
2110 /* Print the instruction according to *INST. */
2111
2112 static void
2113 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2114 struct disassemble_info *info)
2115 {
2116 print_mnemonic_name (inst, info);
2117 print_operands (pc, inst->opcode, inst->operands, info);
2118 }
2119
2120 /* Entry-point of the instruction disassembler and printer. */
2121
2122 static void
2123 print_insn_aarch64_word (bfd_vma pc,
2124 uint32_t word,
2125 struct disassemble_info *info)
2126 {
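  /* The non-zero ERR_* codes are negative (see the definitions near the top
     of this file), so -ret is used below to index this table.  */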
2127 static const char *err_msg[6] =
2128 {
2129 [ERR_OK] = "_",
2130 [-ERR_UND] = "undefined",
2131 [-ERR_UNP] = "unpredictable",
2132 [-ERR_NYI] = "NYI"
2133 };
2134
2135 int ret;
2136 aarch64_inst inst;
2137
2138 info->insn_info_valid = 1;
2139 info->branch_delay_insns = 0;
2140 info->data_size = 0;
2141 info->target = 0;
2142 info->target2 = 0;
2143
2144 if (info->flags & INSN_HAS_RELOC)
2145 /* If the instruction has a reloc associated with it, then
2146 the offset field in the instruction will actually be the
2147 addend for the reloc. (If we are using REL type relocs).
2148 In such cases, we can ignore the pc when computing
2149 addresses, since the addend is not currently pc-relative. */
2150 pc = 0;
2151
2152 ret = disas_aarch64_insn (pc, word, &inst);
2153
2154 if (((word >> 21) & 0x3ff) == 1)
2155 {
2156 /* RESERVED for ALES. */
2157 assert (ret != ERR_OK);
2158 ret = ERR_NYI;
2159 }
2160
2161 switch (ret)
2162 {
2163 case ERR_UND:
2164 case ERR_UNP:
2165 case ERR_NYI:
2166 /* Handle undefined instructions. */
2167 info->insn_type = dis_noninsn;
2168 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
2169 word, err_msg[-ret]);
2170 break;
2171 case ERR_OK:
2172 user_friendly_fixup (&inst);
2173 print_aarch64_insn (pc, &inst, info);
2174 break;
2175 default:
2176 abort ();
2177 }
2178 }
2179
2180 /* Disallow mapping symbols ($x, $d etc) from
2181 being displayed in symbol relative addresses. */
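/* For example, symbols named "$x", "$d" or "$x.123" are mapping symbols and
   are rejected here, whereas an ordinary symbol such as "main" remains
   valid.  */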
2182
2183 bfd_boolean
2184 aarch64_symbol_is_valid (asymbol * sym,
2185 struct disassemble_info * info ATTRIBUTE_UNUSED)
2186 {
2187 const char * name;
2188
2189 if (sym == NULL)
2190 return FALSE;
2191
2192 name = bfd_asymbol_name (sym);
2193
2194 return name
2195 && (name[0] != '$'
2196 || (name[1] != 'x' && name[1] != 'd')
2197 || (name[2] != '\0' && name[2] != '.'));
2198 }
2199
2200 /* Print data bytes on INFO->STREAM. */
2201
2202 static void
2203 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
2204 uint32_t word,
2205 struct disassemble_info *info)
2206 {
2207 switch (info->bytes_per_chunk)
2208 {
2209 case 1:
2210 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
2211 break;
2212 case 2:
2213 info->fprintf_func (info->stream, ".short\t0x%04x", word);
2214 break;
2215 case 4:
2216 info->fprintf_func (info->stream, ".word\t0x%08x", word);
2217 break;
2218 default:
2219 abort ();
2220 }
2221 }
2222
2223 /* Try to infer the code or data type from a symbol.
2224 Returns nonzero if *MAP_TYPE was set. */
2225
2226 static int
2227 get_sym_code_type (struct disassemble_info *info, int n,
2228 enum map_type *map_type)
2229 {
2230 elf_symbol_type *es;
2231 unsigned int type;
2232 const char *name;
2233
2234 es = *(elf_symbol_type **)(info->symtab + n);
2235 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
2236
2237 /* If the symbol has function type then use that. */
2238 if (type == STT_FUNC)
2239 {
2240 *map_type = MAP_INSN;
2241 return TRUE;
2242 }
2243
2244 /* Check for mapping symbols. */
2245 name = bfd_asymbol_name(info->symtab[n]);
2246 if (name[0] == '$'
2247 && (name[1] == 'x' || name[1] == 'd')
2248 && (name[2] == '\0' || name[2] == '.'))
2249 {
2250 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
2251 return TRUE;
2252 }
2253
2254 return FALSE;
2255 }
2256
2257 /* Entry-point of the AArch64 disassembler. */
2258
2259 int
2260 print_insn_aarch64 (bfd_vma pc,
2261 struct disassemble_info *info)
2262 {
2263 bfd_byte buffer[INSNLEN];
2264 int status;
2265 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
2266 bfd_boolean found = FALSE;
2267 unsigned int size = 4;
2268 unsigned long data;
2269
2270 if (info->disassembler_options)
2271 {
2272 set_default_aarch64_dis_options (info);
2273
2274 parse_aarch64_dis_options (info->disassembler_options);
2275
2276 /* To avoid repeated parsing of these options, we remove them here. */
2277 info->disassembler_options = NULL;
2278 }
2279
2280 /* AArch64 instructions are always little-endian. */
2281 info->endian_code = BFD_ENDIAN_LITTLE;
2282
2283 /* First check the full symtab for a mapping symbol, even if there
2284 are no usable non-mapping symbols for this address. */
2285 if (info->symtab_size != 0
2286 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
2287 {
2288 enum map_type type = MAP_INSN;
2289 int last_sym = -1;
2290 bfd_vma addr;
2291 int n;
2292
2293 if (pc <= last_mapping_addr)
2294 last_mapping_sym = -1;
2295
2296 /* Start scanning at the start of the function, or wherever
2297 we finished last time. */
2298 n = info->symtab_pos + 1;
2299 if (n < last_mapping_sym)
2300 n = last_mapping_sym;
2301
2302 /* Scan up to the location being disassembled. */
2303 for (; n < info->symtab_size; n++)
2304 {
2305 addr = bfd_asymbol_value (info->symtab[n]);
2306 if (addr > pc)
2307 break;
2308 if ((info->section == NULL
2309 || info->section == info->symtab[n]->section)
2310 && get_sym_code_type (info, n, &type))
2311 {
2312 last_sym = n;
2313 found = TRUE;
2314 }
2315 }
2316
2317 if (!found)
2318 {
2319 n = info->symtab_pos;
2320 if (n < last_mapping_sym)
2321 n = last_mapping_sym;
2322
2323 /* No mapping symbol found at this address. Look backwards
2324 for a preceding one. */
2325 for (; n >= 0; n--)
2326 {
2327 if (get_sym_code_type (info, n, &type))
2328 {
2329 last_sym = n;
2330 found = TRUE;
2331 break;
2332 }
2333 }
2334 }
2335
2336 last_mapping_sym = last_sym;
2337 last_type = type;
2338
2339 /* Look a little bit ahead to see if we should print out
2340 less than four bytes of data. If there's a symbol,
2341 mapping or otherwise, after two bytes then don't
2342 print more. */
2343 if (last_type == MAP_DATA)
2344 {
2345 size = 4 - (pc & 3);
2346 for (n = last_sym + 1; n < info->symtab_size; n++)
2347 {
2348 addr = bfd_asymbol_value (info->symtab[n]);
2349 if (addr > pc)
2350 {
2351 if (addr - pc < size)
2352 size = addr - pc;
2353 break;
2354 }
2355 }
2356 /* If the next symbol is after three bytes, we need to
2357 print only part of the data, so that we can use either
2358 .byte or .short. */
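/* For example, if PC is 4-byte aligned and the next symbol is
   3 bytes away, a .short is printed now and the remaining byte
   is printed as a .byte on the following call.  */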
2359 if (size == 3)
2360 size = (pc & 1) ? 1 : 2;
2361 }
2362 }
2363
2364 if (last_type == MAP_DATA)
2365 {
2366 /* size was set above. */
2367 info->bytes_per_chunk = size;
2368 info->display_endian = info->endian;
2369 printer = print_insn_data;
2370 }
2371 else
2372 {
2373 info->bytes_per_chunk = size = INSNLEN;
2374 info->display_endian = info->endian_code;
2375 printer = print_insn_aarch64_word;
2376 }
2377
2378 status = (*info->read_memory_func) (pc, buffer, size, info);
2379 if (status != 0)
2380 {
2381 (*info->memory_error_func) (status, pc, info);
2382 return -1;
2383 }
2384
2385 data = bfd_get_bits (buffer, size * 8,
2386 info->display_endian == BFD_ENDIAN_BIG);
2387
2388 (*printer) (pc, data, info);
2389
2390 return size;
2391 }
2392 \f
2393 void
2394 print_aarch64_disassembler_options (FILE *stream)
2395 {
2396 fprintf (stream, _("\n\
2397 The following AARCH64 specific disassembler options are supported for use\n\
2398 with the -M switch (multiple options should be separated by commas):\n"));
2399
2400 fprintf (stream, _("\n\
2401 no-aliases Don't print instruction aliases.\n"));
2402
2403 fprintf (stream, _("\n\
2404 aliases Do print instruction aliases.\n"));
2405
2406 #ifdef DEBUG_AARCH64
2407 fprintf (stream, _("\n\
2408 debug_dump Temp switch for debug trace.\n"));
2409 #endif /* DEBUG_AARCH64 */
2410
2411 fprintf (stream, _("\n"));
2412 }
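
/* For example (file name hypothetical), "objdump -d -M no-aliases foo.o"
   passes "no-aliases" to this disassembler via info->disassembler_options,
   which print_insn_aarch64 above parses and then clears.  */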