Add support for 64-bit ARM architecture: AArch64
[deliverable/binutils-gdb.git] / opcodes / aarch64-dis.c
1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "dis-asm.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27
28 #if !defined(EMBEDDED_ENV)
29 #define SYMTAB_AVAILABLE 1
30 #include "elf-bfd.h"
31 #include "elf/aarch64.h"
32 #endif
33
34 #define ERR_OK 0
35 #define ERR_UND -1
36 #define ERR_UNP -3
37 #define ERR_NYI -5
38
39 #define INSNLEN 4
40
41 /* Cached mapping symbol state. */
42 enum map_type
43 {
44 MAP_INSN,
45 MAP_DATA
46 };
47
48 static enum map_type last_type;
49 static int last_mapping_sym = -1;
50 static bfd_vma last_mapping_addr = 0;
51
52 /* Other options */
53 static int no_aliases = 0; /* If set, disassemble as the most general instruction. */
54 \f
55
56 static void
57 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
58 {
59 }
60
61 static void
62 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
63 {
64 /* Try to match options that are simple flags */
65 if (CONST_STRNEQ (option, "no-aliases"))
66 {
67 no_aliases = 1;
68 return;
69 }
70
71 if (CONST_STRNEQ (option, "aliases"))
72 {
73 no_aliases = 0;
74 return;
75 }
76
77 #ifdef DEBUG_AARCH64
78 if (CONST_STRNEQ (option, "debug_dump"))
79 {
80 debug_dump = 1;
81 return;
82 }
83 #endif /* DEBUG_AARCH64 */
84
85 /* Invalid option. */
86 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
87 }
88
89 static void
90 parse_aarch64_dis_options (const char *options)
91 {
92 const char *option_end;
93
94 if (options == NULL)
95 return;
96
97 while (*options != '\0')
98 {
99 /* Skip empty options. */
100 if (*options == ',')
101 {
102 options++;
103 continue;
104 }
105
106 /* We know that *options is neither NUL nor a comma. */
107 option_end = options + 1;
108 while (*option_end != ',' && *option_end != '\0')
109 option_end++;
110
111 parse_aarch64_dis_option (options, option_end - options);
112
113 /* Go on to the next one. If option_end points to a comma, it
114 will be skipped above. */
115 options = option_end;
116 }
117 }
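/* For example, a disassembler option string such as "no-aliases" (typically
   supplied via objdump's -M switch) is split by the loop above at any commas
   and each piece handed to parse_aarch64_dis_option; with "aliases,no-aliases"
   the last option seen wins and no_aliases ends up set to 1.  */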
118 \f
119 /* Functions doing the instruction disassembling. */
120
121 /* The unnamed arguments consist of the number of fields and information about
122 these fields where the VALUE will be extracted from CODE and returned.
123 MASK can be zero or the base mask of the opcode.
124
125 N.B. the fields are required to be in such an order that the most significant
126 field for VALUE comes first, e.g. the <index> in
127 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
128 is encoded in H:L:M in some cases; the fields H:L:M should then be passed in
129 the order of H, L, M. */
130
131 static inline aarch64_insn
132 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
133 {
134 uint32_t num;
135 const aarch64_field *field;
136 enum aarch64_field_kind kind;
137 va_list va;
138
139 va_start (va, mask);
140 num = va_arg (va, uint32_t);
141 assert (num <= 5);
142 aarch64_insn value = 0x0;
143 while (num--)
144 {
145 kind = va_arg (va, enum aarch64_field_kind);
146 field = &fields[kind];
147 value <<= field->width;
148 value |= extract_field (kind, code, mask);
149 }
150 return value;
151 }
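/* For example, extract_fields (code, 0, 2, FLD_H, FLD_L) returns the
   concatenated value H:L, with H as the more significant bit; this is how
   the by-element <index> operands are decoded further below.  */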
152
153 /* Sign-extend VALUE from bit I, i.e. treat bit I as the sign bit. */
154 static inline int32_t
155 sign_extend (aarch64_insn value, unsigned i)
156 {
157 uint32_t ret = value;
158
159 assert (i < 32);
160 if ((value >> i) & 0x1)
161 {
162 uint32_t val = (uint32_t)(-1) << i;
163 ret = ret | val;
164 }
165 return (int32_t) ret;
166 }
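/* For example, sign_extend (0x1f0, 8) treats bit 8 as the sign bit and
   returns -16, i.e. the 9-bit two's-complement value 0b1_1111_0000.  */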
167
168 /* N.B. the following inline helper functions create a dependency on the
169 order of operand qualifier enumerators. */
170
171 /* Given VALUE, return qualifier for a general purpose register. */
172 static inline enum aarch64_opnd_qualifier
173 get_greg_qualifier_from_value (aarch64_insn value)
174 {
175 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
176 assert (value <= 0x1
177 && aarch64_get_qualifier_standard_value (qualifier) == value);
178 return qualifier;
179 }
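/* E.g. an 'sf'-like field value of 0 yields AARCH64_OPND_QLF_W (32-bit) and
   a value of 1 yields AARCH64_OPND_QLF_X (64-bit).  */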
180
181 /* Given VALUE, return qualifier for a vector register. */
182 static inline enum aarch64_opnd_qualifier
183 get_vreg_qualifier_from_value (aarch64_insn value)
184 {
185 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
186
187 assert (value <= 0x8
188 && aarch64_get_qualifier_standard_value (qualifier) == value);
189 return qualifier;
190 }
191
192 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
193 static inline enum aarch64_opnd_qualifier
194 get_sreg_qualifier_from_value (aarch64_insn value)
195 {
196 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
197
198 assert (value <= 0x4
199 && aarch64_get_qualifier_standard_value (qualifier) == value);
200 return qualifier;
201 }
202
203 /* Given the instruction in *INST, which is probably half way through being
204 decoded, return the qualifier our caller should expect for operand I if we
205 can establish it; otherwise return
206 AARCH64_OPND_QLF_NIL. */
207
208 static aarch64_opnd_qualifier_t
209 get_expected_qualifier (const aarch64_inst *inst, int i)
210 {
211 aarch64_opnd_qualifier_seq_t qualifiers;
212 /* Should not be called if the qualifier is known. */
213 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
214 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
215 i, qualifiers))
216 return qualifiers[i];
217 else
218 return AARCH64_OPND_QLF_NIL;
219 }
220
221 /* Operand extractors. */
222
223 int
224 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
225 const aarch64_insn code,
226 const aarch64_inst *inst ATTRIBUTE_UNUSED)
227 {
228 info->reg.regno = extract_field (self->fields[0], code, 0);
229 return 1;
230 }
231
232 /* e.g. IC <ic_op>{, <Xt>}. */
233 int
234 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
235 const aarch64_insn code,
236 const aarch64_inst *inst ATTRIBUTE_UNUSED)
237 {
238 info->reg.regno = extract_field (self->fields[0], code, 0);
239 assert (info->idx == 1
240 && (aarch64_get_operand_class (inst->operands[0].type)
241 == AARCH64_OPND_CLASS_SYSTEM));
242 /* This will make the constraint checking happy and more importantly will
243 help the disassembler determine whether this operand is optional or
244 not. */
245 info->present = inst->operands[0].sysins_op->has_xt;
246
247 return 1;
248 }
249
250 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
251 int
252 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
253 const aarch64_insn code,
254 const aarch64_inst *inst ATTRIBUTE_UNUSED)
255 {
256 /* regno */
257 info->reglane.regno = extract_field (self->fields[0], code,
258 inst->opcode->mask);
259
260 /* Index and/or type. */
261 if (inst->opcode->iclass == asisdone
262 || inst->opcode->iclass == asimdins)
263 {
264 if (info->type == AARCH64_OPND_En
265 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
266 {
267 unsigned shift;
268 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
269 assert (info->idx == 1); /* Vn */
270 aarch64_insn value = extract_field (FLD_imm4, code, 0);
271 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
272 info->qualifier = get_expected_qualifier (inst, info->idx);
273 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
274 info->reglane.index = value >> shift;
275 }
276 else
277 {
278 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
279 imm5<3:0> <V>
280 0000 RESERVED
281 xxx1 B
282 xx10 H
283 x100 S
284 1000 D */
285 int pos = -1;
286 aarch64_insn value = extract_field (FLD_imm5, code, 0);
287 while (++pos <= 3 && (value & 0x1) == 0)
288 value >>= 1;
289 if (pos > 3)
290 return 0;
291 info->qualifier = get_sreg_qualifier_from_value (pos);
292 info->reglane.index = (unsigned) (value >> 1);
293 }
294 }
295 else
296 {
297 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
298 or SQDMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>]. */
299
300 /* Need information in other operand(s) to help decoding. */
301 info->qualifier = get_expected_qualifier (inst, info->idx);
302 switch (info->qualifier)
303 {
304 case AARCH64_OPND_QLF_S_H:
305 /* h:l:m */
306 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
307 FLD_M);
308 info->reglane.regno &= 0xf;
309 break;
310 case AARCH64_OPND_QLF_S_S:
311 /* h:l */
312 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
313 break;
314 case AARCH64_OPND_QLF_S_D:
315 /* H */
316 info->reglane.index = extract_field (FLD_H, code, 0);
317 break;
318 default:
319 return 0;
320 }
321 }
322
323 return 1;
324 }
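/* Note that in the S_H case above only Vm<3:0> encodes the register number,
   so a by-element instruction on half-word elements can only name V0-V15 as
   the <Vm> register.  */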
325
326 int
327 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
328 const aarch64_insn code,
329 const aarch64_inst *inst ATTRIBUTE_UNUSED)
330 {
331 /* R */
332 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
333 /* len */
334 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
335 return 1;
336 }
337
338 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
339 int
340 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
341 aarch64_opnd_info *info, const aarch64_insn code,
342 const aarch64_inst *inst)
343 {
344 aarch64_insn value;
345 /* Number of elements in each structure to be loaded/stored. */
346 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
347
348 struct
349 {
350 unsigned is_reserved;
351 unsigned num_regs;
352 unsigned num_elements;
353 } data [] =
354 { {0, 4, 4},
355 {1, 4, 4},
356 {0, 4, 1},
357 {0, 4, 2},
358 {0, 3, 3},
359 {1, 3, 3},
360 {0, 3, 1},
361 {0, 1, 1},
362 {0, 2, 2},
363 {1, 2, 2},
364 {0, 2, 1},
365 };
366
367 /* Rt */
368 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
369 /* opcode */
370 value = extract_field (FLD_opcode, code, 0);
371 if (expected_num != data[value].num_elements || data[value].is_reserved)
372 return 0;
373 info->reglist.num_regs = data[value].num_regs;
374
375 return 1;
376 }
377
378 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
379 lanes instructions. */
380 int
381 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
382 aarch64_opnd_info *info, const aarch64_insn code,
383 const aarch64_inst *inst)
384 {
385 aarch64_insn value;
386
387 /* Rt */
388 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
389 /* S */
390 value = extract_field (FLD_S, code, 0);
391
392 /* Number of registers is equal to the number of elements in
393 each structure to be loaded/stored. */
394 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
395 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
396
397 /* Except when it is LD1R. */
398 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
399 info->reglist.num_regs = 2;
400
401 return 1;
402 }
403
404 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
405 load/store single element instructions. */
406 int
407 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
408 aarch64_opnd_info *info, const aarch64_insn code,
409 const aarch64_inst *inst ATTRIBUTE_UNUSED)
410 {
411 aarch64_field field = {0, 0};
412 aarch64_insn QSsize; /* fields Q:S:size. */
413 aarch64_insn opcodeh2; /* opcode<2:1> */
414
415 /* Rt */
416 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
417
418 /* Decode the index, opcode<2:1> and size. */
419 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
420 opcodeh2 = extract_field_2 (&field, code, 0);
421 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
422 switch (opcodeh2)
423 {
424 case 0x0:
425 info->qualifier = AARCH64_OPND_QLF_S_B;
426 /* Index encoded in "Q:S:size". */
427 info->reglist.index = QSsize;
428 break;
429 case 0x1:
430 info->qualifier = AARCH64_OPND_QLF_S_H;
431 /* Index encoded in "Q:S:size<1>". */
432 info->reglist.index = QSsize >> 1;
433 break;
434 case 0x2:
435 if ((QSsize & 0x1) == 0)
436 {
437 info->qualifier = AARCH64_OPND_QLF_S_S;
438 /* Index encoded in "Q:S". */
439 info->reglist.index = QSsize >> 2;
440 }
441 else
442 {
443 info->qualifier = AARCH64_OPND_QLF_S_D;
444 /* Index encoded in "Q". */
445 info->reglist.index = QSsize >> 3;
446 if (extract_field (FLD_S, code, 0))
447 /* UND */
448 return 0;
449 }
450 break;
451 default:
452 return 0;
453 }
454
455 info->reglist.has_index = 1;
456 info->reglist.num_regs = 0;
457 /* Number of registers is equal to the number of elements in
458 each structure to be loaded/stored. */
459 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
460 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
461
462 return 1;
463 }
464
465 /* Decode fields immh:immb and/or Q for e.g.
466 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
467 or SSHR <V><d>, <V><n>, #<shift>. */
468
469 int
470 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
471 aarch64_opnd_info *info, const aarch64_insn code,
472 const aarch64_inst *inst)
473 {
474 int pos;
475 aarch64_insn Q, imm, immh;
476 enum aarch64_insn_class iclass = inst->opcode->iclass;
477
478 immh = extract_field (FLD_immh, code, 0);
479 if (immh == 0)
480 return 0;
481 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
482 pos = 4;
483 /* Get highest set bit in immh. */
484 while (--pos >= 0 && (immh & 0x8) == 0)
485 immh <<= 1;
486
487 assert ((iclass == asimdshf || iclass == asisdshf)
488 && (info->type == AARCH64_OPND_IMM_VLSR
489 || info->type == AARCH64_OPND_IMM_VLSL));
490
491 if (iclass == asimdshf)
492 {
493 Q = extract_field (FLD_Q, code, 0);
494 /* immh Q <T>
495 0000 x SEE AdvSIMD modified immediate
496 0001 0 8B
497 0001 1 16B
498 001x 0 4H
499 001x 1 8H
500 01xx 0 2S
501 01xx 1 4S
502 1xxx 0 RESERVED
503 1xxx 1 2D */
504 info->qualifier =
505 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
506 }
507 else
508 info->qualifier = get_sreg_qualifier_from_value (pos);
509
510 if (info->type == AARCH64_OPND_IMM_VLSR)
511 /* immh <shift>
512 0000 SEE AdvSIMD modified immediate
513 0001 (16-UInt(immh:immb))
514 001x (32-UInt(immh:immb))
515 01xx (64-UInt(immh:immb))
516 1xxx (128-UInt(immh:immb)) */
517 info->imm.value = (16 << pos) - imm;
518 else
519 /* immh:immb
520 immh <shift>
521 0000 SEE AdvSIMD modified immediate
522 0001 (UInt(immh:immb)-8)
523 001x (UInt(immh:immb)-16)
524 01xx (UInt(immh:immb)-32)
525 1xxx (UInt(immh:immb)-64) */
526 info->imm.value = imm - (8 << pos);
527
528 return 1;
529 }
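/* A worked example: for SSHR with immh:immb = 0b0010:110, the highest set
   bit of immh gives pos = 1, so the element type is 4H or 8H (depending on Q)
   and the right-shift amount is (16 << 1) - 0b0010110 = 32 - 22 = 10.  */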
530
531 /* Decode the shift amount (8, 16 or 32) for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
532 int
533 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
534 aarch64_opnd_info *info, const aarch64_insn code,
535 const aarch64_inst *inst ATTRIBUTE_UNUSED)
536 {
537 int64_t imm;
538 aarch64_insn val;
539 val = extract_field (FLD_size, code, 0);
540 switch (val)
541 {
542 case 0: imm = 8; break;
543 case 1: imm = 16; break;
544 case 2: imm = 32; break;
545 default: return 0;
546 }
547 info->imm.value = imm;
548 return 1;
549 }
550
551 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
552 The value in the field(s) will be extracted as an unsigned immediate value. */
553 int
554 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
555 const aarch64_insn code,
556 const aarch64_inst *inst ATTRIBUTE_UNUSED)
557 {
558 int64_t imm;
559 /* Maximum of two fields to extract. */
560 assert (self->fields[2] == FLD_NIL);
561
562 if (self->fields[1] == FLD_NIL)
563 imm = extract_field (self->fields[0], code, 0);
564 else
565 /* e.g. TBZ b5:b40. */
566 imm = extract_fields (code, 0, 2, self->fields[0], self->fields[1]);
567
568 if (info->type == AARCH64_OPND_FPIMM)
569 info->imm.is_fp = 1;
570
571 if (operand_need_sign_extension (self))
572 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
573
574 if (operand_need_shift_by_two (self))
575 imm <<= 2;
576
577 if (info->type == AARCH64_OPND_ADDR_ADRP)
578 imm <<= 12;
579
580 info->imm.value = imm;
581 return 1;
582 }
583
584 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
585 int
586 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
587 const aarch64_insn code,
588 const aarch64_inst *inst ATTRIBUTE_UNUSED)
589 {
590 aarch64_ext_imm (self, info, code, inst);
591 info->shifter.kind = AARCH64_MOD_LSL;
592 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
593 return 1;
594 }
595
596 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
597 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
598 int
599 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
600 aarch64_opnd_info *info,
601 const aarch64_insn code,
602 const aarch64_inst *inst ATTRIBUTE_UNUSED)
603 {
604 uint64_t imm;
605 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
606 aarch64_field field = {0, 0};
607
608 assert (info->idx == 1);
609
610 if (info->type == AARCH64_OPND_SIMD_FPIMM)
611 info->imm.is_fp = 1;
612
613 /* a:b:c:d:e:f:g:h */
614 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
615 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
616 {
617 /* Either MOVI <Dd>, #<imm>
618 or MOVI <Vd>.2D, #<imm>.
619 <imm> is a 64-bit immediate
620 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
621 encoded in "a:b:c:d:e:f:g:h". */
622 int i;
623 unsigned abcdefgh = imm;
624 for (imm = 0ull, i = 0; i < 8; i++)
625 if (((abcdefgh >> i) & 0x1) != 0)
626 imm |= 0xffull << (8 * i);
627 }
628 info->imm.value = imm;
629
630 /* cmode */
631 info->qualifier = get_expected_qualifier (inst, info->idx);
632 switch (info->qualifier)
633 {
634 case AARCH64_OPND_QLF_NIL:
635 /* no shift */
636 info->shifter.kind = AARCH64_MOD_NONE;
637 return 1;
638 case AARCH64_OPND_QLF_LSL:
639 /* shift zeros */
640 info->shifter.kind = AARCH64_MOD_LSL;
641 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
642 {
643 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
644 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
645 default: assert (0); return 0;
646 }
647 /* 00: 0; 01: 8; 10: 16; 11: 24. */
648 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
649 break;
650 case AARCH64_OPND_QLF_MSL:
651 /* shift ones */
652 info->shifter.kind = AARCH64_MOD_MSL;
653 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
654 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
655 break;
656 default:
657 assert (0);
658 return 0;
659 }
660
661 return 1;
662 }
663
664 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
665 int
666 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
667 aarch64_opnd_info *info, const aarch64_insn code,
668 const aarch64_inst *inst ATTRIBUTE_UNUSED)
669 {
670 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
671 return 1;
672 }
673
674 /* Decode arithmetic immediate for e.g.
675 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
676 int
677 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
678 aarch64_opnd_info *info, const aarch64_insn code,
679 const aarch64_inst *inst ATTRIBUTE_UNUSED)
680 {
681 aarch64_insn value;
682
683 info->shifter.kind = AARCH64_MOD_LSL;
684 /* shift */
685 value = extract_field (FLD_shift, code, 0);
686 if (value >= 2)
687 return 0;
688 info->shifter.amount = value ? 12 : 0;
689 /* imm12 (unsigned) */
690 info->imm.value = extract_field (FLD_imm12, code, 0);
691
692 return 1;
693 }
694
695 /* Decode logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
696
697 int
698 aarch64_ext_limm (const aarch64_operand *self ATTRIBUTE_UNUSED,
699 aarch64_opnd_info *info, const aarch64_insn code,
700 const aarch64_inst *inst ATTRIBUTE_UNUSED)
701 {
702 uint64_t imm, mask;
703 uint32_t sf;
704 uint32_t N, R, S;
705 unsigned simd_size;
706 aarch64_insn value;
707
708 value = extract_fields (code, 0, 3, FLD_N, FLD_immr, FLD_imms);
709 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_W
710 || inst->operands[0].qualifier == AARCH64_OPND_QLF_X);
711 sf = aarch64_get_qualifier_esize (inst->operands[0].qualifier) != 4;
712
713 /* value is N:immr:imms. */
714 S = value & 0x3f;
715 R = (value >> 6) & 0x3f;
716 N = (value >> 12) & 0x1;
717
718 if (sf == 0 && N == 1)
719 return 0;
720
721 /* The immediate value is S+1 consecutive bits set to 1, left-rotated by
722 SIMDsize - R (in other words, right-rotated by R), then replicated. */
723 if (N != 0)
724 {
725 simd_size = 64;
726 mask = 0xffffffffffffffffull;
727 }
728 else
729 {
730 switch (S)
731 {
732 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
733 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
734 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
735 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
736 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
737 default: return 0;
738 }
739 mask = (1ull << simd_size) - 1;
740 /* Top bits are IGNORED. */
741 R &= simd_size - 1;
742 }
743 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
744 if (S == simd_size - 1)
745 return 0;
746 /* S+1 consecutive bits to 1. */
747 /* NOTE: S can't be 63 due to detection above. */
748 imm = (1ull << (S + 1)) - 1;
749 /* Rotate to the left by simd_size - R. */
750 if (R != 0)
751 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
752 /* Replicate the value according to SIMD size. */
753 switch (simd_size)
754 {
755 case 2: imm = (imm << 2) | imm;
756 case 4: imm = (imm << 4) | imm;
757 case 8: imm = (imm << 8) | imm;
758 case 16: imm = (imm << 16) | imm;
759 case 32: imm = (imm << 32) | imm;
760 case 64: break;
761 default: assert (0); return 0;
762 }
763
764 info->imm.value = sf ? imm : imm & 0xffffffff;
765
766 return 1;
767 }
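/* A worked example: N = 0, immr = 0, imms = 0b111100 selects a 2-bit element
   (simd_size = 2) with a single bit set, which after replication gives
   0x5555555555555555 (truncated to 0x55555555 for a W destination).  */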
768
769 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
770 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
771 int
772 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
773 aarch64_opnd_info *info,
774 const aarch64_insn code, const aarch64_inst *inst)
775 {
776 aarch64_insn value;
777
778 /* Rt */
779 info->reg.regno = extract_field (FLD_Rt, code, 0);
780
781 /* size */
782 value = extract_field (FLD_ldst_size, code, 0);
783 if (inst->opcode->iclass == ldstpair_indexed
784 || inst->opcode->iclass == ldstnapair_offs
785 || inst->opcode->iclass == ldstpair_off
786 || inst->opcode->iclass == loadlit)
787 {
788 enum aarch64_opnd_qualifier qualifier;
789 switch (value)
790 {
791 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
792 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
793 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
794 default: return 0;
795 }
796 info->qualifier = qualifier;
797 }
798 else
799 {
800 /* opc1:size */
801 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
802 if (value > 0x4)
803 return 0;
804 info->qualifier = get_sreg_qualifier_from_value (value);
805 }
806
807 return 1;
808 }
809
810 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
811 int
812 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
813 aarch64_opnd_info *info,
814 aarch64_insn code,
815 const aarch64_inst *inst ATTRIBUTE_UNUSED)
816 {
817 /* Rn */
818 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
819 return 1;
820 }
821
822 /* Decode the address operand for e.g.
823 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
824 int
825 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
826 aarch64_opnd_info *info,
827 aarch64_insn code, const aarch64_inst *inst)
828 {
829 aarch64_insn S, value;
830
831 /* Rn */
832 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
833 /* Rm */
834 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
835 /* option */
836 value = extract_field (FLD_option, code, 0);
837 info->shifter.kind =
838 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
839 /* Fix-up the shifter kind; although the table-driven approach is
840 efficient, it is slightly inflexible, thus needing this fix-up. */
841 if (info->shifter.kind == AARCH64_MOD_UXTX)
842 info->shifter.kind = AARCH64_MOD_LSL;
843 /* S */
844 S = extract_field (FLD_S, code, 0);
845 if (S == 0)
846 {
847 info->shifter.amount = 0;
848 info->shifter.amount_present = 0;
849 }
850 else
851 {
852 int size;
853 /* Need information in other operand(s) to help achieve the decoding
854 from 'S' field. */
855 info->qualifier = get_expected_qualifier (inst, info->idx);
856 /* Get the size of the data element that is accessed, which may be
857 different from that of the source register size, e.g. in strb/ldrb. */
858 size = aarch64_get_qualifier_esize (info->qualifier);
859 info->shifter.amount = get_logsz (size);
860 info->shifter.amount_present = 1;
861 }
862
863 return 1;
864 }
865
866 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
867 int
868 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
869 aarch64_insn code, const aarch64_inst *inst)
870 {
871 aarch64_insn imm;
872 info->qualifier = get_expected_qualifier (inst, info->idx);
873
874 /* Rn */
875 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
876 /* simm (imm9 or imm7) */
877 imm = extract_field (self->fields[0], code, 0);
878 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
879 if (self->fields[0] == FLD_imm7)
880 /* scaled immediate in ld/st pair instructions. */
881 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
882 /* qualifier */
883 if (inst->opcode->iclass == ldst_unscaled
884 || inst->opcode->iclass == ldstnapair_offs
885 || inst->opcode->iclass == ldstpair_off
886 || inst->opcode->iclass == ldst_unpriv)
887 info->addr.writeback = 0;
888 else
889 {
890 /* pre/post- index */
891 info->addr.writeback = 1;
892 if (extract_field (self->fields[1], code, 0) == 1)
893 info->addr.preind = 1;
894 else
895 info->addr.postind = 1;
896 }
897
898 return 1;
899 }
900
901 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
902 int
903 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
904 aarch64_insn code,
905 const aarch64_inst *inst ATTRIBUTE_UNUSED)
906 {
907 int shift;
908 info->qualifier = get_expected_qualifier (inst, info->idx);
909 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
910 /* Rn */
911 info->addr.base_regno = extract_field (self->fields[0], code, 0);
912 /* uimm12 */
913 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
914 return 1;
915 }
916
917 /* Decode the address operand for e.g.
918 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
919 int
920 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
921 aarch64_opnd_info *info,
922 aarch64_insn code, const aarch64_inst *inst)
923 {
924 /* The opcode dependent area stores the number of elements in
925 each structure to be loaded/stored. */
926 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
927
928 /* Rn */
929 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
930 /* Rm | #<amount> */
931 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
932 if (info->addr.offset.regno == 31)
933 {
934 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
935 /* Special handling of loading a single structure to all lanes. */
936 info->addr.offset.imm = (is_ld1r ? 1
937 : inst->operands[0].reglist.num_regs)
938 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
939 else
940 info->addr.offset.imm = inst->operands[0].reglist.num_regs
941 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
942 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
943 }
944 else
945 info->addr.offset.is_reg = 1;
946 info->addr.writeback = 1;
947
948 return 1;
949 }
950
951 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
952 int
953 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
954 aarch64_opnd_info *info,
955 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
956 {
957 aarch64_insn value;
958 /* cond */
959 value = extract_field (FLD_cond, code, 0);
960 info->cond = get_cond_from_value (value);
961 return 1;
962 }
963
964 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
965 int
966 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
967 aarch64_opnd_info *info,
968 aarch64_insn code,
969 const aarch64_inst *inst ATTRIBUTE_UNUSED)
970 {
971 /* op0:op1:CRn:CRm:op2 */
972 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
973 FLD_CRm, FLD_op2);
974 return 1;
975 }
976
977 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
978 int
979 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
980 aarch64_opnd_info *info, aarch64_insn code,
981 const aarch64_inst *inst ATTRIBUTE_UNUSED)
982 {
983 int i;
984 /* op1:op2 */
985 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
986 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
987 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
988 return 1;
989 /* Reserved value in <pstatefield>. */
990 return 0;
991 }
992
993 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
994 int
995 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
996 aarch64_opnd_info *info,
997 aarch64_insn code,
998 const aarch64_inst *inst ATTRIBUTE_UNUSED)
999 {
1000 int i;
1001 aarch64_insn value;
1002 const aarch64_sys_ins_reg *sysins_ops;
1003 /* op0:op1:CRn:CRm:op2 */
1004 value = extract_fields (code, 0, 5,
1005 FLD_op0, FLD_op1, FLD_CRn,
1006 FLD_CRm, FLD_op2);
1007
1008 switch (info->type)
1009 {
1010 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1011 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1012 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1013 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1014 default: assert (0); return 0;
1015 }
1016
1017 for (i = 0; sysins_ops[i].template != NULL; ++i)
1018 if (sysins_ops[i].value == value)
1019 {
1020 info->sysins_op = sysins_ops + i;
1021 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1022 info->sysins_op->template,
1023 (unsigned)info->sysins_op->value,
1024 info->sysins_op->has_xt, i);
1025 return 1;
1026 }
1027
1028 return 0;
1029 }
1030
1031 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1032
1033 int
1034 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1035 aarch64_opnd_info *info,
1036 aarch64_insn code,
1037 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1038 {
1039 /* CRm */
1040 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1041 return 1;
1042 }
1043
1044 /* Decode the prefetch operation option operand for e.g.
1045 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1046
1047 int
1048 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1049 aarch64_opnd_info *info,
1050 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1051 {
1052 /* prfop in Rt */
1053 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1054 return 1;
1055 }
1056
1057 /* Decode the extended register operand for e.g.
1058 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1059 int
1060 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1061 aarch64_opnd_info *info,
1062 aarch64_insn code,
1063 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1064 {
1065 aarch64_insn value;
1066
1067 /* Rm */
1068 info->reg.regno = extract_field (FLD_Rm, code, 0);
1069 /* option */
1070 value = extract_field (FLD_option, code, 0);
1071 info->shifter.kind =
1072 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1073 /* imm3 */
1074 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1075
1076 /* This makes the constraint checking happy. */
1077 info->shifter.operator_present = 1;
1078
1079 /* Assume inst->operands[0].qualifier has been resolved. */
1080 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1081 info->qualifier = AARCH64_OPND_QLF_W;
1082 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1083 && (info->shifter.kind == AARCH64_MOD_UXTX
1084 || info->shifter.kind == AARCH64_MOD_SXTX))
1085 info->qualifier = AARCH64_OPND_QLF_X;
1086
1087 return 1;
1088 }
1089
1090 /* Decode the shifted register operand for e.g.
1091 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1092 int
1093 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1094 aarch64_opnd_info *info,
1095 aarch64_insn code,
1096 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1097 {
1098 aarch64_insn value;
1099
1100 /* Rm */
1101 info->reg.regno = extract_field (FLD_Rm, code, 0);
1102 /* shift */
1103 value = extract_field (FLD_shift, code, 0);
1104 info->shifter.kind =
1105 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1106 if (info->shifter.kind == AARCH64_MOD_ROR
1107 && inst->opcode->iclass != log_shift)
1108 /* ROR is not available for the shifted register operand in arithmetic
1109 instructions. */
1110 return 0;
1111 /* imm6 */
1112 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1113
1114 /* This makes the constraint checking happy. */
1115 info->shifter.operator_present = 1;
1116
1117 return 1;
1118 }
1119 \f
1120 /* Bitfields that are commonly used to encode certain operands' information
1121 may be partially used as part of the base opcode in some instructions.
1122 For example, the bit 1 of the field 'size' in
1123 FCVTXN <Vb><d>, <Va><n>
1124 is actually part of the base opcode, while only size<0> is available
1125 for encoding the register type. Another example is the AdvSIMD
1126 instruction ORR (register), in which the field 'size' is also used for
1127 the base opcode, leaving only the field 'Q' available to encode the
1128 vector register arrangement specifier '8B' or '16B'.
1129
1130 This function tries to deduce the qualifier from the value of partially
1131 constrained field(s). Given the VALUE of such a field or fields, the
1132 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1133 operand encoding), the function returns the matching qualifier or
1134 AARCH64_OPND_QLF_NIL if nothing matches.
1135
1136 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1137 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1138 may end with AARCH64_OPND_QLF_NIL. */
1139
1140 static enum aarch64_opnd_qualifier
1141 get_qualifier_from_partial_encoding (aarch64_insn value,
1142 const enum aarch64_opnd_qualifier* \
1143 candidates,
1144 aarch64_insn mask)
1145 {
1146 int i;
1147 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1148 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1149 {
1150 aarch64_insn standard_value;
1151 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1152 break;
1153 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1154 if ((standard_value & mask) == (value & mask))
1155 return candidates[i];
1156 }
1157 return AARCH64_OPND_QLF_NIL;
1158 }
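/* For instance, for the AdvSIMD ORR (register) mentioned above only the Q
   bit is free for operand encoding, so MASK is 0x1 and the Q bit alone
   decides between the 8B and 16B candidates.  */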
1159
1160 /* Given a list of qualifier sequences, return all possible valid qualifiers
1161 for operand IDX in QUALIFIERS.
1162 Assume QUALIFIERS is an array whose length is large enough. */
1163
1164 static void
1165 get_operand_possible_qualifiers (int idx,
1166 const aarch64_opnd_qualifier_seq_t *list,
1167 enum aarch64_opnd_qualifier *qualifiers)
1168 {
1169 int i;
1170 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1171 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1172 break;
1173 }
1174
1175 /* Decode the size:Q fields for e.g. SHADD.
1176 We tag one operand with the qualifier according to the code;
1177 whether the qualifier is valid for this opcode or not is the
1178 duty of the semantic checking. */
1179
1180 static int
1181 decode_sizeq (aarch64_inst *inst)
1182 {
1183 int idx;
1184 enum aarch64_opnd_qualifier qualifier;
1185 aarch64_insn code;
1186 aarch64_insn value, mask;
1187 enum aarch64_field_kind fld_sz;
1188 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1189
1190 if (inst->opcode->iclass == asisdlse
1191 || inst->opcode->iclass == asisdlsep
1192 || inst->opcode->iclass == asisdlso
1193 || inst->opcode->iclass == asisdlsop)
1194 fld_sz = FLD_vldst_size;
1195 else
1196 fld_sz = FLD_size;
1197
1198 code = inst->value;
1199 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1200 /* Obtain the info on which bits of fields Q and size are actually
1201 available for operand encoding. Opcodes like FMAXNM and FMLA have
1202 size[1] unavailable. */
1203 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1204
1205 /* The index of the operand we are going to tag with a qualifier, and the
1206 qualifier itself, are deduced from the value of the size and Q fields and
1207 the possible valid qualifier lists. */
1208 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1209 DEBUG_TRACE ("key idx: %d", idx);
1210
1211 /* For most related instructions, size:Q is fully available for operand
1212 encoding. */
1213 if (mask == 0x7)
1214 {
1215 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1216 return 1;
1217 }
1218
1219 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1220 candidates);
1221 #ifdef DEBUG_AARCH64
1222 if (debug_dump)
1223 {
1224 int i;
1225 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM
1226 && candidates[i] != AARCH64_OPND_QLF_NIL; ++i)
1227 DEBUG_TRACE ("qualifier %d: %s", i,
1228 aarch64_get_qualifier_name(candidates[i]));
1229 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1230 }
1231 #endif /* DEBUG_AARCH64 */
1232
1233 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1234
1235 if (qualifier == AARCH64_OPND_QLF_NIL)
1236 return 0;
1237
1238 inst->operands[idx].qualifier = qualifier;
1239 return 1;
1240 }
1241
1242 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1243 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1244
1245 static int
1246 decode_asimd_fcvt (aarch64_inst *inst)
1247 {
1248 aarch64_field field = {0, 0};
1249 aarch64_insn value;
1250 enum aarch64_opnd_qualifier qualifier;
1251
1252 gen_sub_field (FLD_size, 0, 1, &field);
1253 value = extract_field_2 (&field, inst->value, 0);
1254 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1255 : AARCH64_OPND_QLF_V_2D;
1256 switch (inst->opcode->op)
1257 {
1258 case OP_FCVTN:
1259 case OP_FCVTN2:
1260 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1261 inst->operands[1].qualifier = qualifier;
1262 break;
1263 case OP_FCVTL:
1264 case OP_FCVTL2:
1265 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1266 inst->operands[0].qualifier = qualifier;
1267 break;
1268 default:
1269 assert (0);
1270 return 0;
1271 }
1272
1273 return 1;
1274 }
1275
1276 /* Decode size[0], i.e. bit 22, for
1277 e.g. FCVTXN <Vb><d>, <Va><n>. */
1278
1279 static int
1280 decode_asisd_fcvtxn (aarch64_inst *inst)
1281 {
1282 aarch64_field field = {0, 0};
1283 gen_sub_field (FLD_size, 0, 1, &field);
1284 if (!extract_field_2 (&field, inst->value, 0))
1285 return 0;
1286 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1287 return 1;
1288 }
1289
1290 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1291 static int
1292 decode_fcvt (aarch64_inst *inst)
1293 {
1294 enum aarch64_opnd_qualifier qualifier;
1295 aarch64_insn value;
1296 const aarch64_field field = {15, 2};
1297
1298 /* opc dstsize */
1299 value = extract_field_2 (&field, inst->value, 0);
1300 switch (value)
1301 {
1302 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1303 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1304 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1305 default: return 0;
1306 }
1307 inst->operands[0].qualifier = qualifier;
1308
1309 return 1;
1310 }
1311
1312 /* Do miscellaneous decodings that are not common enough to be driven by
1313 flags. */
1314
1315 static int
1316 do_misc_decoding (aarch64_inst *inst)
1317 {
1318 switch (inst->opcode->op)
1319 {
1320 case OP_FCVT:
1321 return decode_fcvt (inst);
1322 case OP_FCVTN:
1323 case OP_FCVTN2:
1324 case OP_FCVTL:
1325 case OP_FCVTL2:
1326 return decode_asimd_fcvt (inst);
1327 case OP_FCVTXN_S:
1328 return decode_asisd_fcvtxn (inst);
1329 default:
1330 return 0;
1331 }
1332 }
1333
1334 /* Opcodes that have fields shared by multiple operands are usually flagged
1335 accordingly. In this function, we detect such flags, decode the related
1336 field(s) and store the information in one of the related operands. The
1337 'one' operand is not an arbitrary operand, but one of the operands that can
1338 accommodate all the information that has been decoded. */
1339
1340 static int
1341 do_special_decoding (aarch64_inst *inst)
1342 {
1343 int idx;
1344 aarch64_insn value;
1345 /* Condition for truly conditionally-executed instructions, e.g. b.cond. */
1346 if (inst->opcode->flags & F_COND)
1347 {
1348 value = extract_field (FLD_cond2, inst->value, 0);
1349 inst->cond = get_cond_from_value (value);
1350 }
1351 /* 'sf' field. */
1352 if (inst->opcode->flags & F_SF)
1353 {
1354 idx = select_operand_for_sf_field_coding (inst->opcode);
1355 value = extract_field (FLD_sf, inst->value, 0);
1356 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1357 if ((inst->opcode->flags & F_N)
1358 && extract_field (FLD_N, inst->value, 0) != value)
1359 return 0;
1360 }
1361 /* size:Q fields. */
1362 if (inst->opcode->flags & F_SIZEQ)
1363 return decode_sizeq (inst);
1364
1365 if (inst->opcode->flags & F_FPTYPE)
1366 {
1367 idx = select_operand_for_fptype_field_coding (inst->opcode);
1368 value = extract_field (FLD_type, inst->value, 0);
1369 switch (value)
1370 {
1371 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
1372 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
1373 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
1374 default: return 0;
1375 }
1376 }
1377
1378 if (inst->opcode->flags & F_SSIZE)
1379 {
1380 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
1381 of the base opcode. */
1382 aarch64_insn mask;
1383 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1384 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1385 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
1386 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
1387 /* For most related instructions, the 'size' field is fully available for
1388 operand encoding. */
1389 if (mask == 0x3)
1390 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
1391 else
1392 {
1393 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1394 candidates);
1395 inst->operands[idx].qualifier
1396 = get_qualifier_from_partial_encoding (value, candidates, mask);
1397 }
1398 }
1399
1400 if (inst->opcode->flags & F_T)
1401 {
1402 /* Num of consecutive '0's on the right side of imm5<3:0>. */
1403 int num = 0;
1404 unsigned val, Q;
1405 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1406 == AARCH64_OPND_CLASS_SIMD_REG);
1407 /* imm5<3:0> q <t>
1408 0000 x reserved
1409 xxx1 0 8b
1410 xxx1 1 16b
1411 xx10 0 4h
1412 xx10 1 8h
1413 x100 0 2s
1414 x100 1 4s
1415 1000 0 reserved
1416 1000 1 2d */
1417 val = extract_field (FLD_imm5, inst->value, 0);
1418 while ((val & 0x1) == 0 && ++num <= 3)
1419 val >>= 1;
1420 if (num > 3)
1421 return 0;
1422 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
1423 inst->operands[0].qualifier =
1424 get_vreg_qualifier_from_value ((num << 1) | Q);
1425 }
1426
1427 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1428 {
1429 /* Use Rt to encode in the case of e.g.
1430 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1431 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1432 if (idx == -1)
1433 {
1434 /* Otherwise use the result operand, which has to be an integer
1435 register. */
1436 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1437 == AARCH64_OPND_CLASS_INT_REG);
1438 idx = 0;
1439 }
1440 assert (idx == 0 || idx == 1);
1441 value = extract_field (FLD_Q, inst->value, 0);
1442 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1443 }
1444
1445 if (inst->opcode->flags & F_LDS_SIZE)
1446 {
1447 aarch64_field field = {0, 0};
1448 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1449 == AARCH64_OPND_CLASS_INT_REG);
1450 gen_sub_field (FLD_opc, 0, 1, &field);
1451 value = extract_field_2 (&field, inst->value, 0);
1452 inst->operands[0].qualifier
1453 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
1454 }
1455
1456 /* Miscellaneous decoding; done as the last step. */
1457 if (inst->opcode->flags & F_MISC)
1458 return do_misc_decoding (inst);
1459
1460 return 1;
1461 }
1462
1463 /* Converters that convert a real opcode instruction to its alias form. */
1464
1465 /* ROR <Wd>, <Ws>, #<shift>
1466 is equivalent to:
1467 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1468 static int
1469 convert_extr_to_ror (aarch64_inst *inst)
1470 {
1471 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1472 {
1473 copy_operand_info (inst, 2, 3);
1474 inst->operands[3].type = AARCH64_OPND_NIL;
1475 return 1;
1476 }
1477 return 0;
1478 }
1479
1480 /* Convert
1481 UBFM <Xd>, <Xn>, #<shift>, #63.
1482 to
1483 LSR <Xd>, <Xn>, #<shift>. */
1484 static int
1485 convert_bfm_to_sr (aarch64_inst *inst)
1486 {
1487 int64_t imms, val;
1488
1489 imms = inst->operands[3].imm.value;
1490 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1491 if (imms == val)
1492 {
1493 inst->operands[3].type = AARCH64_OPND_NIL;
1494 return 1;
1495 }
1496
1497 return 0;
1498 }
1499
1500 /* Convert MOV to ORR. */
1501 static int
1502 convert_orr_to_mov (aarch64_inst *inst)
1503 {
1504 /* MOV <Vd>.<T>, <Vn>.<T>
1505 is equivalent to:
1506 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1507 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1508 {
1509 inst->operands[2].type = AARCH64_OPND_NIL;
1510 return 1;
1511 }
1512 return 0;
1513 }
1514
1515 /* When <imms> >= <immr>, the instruction written:
1516 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1517 is equivalent to:
1518 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1519
1520 static int
1521 convert_bfm_to_bfx (aarch64_inst *inst)
1522 {
1523 int64_t immr, imms;
1524
1525 immr = inst->operands[2].imm.value;
1526 imms = inst->operands[3].imm.value;
1527 if (imms >= immr)
1528 {
1529 int64_t lsb = immr;
1530 inst->operands[2].imm.value = lsb;
1531 inst->operands[3].imm.value = imms + 1 - lsb;
1532 /* The two opcodes have different qualifiers for
1533 the immediate operands; reset to help the checking. */
1534 reset_operand_qualifier (inst, 2);
1535 reset_operand_qualifier (inst, 3);
1536 return 1;
1537 }
1538
1539 return 0;
1540 }
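/* For example, SBFM X0, X1, #4, #11 is converted to SBFX X0, X1, #4, #8:
   <lsb> = immr = 4 and <width> = imms + 1 - <lsb> = 8.  */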
1541
1542 /* When <imms> < <immr>, the instruction written:
1543 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1544 is equivalent to:
1545 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1546
1547 static int
1548 convert_bfm_to_bfi (aarch64_inst *inst)
1549 {
1550 int64_t immr, imms, val;
1551
1552 immr = inst->operands[2].imm.value;
1553 imms = inst->operands[3].imm.value;
1554 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
1555 if (imms < immr)
1556 {
1557 inst->operands[2].imm.value = (val - immr) & (val - 1);
1558 inst->operands[3].imm.value = imms + 1;
1559 /* The two opcodes have different qualifiers for
1560 the immediate operands; reset to help the checking. */
1561 reset_operand_qualifier (inst, 2);
1562 reset_operand_qualifier (inst, 3);
1563 return 1;
1564 }
1565
1566 return 0;
1567 }
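/* For example, SBFM X0, X1, #60, #3 is converted to SBFIZ X0, X1, #4, #4:
   <lsb> = (64 - immr) & 0x3f = 4 and <width> = imms + 1 = 4.  */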
1568
1569 /* The instruction written:
1570 LSL <Xd>, <Xn>, #<shift>
1571 is equivalent to:
1572 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1573
1574 static int
1575 convert_ubfm_to_lsl (aarch64_inst *inst)
1576 {
1577 int64_t immr = inst->operands[2].imm.value;
1578 int64_t imms = inst->operands[3].imm.value;
1579 int64_t val
1580 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1581
1582 if ((immr == 0 && imms == val) || immr == imms + 1)
1583 {
1584 inst->operands[3].type = AARCH64_OPND_NIL;
1585 inst->operands[2].imm.value = val - imms;
1586 return 1;
1587 }
1588
1589 return 0;
1590 }
1591
1592 /* CINC <Wd>, <Wn>, <cond>
1593 is equivalent to:
1594 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1595
1596 static int
1597 convert_from_csel (aarch64_inst *inst)
1598 {
1599 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1600 {
1601 copy_operand_info (inst, 2, 3);
1602 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
1603 inst->operands[3].type = AARCH64_OPND_NIL;
1604 return 1;
1605 }
1606 return 0;
1607 }
1608
1609 /* CSET <Wd>, <cond>
1610 is equivalent to:
1611 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1612
1613 static int
1614 convert_csinc_to_cset (aarch64_inst *inst)
1615 {
1616 if (inst->operands[1].reg.regno == 0x1f
1617 && inst->operands[2].reg.regno == 0x1f)
1618 {
1619 copy_operand_info (inst, 1, 3);
1620 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
1621 inst->operands[3].type = AARCH64_OPND_NIL;
1622 inst->operands[2].type = AARCH64_OPND_NIL;
1623 return 1;
1624 }
1625 return 0;
1626 }
1627
1628 /* MOV <Wd>, #<imm>
1629 is equivalent to:
1630 MOVZ <Wd>, #<imm16>, LSL #<shift>.
1631
1632 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1633 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1634 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1635 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1636 machine-instruction mnemonic must be used. */
1637
1638 static int
1639 convert_movewide_to_mov (aarch64_inst *inst)
1640 {
1641 uint64_t value = inst->operands[1].imm.value;
1642 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
1643 if (value == 0 && inst->operands[1].shifter.amount != 0)
1644 return 0;
1645 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1646 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
1647 value <<= inst->operands[1].shifter.amount;
1648 /* As this is an alias converter, it has to be clear that INST->OPCODE
1649 is the opcode of the real instruction. */
1650 if (inst->opcode->op == OP_MOVN)
1651 {
1652 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1653 value = ~value;
1654 /* A MOVN has an immediate that could be encoded by MOVZ. */
1655 if (aarch64_wide_constant_p (value, is32, NULL) == TRUE)
1656 return 0;
1657 }
1658 inst->operands[1].imm.value = value;
1659 inst->operands[1].shifter.amount = 0;
1660 return 1;
1661 }
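/* For example, MOVZ X0, #0x1234, LSL #16 is shown as MOV X0, #0x12340000,
   whereas MOVN W0, #0x0, LSL #16 keeps its machine mnemonic because of the
   check at the top of this function.  */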
1662
1663 /* MOV <Wd>, #<imm>
1664 is equivalent to:
1665 ORR <Wd>, WZR, #<imm>.
1666
1667 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1668 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1669 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1670 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1671 machine-instruction mnemonic must be used. */
1672
1673 static int
1674 convert_movebitmask_to_mov (aarch64_inst *inst)
1675 {
1676 int is32;
1677 uint64_t value;
1678
1679 /* Should have been assured by the base opcode value. */
1680 assert (inst->operands[1].reg.regno == 0x1f);
1681 copy_operand_info (inst, 1, 2);
1682 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1683 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1684 value = inst->operands[1].imm.value;
1685 /* ORR has an immediate that could be generated by a MOVZ or MOVN
1686 instruction. */
1687 if (inst->operands[0].reg.regno != 0x1f
1688 && (aarch64_wide_constant_p (value, is32, NULL) == TRUE
1689 || aarch64_wide_constant_p (~value, is32, NULL) == TRUE))
1690 return 0;
1691
1692 inst->operands[2].type = AARCH64_OPND_NIL;
1693 return 1;
1694 }
1695
1696 /* Some alias opcodes are disassembled by being converted from their real form.
1697 N.B. INST->OPCODE is the real opcode rather than the alias. */
1698
1699 static int
1700 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
1701 {
1702 switch (alias->op)
1703 {
1704 case OP_ASR_IMM:
1705 case OP_LSR_IMM:
1706 return convert_bfm_to_sr (inst);
1707 case OP_LSL_IMM:
1708 return convert_ubfm_to_lsl (inst);
1709 case OP_CINC:
1710 case OP_CINV:
1711 case OP_CNEG:
1712 return convert_from_csel (inst);
1713 case OP_CSET:
1714 case OP_CSETM:
1715 return convert_csinc_to_cset (inst);
1716 case OP_UBFX:
1717 case OP_BFXIL:
1718 case OP_SBFX:
1719 return convert_bfm_to_bfx (inst);
1720 case OP_SBFIZ:
1721 case OP_BFI:
1722 case OP_UBFIZ:
1723 return convert_bfm_to_bfi (inst);
1724 case OP_MOV_V:
1725 return convert_orr_to_mov (inst);
1726 case OP_MOV_IMM_WIDE:
1727 case OP_MOV_IMM_WIDEN:
1728 return convert_movewide_to_mov (inst);
1729 case OP_MOV_IMM_LOG:
1730 return convert_movebitmask_to_mov (inst);
1731 case OP_ROR_IMM:
1732 return convert_extr_to_ror (inst);
1733 default:
1734 return 0;
1735 }
1736 }
1737
1738 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
1739 aarch64_inst *, int);
1740
1741 /* Given the instruction information in *INST, check if the instruction has
1742 any alias form that can be used to represent *INST. If the answer is yes,
1743 update *INST to be in the form of the determined alias. */
1744
1745 /* In the opcode description table, the following flags are used in opcode
1746 entries to help establish the relations between the real and alias opcodes:
1747
1748 F_ALIAS: opcode is an alias
1749 F_HAS_ALIAS: opcode has alias(es)
1750 F_P1
1751 F_P2
1752 F_P3: Disassembly preference priority 1-3 (the larger the
1753 higher). If nothing is specified, it is the priority
1754 0 by default, i.e. the lowest priority.
1755
1756 Although the relation between the machine and the alias instructions is not
1757 explicitly described, it can be easily determined from the base opcode
1758 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
1759 description entries:
1760
1761 The mask of an alias opcode must be equal to or a super-set (i.e. more
1762 constrained) of that of the aliased opcode; so is the base opcode value.
1763
1764 if (opcode_has_alias (real) && alias_opcode_p (opcode)
1765 && (opcode->mask & real->mask) == real->mask
1766 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
1767 then OPCODE is an alias of, and only of, the REAL instruction
1768
1769 The alias relationship is kept flat-structured so that the related algorithm
1770 stays simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
1771
1772 During the disassembling, the decoding decision tree (in
1773 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
1774 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
1775 not specified), the disassembler will check whether any alias
1776 instruction exists for this real instruction. If there is, the disassembler
1777 will try to disassemble the 32-bit binary again using the alias's rule, or
1778 try to convert the IR to the form of the alias. In the case of multiple
1779 aliases, the aliases are tried one by one from the highest priority
1780 (currently the flag F_P3) to the lowest priority (no priority flag), and the
1781 first one that succeeds is adopted.
1782
1783 You may ask why there is a need for the conversion of IR from one form to
1784 another in handling certain aliases. This is because on one hand it avoids
1785 adding more operand code to handle unusual encoding/decoding; on the other
1786 hand, during the disassembling, the conversion is an effective approach to
1787 check the condition of an alias (as an alias may be adopted only if certain
1788 conditions are met).
1789
1790 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
1791 aarch64_opcode_table and generated aarch64_find_alias_opcode and
1792 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
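/* As a concrete example of the F_CONV path described above, LSR <Xd>, <Xn>,
   #<shift> is an alias of UBFM <Xd>, <Xn>, #<shift>, #63; the conversion is
   performed by convert_bfm_to_sr further above.  */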
1793
1794 static void
1795 determine_disassembling_preference (struct aarch64_inst *inst)
1796 {
1797 const aarch64_opcode *opcode;
1798 const aarch64_opcode *alias;
1799
1800 opcode = inst->opcode;
1801
1802 /* This opcode does not have an alias, so use itself. */
1803 if (opcode_has_alias (opcode) == FALSE)
1804 return;
1805
1806 alias = aarch64_find_alias_opcode (opcode);
1807 assert (alias);
1808
1809 #ifdef DEBUG_AARCH64
1810 if (debug_dump)
1811 {
1812 const aarch64_opcode *tmp = alias;
1813 printf ("#### LIST ordered: ");
1814 while (tmp)
1815 {
1816 printf ("%s, ", tmp->name);
1817 tmp = aarch64_find_next_alias_opcode (tmp);
1818 }
1819 printf ("\n");
1820 }
1821 #endif /* DEBUG_AARCH64 */
1822
1823 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
1824 {
1825 DEBUG_TRACE ("try %s", alias->name);
1826 assert (alias_opcode_p (alias));
1827
1828 /* An alias can be a pseudo opcode which will never be used in the
1829 disassembly, e.g. BIC logical immediate is such a pseudo opcode
1830 aliasing AND. */
1831 if (pseudo_opcode_p (alias))
1832 {
1833 DEBUG_TRACE ("skip pseudo %s", alias->name);
1834 continue;
1835 }
1836
1837 if ((inst->value & alias->mask) != alias->opcode)
1838 {
1839 DEBUG_TRACE ("skip %s as base opcode does not match", alias->name);
1840 continue;
1841 }
1842 /* No need to do any complicated transformation on operands, if the alias
1843 opcode does not have any operand. */
1844 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
1845 {
1846 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
1847 aarch64_replace_opcode (inst, alias);
1848 return;
1849 }
1850 if (alias->flags & F_CONV)
1851 {
1852 aarch64_inst copy;
1853 memcpy (&copy, inst, sizeof (aarch64_inst));
1854 /* ALIAS is the preference as long as the instruction can be
1855 successfully converted to the form of ALIAS. */
1856 if (convert_to_alias (&copy, alias) == 1)
1857 {
1858 aarch64_replace_opcode (&copy, alias);
1859 assert (aarch64_match_operands_constraint (&copy, NULL));
1860 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
1861 memcpy (inst, &copy, sizeof (aarch64_inst));
1862 return;
1863 }
1864 }
1865 else
1866 {
1867 /* Directly decode the alias opcode. */
1868 aarch64_inst temp;
1869 memset (&temp, '\0', sizeof (aarch64_inst));
1870 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
1871 {
1872 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
1873 memcpy (inst, &temp, sizeof (aarch64_inst));
1874 return;
1875 }
1876 }
1877 }
1878 }
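
/* As an illustrative example of the preference mechanism above: CSET is an
   alias of CSINC, so a CSINC whose two source registers are both the zero
   register (and whose condition is not AL or NV) is preferably printed as
   CSET with the inverted condition; convert_csinc_to_cset, called from
   convert_to_alias, performs the required IR conversion.  */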
1879
1880 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
1881 fails, which means that CODE is not an instruction of OPCODE; otherwise
1882 return 1.
1883
1884 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
1885 determined and used to disassemble CODE; this is done just before the
1886 return. */
1887
1888 static int
1889 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
1890 aarch64_inst *inst, int noaliases_p)
1891 {
1892 int i;
1893
1894 DEBUG_TRACE ("enter with %s", opcode->name);
1895
1896 assert (opcode && inst);
1897
1898 /* Check the base opcode. */
1899 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
1900 {
1901 DEBUG_TRACE ("base opcode match FAIL");
1902 goto decode_fail;
1903 }
1904
1905 /* Clear inst. */
1906 memset (inst, '\0', sizeof (aarch64_inst));
1907
1908 inst->opcode = opcode;
1909 inst->value = code;
1910
1911 /* Assign operand codes and indexes. */
1912 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1913 {
1914 if (opcode->operands[i] == AARCH64_OPND_NIL)
1915 break;
1916 inst->operands[i].type = opcode->operands[i];
1917 inst->operands[i].idx = i;
1918 }
1919
1920 /* Call the opcode decoder indicated by flags. */
1921 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
1922 {
1923 DEBUG_TRACE ("opcode flag-based decoder FAIL");
1924 goto decode_fail;
1925 }
1926
1927 /* Call operand decoders. */
1928 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1929 {
1930 const aarch64_operand *opnd;
1931 enum aarch64_opnd type;
1932 type = opcode->operands[i];
1933 if (type == AARCH64_OPND_NIL)
1934 break;
1935 opnd = &aarch64_operands[type];
1936 if (operand_has_extractor (opnd)
1937 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
1938 {
1939 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
1940 goto decode_fail;
1941 }
1942 }
1943
1944 /* Match the qualifiers. */
1945 if (aarch64_match_operands_constraint (inst, NULL) == 1)
1946 {
1947 /* Arriving here, the CODE has been determined as a valid instruction
1948 of OPCODE and *INST has been filled with information of this OPCODE
1949 instruction. Before the return, check if the instruction has any
1950 alias and should be disassembled in the form of its alias instead.
1951 If the answer is yes, *INST will be updated. */
1952 if (!noaliases_p)
1953 determine_disassembling_preference (inst);
1954 DEBUG_TRACE ("SUCCESS");
1955 return 1;
1956 }
1957 else
1958 {
1959 DEBUG_TRACE ("constraint matching FAIL");
1960 }
1961
1962 decode_fail:
1963 return 0;
1964 }
1965 \f
1966 /* This does some user-friendly fix-up to *INST. It currently focuses on
1967 adjusting qualifiers to make the printed instruction easier to
1968 recognize/understand. */
1969
1970 static void
1971 user_friendly_fixup (aarch64_inst *inst)
1972 {
1973 switch (inst->opcode->iclass)
1974 {
1975 case testbranch:
1976 /* TBNZ Xn|Wn, #uimm6, label
1977 Test and Branch Not Zero: conditionally jumps to label if bit number
1978 uimm6 in register Xn is not zero. The bit number implies the width of
1979 the register, which may be written and should be disassembled as Wn if
1980 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
1981 */
1982 if (inst->operands[1].imm.value < 32)
1983 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
1984 break;
1985 default: break;
1986 }
1987 }
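
/* Illustrative example of the fix-up above (register number chosen
   arbitrarily): a TBNZ that tests bit #3 of register 0 is printed as
   "tbnz w0, #3, <label>" rather than "tbnz x0, #3, <label>", because a bit
   number below 32 implies the 32-bit view of the register.  */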
1988
1989 /* Decode INSN and fill in *INST the instruction information. */
1990
1991 static int
1992 disas_aarch64_insn (uint64_t pc ATTRIBUTE_UNUSED, uint32_t insn,
1993 aarch64_inst *inst)
1994 {
1995 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
1996
1997 #ifdef DEBUG_AARCH64
1998 if (debug_dump)
1999 {
2000 const aarch64_opcode *tmp = opcode;
2001 printf ("\n");
2002 DEBUG_TRACE ("opcode lookup:");
2003 while (tmp != NULL)
2004 {
2005 aarch64_verbose (" %s", tmp->name);
2006 tmp = aarch64_find_next_opcode (tmp);
2007 }
2008 }
2009 #endif /* DEBUG_AARCH64 */
2010
2011 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2012 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2013 opcode field and value; the difference is that one of them has an extra
2014 field as part of the opcode, while that field is used for operand
2015 encoding in the other opcode(s) ('immh' in the case of the example). */
2016 while (opcode != NULL)
2017 {
2018 /* But only one opcode can be successfully decoded, as the
2019 decoding routine will check the constraints carefully. */
2020 if (aarch64_opcode_decode (opcode, insn, inst, no_aliases) == 1)
2021 return ERR_OK;
2022 opcode = aarch64_find_next_opcode (opcode);
2023 }
2024
2025 return ERR_UND;
2026 }
2027
2028 /* Print operands. */
2029
2030 static void
2031 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2032 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2033 {
2034 int i, pcrel_p, num_printed;
2035 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2036 {
2037 const size_t size = 128;
2038 char str[size];
2039 /* We regard the opcode's operand information as primary, but we also
2040 look into inst->operands to support the disassembly of the optional
2041 operand.
2042 The two operand codes should be the same in all cases, apart from
2043 when the operand can be optional. */
2044 if (opcode->operands[i] == AARCH64_OPND_NIL
2045 || opnds[i].type == AARCH64_OPND_NIL)
2046 break;
2047
2048 /* Generate the operand string in STR. */
2049 aarch64_print_operand (str, size, pc, opcode, opnds, i, &pcrel_p,
2050 &info->target);
2051
2052 /* Print the delimiter (taking account of omitted operand(s)). */
2053 if (str[0] != '\0')
2054 (*info->fprintf_func) (info->stream, "%s",
2055 num_printed++ == 0 ? "\t" : ", ");
2056
2057 /* Print the operand. */
2058 if (pcrel_p)
2059 (*info->print_address_func) (info->target, info);
2060 else
2061 (*info->fprintf_func) (info->stream, "%s", str);
2062 }
2063 }
2064
2065 /* Print the instruction mnemonic name. */
2066
2067 static void
2068 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2069 {
2070 if (inst->opcode->flags & F_COND)
2071 {
2072 /* For instructions that are truly conditionally executed, e.g. b.cond,
2073 prepare the full mnemonic name with the corresponding condition
2074 suffix. */
2075 char name[8], *ptr;
2076 size_t len;
2077
2078 ptr = strchr (inst->opcode->name, '.');
2079 assert (ptr && inst->cond);
2080 len = ptr - inst->opcode->name;
2081 assert (len < 8);
2082 strncpy (name, inst->opcode->name, len);
2083 name [len] = '\0';
2084 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2085 }
2086 else
2087 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2088 }
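
/* For instance, a conditionally executed branch whose opcode-table name
   contains a '.' is printed as the part before the '.' plus the decoded
   condition, e.g. "b.eq" or "b.ne"; unconditional mnemonics are printed
   verbatim from the opcode table.  */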
2089
2090 /* Print the instruction according to *INST. */
2091
2092 static void
2093 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2094 struct disassemble_info *info)
2095 {
2096 print_mnemonic_name (inst, info);
2097 print_operands (pc, inst->opcode, inst->operands, info);
2098 }
2099
2100 /* Entry-point of the instruction disassembler and printer. */
2101
2102 static void
2103 print_insn_aarch64_word (bfd_vma pc,
2104 uint32_t word,
2105 struct disassemble_info *info)
2106 {
2107 static const char *err_msg[6] =
2108 {
2109 [ERR_OK] = "_",
2110 [-ERR_UND] = "undefined",
2111 [-ERR_UNP] = "unpredictable",
2112 [-ERR_NYI] = "NYI"
2113 };
2114
2115 int ret;
2116 aarch64_inst inst;
2117
2118 info->insn_info_valid = 1;
2119 info->branch_delay_insns = 0;
2120 info->data_size = 0;
2121 info->target = 0;
2122 info->target2 = 0;
2123
2124 if (info->flags & INSN_HAS_RELOC)
2125 /* If the instruction has a reloc associated with it, then
2126 the offset field in the instruction will actually be the
2127 addend for the reloc. (If we are using REL type relocs).
2128 In such cases, we can ignore the pc when computing
2129 addresses, since the addend is not currently pc-relative. */
2130 pc = 0;
2131
2132 ret = disas_aarch64_insn (pc, word, &inst);
2133
2134 if (((word >> 21) & 0x3ff) == 1)
2135 {
2136 /* RESERVED for ALES. */
2137 assert (ret != ERR_OK);
2138 ret = ERR_NYI;
2139 }
2140
2141 switch (ret)
2142 {
2143 case ERR_UND:
2144 case ERR_UNP:
2145 case ERR_NYI:
2146 /* Handle undefined instructions. */
2147 info->insn_type = dis_noninsn;
2148 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
2149 word, err_msg[-ret]);
2150 break;
2151 case ERR_OK:
2152 user_friendly_fixup (&inst);
2153 print_aarch64_insn (pc, &inst, info);
2154 break;
2155 default:
2156 abort ();
2157 }
2158 }
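
/* For a word that fails to decode, the output mirrors the ".inst" branch
   above, e.g. (assuming, for illustration, that the all-zeroes word is
   undefined):
     .inst	0x00000000 ; undefined  */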
2159
2160 /* Disallow mapping symbols ($x, $d etc) from
2161 being displayed in symbol relative addresses. */
2162
2163 bfd_boolean
2164 aarch64_symbol_is_valid (asymbol * sym,
2165 struct disassemble_info * info ATTRIBUTE_UNUSED)
2166 {
2167 const char * name;
2168
2169 if (sym == NULL)
2170 return FALSE;
2171
2172 name = bfd_asymbol_name (sym);
2173
2174 return name
2175 && (name[0] != '$'
2176 || (name[1] != 'x' && name[1] != 'd')
2177 || (name[2] != '\0' && name[2] != '.'));
2178 }
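
/* Illustrative examples of the test above: "$x", "$d" and "$x.123" are
   mapping symbols and therefore rejected, whereas names such as "main" or
   "$data" do not match the pattern and remain usable in symbol-relative
   addresses.  */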
2179
2180 /* Print data bytes on INFO->STREAM. */
2181
2182 static void
2183 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
2184 uint32_t word,
2185 struct disassemble_info *info)
2186 {
2187 switch (info->bytes_per_chunk)
2188 {
2189 case 1:
2190 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
2191 break;
2192 case 2:
2193 info->fprintf_func (info->stream, ".short\t0x%04x", word);
2194 break;
2195 case 4:
2196 info->fprintf_func (info->stream, ".word\t0x%08x", word);
2197 break;
2198 default:
2199 abort ();
2200 }
2201 }
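
/* For example, a remaining 4-byte chunk of value 0x1234abcd is printed as
   ".word	0x1234abcd", while a lone trailing byte of value 0x2a is
   printed as ".byte	0x2a".  */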
2202
2203 /* Try to infer the code or data type from a symbol.
2204 Returns nonzero if *MAP_TYPE was set. */
2205
2206 static int
2207 get_sym_code_type (struct disassemble_info *info, int n,
2208 enum map_type *map_type)
2209 {
2210 elf_symbol_type *es;
2211 unsigned int type;
2212 const char *name;
2213
2214 es = *(elf_symbol_type **)(info->symtab + n);
2215 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
2216
2217 /* If the symbol has function type then use that. */
2218 if (type == STT_FUNC)
2219 {
2220 *map_type = MAP_INSN;
2221 return TRUE;
2222 }
2223
2224 /* Check for mapping symbols. */
2225 name = bfd_asymbol_name(info->symtab[n]);
2226 if (name[0] == '$'
2227 && (name[1] == 'x' || name[1] == 'd')
2228 && (name[2] == '\0' || name[2] == '.'))
2229 {
2230 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
2231 return TRUE;
2232 }
2233
2234 return FALSE;
2235 }
2236
2237 /* Entry-point of the AArch64 disassembler. */
2238
2239 int
2240 print_insn_aarch64 (bfd_vma pc,
2241 struct disassemble_info *info)
2242 {
2243 bfd_byte buffer[INSNLEN];
2244 int status;
2245 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
2246 bfd_boolean found = FALSE;
2247 unsigned int size = 4;
2248 unsigned long data;
2249
2250 if (info->disassembler_options)
2251 {
2252 set_default_aarch64_dis_options (info);
2253
2254 parse_aarch64_dis_options (info->disassembler_options);
2255
2256 /* To avoid repeated parsing of these options, we remove them here. */
2257 info->disassembler_options = NULL;
2258 }
2259
2260 /* AArch64 instructions are always little-endian. */
2261 info->endian_code = BFD_ENDIAN_LITTLE;
2262
2263 /* First check the full symtab for a mapping symbol, even if there
2264 are no usable non-mapping symbols for this address. */
2265 if (info->symtab_size != 0
2266 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
2267 {
2268 enum map_type type = MAP_INSN;
2269 int last_sym = -1;
2270 bfd_vma addr;
2271 int n;
2272
2273 if (pc <= last_mapping_addr)
2274 last_mapping_sym = -1;
2275
2276 /* Start scanning at the start of the function, or wherever
2277 we finished last time. */
2278 n = info->symtab_pos + 1;
2279 if (n < last_mapping_sym)
2280 n = last_mapping_sym;
2281
2282 /* Scan up to the location being disassembled. */
2283 for (; n < info->symtab_size; n++)
2284 {
2285 addr = bfd_asymbol_value (info->symtab[n]);
2286 if (addr > pc)
2287 break;
2288 if ((info->section == NULL
2289 || info->section == info->symtab[n]->section)
2290 && get_sym_code_type (info, n, &type))
2291 {
2292 last_sym = n;
2293 found = TRUE;
2294 }
2295 }
2296
2297 if (!found)
2298 {
2299 n = info->symtab_pos;
2300 if (n < last_mapping_sym)
2301 n = last_mapping_sym;
2302
2303 /* No mapping symbol found at this address. Look backwards
2304 for a preceding one. */
2305 for (; n >= 0; n--)
2306 {
2307 if (get_sym_code_type (info, n, &type))
2308 {
2309 last_sym = n;
2310 found = TRUE;
2311 break;
2312 }
2313 }
2314 }
2315
2316 last_mapping_sym = last_sym;
2317 last_type = type;
2318
2319 /* Look a little bit ahead to see if we should print out
2320 less than four bytes of data. If there's a symbol,
2321 mapping or otherwise, after two bytes then don't
2322 print more. */
2323 if (last_type == MAP_DATA)
2324 {
2325 size = 4 - (pc & 3);
2326 for (n = last_sym + 1; n < info->symtab_size; n++)
2327 {
2328 addr = bfd_asymbol_value (info->symtab[n]);
2329 if (addr > pc)
2330 {
2331 if (addr - pc < size)
2332 size = addr - pc;
2333 break;
2334 }
2335 }
2336 /* If the next symbol is after three bytes, we need to
2337 print only part of the data, so that we can use either
2338 .byte or .short. */
2339 if (size == 3)
2340 size = (pc & 1) ? 1 : 2;
2341 }
2342 }
2343
2344 if (last_type == MAP_DATA)
2345 {
2346 /* size was set above. */
2347 info->bytes_per_chunk = size;
2348 info->display_endian = info->endian;
2349 printer = print_insn_data;
2350 }
2351 else
2352 {
2353 info->bytes_per_chunk = size = INSNLEN;
2354 info->display_endian = info->endian_code;
2355 printer = print_insn_aarch64_word;
2356 }
2357
2358 status = (*info->read_memory_func) (pc, buffer, size, info);
2359 if (status != 0)
2360 {
2361 (*info->memory_error_func) (status, pc, info);
2362 return -1;
2363 }
2364
2365 data = bfd_get_bits (buffer, size * 8,
2366 info->display_endian == BFD_ENDIAN_BIG);
2367
2368 (*printer) (pc, data, info);
2369
2370 return size;
2371 }
2372 \f
2373 void
2374 print_aarch64_disassembler_options (FILE *stream)
2375 {
2376 fprintf (stream, _("\n\
2377 The following AARCH64 specific disassembler options are supported for use\n\
2378 with the -M switch (multiple options should be separated by commas):\n"));
2379
2380 fprintf (stream, _("\n\
2381 no-aliases Don't print instruction aliases.\n"));
2382
2383 fprintf (stream, _("\n\
2384 aliases Do print instruction aliases.\n"));
2385
2386 #ifdef DEBUG_AARCH64
2387 fprintf (stream, _("\n\
2388 debug_dump Temp switch for debug trace.\n"));
2389 #endif /* DEBUG_AARCH64 */
2390
2391 fprintf (stream, _("\n"));
2392 }
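
/* These options are selected with objdump's -M switch, e.g. (file name for
   illustration only):
     objdump -d -M no-aliases foo.o
   Multiple options may be given, separated by commas, as handled by
   parse_aarch64_dis_options above.  */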