[AArch64][SVE 26/32] Add SVE MUL VL addressing modes
1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "dis-asm.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define ERR_OK 0
30 #define ERR_UND -1
31 #define ERR_UNP -3
32 #define ERR_NYI -5
33
34 #define INSNLEN 4
35
36 /* Cached mapping symbol state. */
37 enum map_type
38 {
39 MAP_INSN,
40 MAP_DATA
41 };
42
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
46
47 /* Other options */
48 static int no_aliases = 0; /* If set disassemble as most general inst. */
49 \f
50
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
61 {
62 no_aliases = 1;
63 return;
64 }
65
66 if (CONST_STRNEQ (option, "aliases"))
67 {
68 no_aliases = 0;
69 return;
70 }
71
72 #ifdef DEBUG_AARCH64
73 if (CONST_STRNEQ (option, "debug_dump"))
74 {
75 debug_dump = 1;
76 return;
77 }
78 #endif /* DEBUG_AARCH64 */
79
80 /* Invalid option. */
81 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
82 }
83
84 static void
85 parse_aarch64_dis_options (const char *options)
86 {
87 const char *option_end;
88
89 if (options == NULL)
90 return;
91
92 while (*options != '\0')
93 {
94 /* Skip empty options. */
95 if (*options == ',')
96 {
97 options++;
98 continue;
99 }
100
101 /* We know that *options is neither NUL nor a comma. */
102 option_end = options + 1;
103 while (*option_end != ',' && *option_end != '\0')
104 option_end++;
105
106 parse_aarch64_dis_option (options, option_end - options);
107
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
111 }
112 }
113 \f
114 /* Functions doing the instruction disassembling. */
115
116 /* The unnamed arguments consist of the number of fields and information about
117 these fields; using that information, the VALUE is extracted from CODE
118 and returned. MASK can be zero or the base mask of the opcode.
119
120 N.B. the fields are required to be in such an order that the most significant
121 field for VALUE comes first, e.g. the <index> in
122 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123 is in some cases encoded in H:L:M; in such cases the fields should be passed
124 in the order of H, L, M. */
125
126 static inline aarch64_insn
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
128 {
129 uint32_t num;
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
132 va_list va;
133
134 va_start (va, mask);
135 num = va_arg (va, uint32_t);
136 assert (num <= 5);
137 aarch64_insn value = 0x0;
138 while (num--)
139 {
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
144 }
145 return value;
146 }
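/* For example, aarch64_ext_reglane below retrieves the H:L:M lane index
   with
     extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);
   3 being the number of fields and FLD_H contributing the most significant
   bits of the returned value.  */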
147
148 /* Extract the value of all fields in SELF->fields from instruction CODE.
149 The least significant bit comes from the final field. */
150
151 static aarch64_insn
152 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
153 {
154 aarch64_insn value;
155 unsigned int i;
156 enum aarch64_field_kind kind;
157
158 value = 0;
159 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
160 {
161 kind = self->fields[i];
162 value <<= fields[kind].width;
163 value |= extract_field (kind, code, 0);
164 }
165 return value;
166 }
167
168 /* Sign-extend VALUE from bit I, i.e. treat bit I as the sign bit. */
169 static inline int32_t
170 sign_extend (aarch64_insn value, unsigned i)
171 {
172 uint32_t ret = value;
173
174 assert (i < 32);
175 if ((value >> i) & 0x1)
176 {
177 uint32_t val = (uint32_t)(-1) << i;
178 ret = ret | val;
179 }
180 return (int32_t) ret;
181 }
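/* For example, sign_extend (0x1f0, 8) treats bit 8 as the sign bit and
   returns -16, whereas sign_extend (0x0f0, 8) returns 0xf0 unchanged.  */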
182
183 /* N.B. the following inline helper functions create a dependency on the
184 order of operand qualifier enumerators. */
185
186 /* Given VALUE, return qualifier for a general purpose register. */
187 static inline enum aarch64_opnd_qualifier
188 get_greg_qualifier_from_value (aarch64_insn value)
189 {
190 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
191 assert (value <= 0x1
192 && aarch64_get_qualifier_standard_value (qualifier) == value);
193 return qualifier;
194 }
195
196 /* Given VALUE, return qualifier for a vector register. This does not support
197 decoding instructions that accept the 2H vector type. */
198
199 static inline enum aarch64_opnd_qualifier
200 get_vreg_qualifier_from_value (aarch64_insn value)
201 {
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
203
204 /* Instructions using vector type 2H should not call this function. Skip over
205 the 2H qualifier. */
206 if (qualifier >= AARCH64_OPND_QLF_V_2H)
207 qualifier += 1;
208
209 assert (value <= 0x8
210 && aarch64_get_qualifier_standard_value (qualifier) == value);
211 return qualifier;
212 }
213
214 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
215 static inline enum aarch64_opnd_qualifier
216 get_sreg_qualifier_from_value (aarch64_insn value)
217 {
218 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
219
220 assert (value <= 0x4
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
222 return qualifier;
223 }
224
225 /* Given the instruction in *INST, which is probably half way through
226 decoding, work out the qualifier that is expected for operand I.
227 Return that qualifier if we can establish it; otherwise return
228 AARCH64_OPND_QLF_NIL. */
229
230 static aarch64_opnd_qualifier_t
231 get_expected_qualifier (const aarch64_inst *inst, int i)
232 {
233 aarch64_opnd_qualifier_seq_t qualifiers;
234 /* Should not be called if the qualifier is known. */
235 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
236 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
237 i, qualifiers))
238 return qualifiers[i];
239 else
240 return AARCH64_OPND_QLF_NIL;
241 }
242
243 /* Operand extractors. */
244
245 int
246 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
247 const aarch64_insn code,
248 const aarch64_inst *inst ATTRIBUTE_UNUSED)
249 {
250 info->reg.regno = extract_field (self->fields[0], code, 0);
251 return 1;
252 }
253
254 int
255 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
256 const aarch64_insn code ATTRIBUTE_UNUSED,
257 const aarch64_inst *inst ATTRIBUTE_UNUSED)
258 {
259 assert (info->idx == 1
260 || info->idx == 3);
261 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
262 return 1;
263 }
264
265 /* e.g. IC <ic_op>{, <Xt>}. */
266 int
267 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
268 const aarch64_insn code,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED)
270 {
271 info->reg.regno = extract_field (self->fields[0], code, 0);
272 assert (info->idx == 1
273 && (aarch64_get_operand_class (inst->operands[0].type)
274 == AARCH64_OPND_CLASS_SYSTEM));
275 /* This will make the constraint checking happy and more importantly will
276 help the disassembler determine whether this operand is optional or
277 not. */
278 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
279
280 return 1;
281 }
282
283 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
284 int
285 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
286 const aarch64_insn code,
287 const aarch64_inst *inst ATTRIBUTE_UNUSED)
288 {
289 /* regno */
290 info->reglane.regno = extract_field (self->fields[0], code,
291 inst->opcode->mask);
292
293 /* Index and/or type. */
294 if (inst->opcode->iclass == asisdone
295 || inst->opcode->iclass == asimdins)
296 {
297 if (info->type == AARCH64_OPND_En
298 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
299 {
300 unsigned shift;
301 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
302 assert (info->idx == 1); /* Vn */
303 aarch64_insn value = extract_field (FLD_imm4, code, 0);
304 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
305 info->qualifier = get_expected_qualifier (inst, info->idx);
306 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
307 info->reglane.index = value >> shift;
308 }
309 else
310 {
311 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
312 imm5<3:0> <V>
313 0000 RESERVED
314 xxx1 B
315 xx10 H
316 x100 S
317 1000 D */
318 int pos = -1;
319 aarch64_insn value = extract_field (FLD_imm5, code, 0);
320 while (++pos <= 3 && (value & 0x1) == 0)
321 value >>= 1;
322 if (pos > 3)
323 return 0;
324 info->qualifier = get_sreg_qualifier_from_value (pos);
325 info->reglane.index = (unsigned) (value >> 1);
326 }
327 }
328 else
329 {
330 /* Index only for e.g.
331 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
332
333 /* Need information in other operand(s) to help decoding. */
334 info->qualifier = get_expected_qualifier (inst, info->idx);
335 switch (info->qualifier)
336 {
337 case AARCH64_OPND_QLF_S_H:
338 /* h:l:m */
339 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
340 FLD_M);
341 info->reglane.regno &= 0xf;
342 break;
343 case AARCH64_OPND_QLF_S_S:
344 /* h:l */
345 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
346 break;
347 case AARCH64_OPND_QLF_S_D:
348 /* H */
349 info->reglane.index = extract_field (FLD_H, code, 0);
350 break;
351 default:
352 return 0;
353 }
354 }
355
356 return 1;
357 }
358
359 int
360 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
361 const aarch64_insn code,
362 const aarch64_inst *inst ATTRIBUTE_UNUSED)
363 {
364 /* R */
365 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
366 /* len */
367 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
368 return 1;
369 }
370
371 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
372 int
373 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
374 aarch64_opnd_info *info, const aarch64_insn code,
375 const aarch64_inst *inst)
376 {
377 aarch64_insn value;
378 /* Number of elements in each structure to be loaded/stored. */
379 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
380
381 struct
382 {
383 unsigned is_reserved;
384 unsigned num_regs;
385 unsigned num_elements;
386 } data [] =
387 { {0, 4, 4},
388 {1, 4, 4},
389 {0, 4, 1},
390 {0, 4, 2},
391 {0, 3, 3},
392 {1, 3, 3},
393 {0, 3, 1},
394 {0, 1, 1},
395 {0, 2, 2},
396 {1, 2, 2},
397 {0, 2, 1},
398 };
399
400 /* Rt */
401 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
402 /* opcode */
403 value = extract_field (FLD_opcode, code, 0);
404 if (expected_num != data[value].num_elements || data[value].is_reserved)
405 return 0;
406 info->reglist.num_regs = data[value].num_regs;
407
408 return 1;
409 }
410
411 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
412 lanes instructions. */
413 int
414 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
415 aarch64_opnd_info *info, const aarch64_insn code,
416 const aarch64_inst *inst)
417 {
418 aarch64_insn value;
419
420 /* Rt */
421 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
422 /* S */
423 value = extract_field (FLD_S, code, 0);
424
425 /* Number of registers is equal to the number of elements in
426 each structure to be loaded/stored. */
427 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
428 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
429
430 /* Except when it is LD1R. */
431 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
432 info->reglist.num_regs = 2;
433
434 return 1;
435 }
436
437 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
438 load/store single element instructions. */
439 int
440 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
441 aarch64_opnd_info *info, const aarch64_insn code,
442 const aarch64_inst *inst ATTRIBUTE_UNUSED)
443 {
444 aarch64_field field = {0, 0};
445 aarch64_insn QSsize; /* fields Q:S:size. */
446 aarch64_insn opcodeh2; /* opcode<2:1> */
447
448 /* Rt */
449 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
450
451 /* Decode the index, opcode<2:1> and size. */
452 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
453 opcodeh2 = extract_field_2 (&field, code, 0);
454 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
455 switch (opcodeh2)
456 {
457 case 0x0:
458 info->qualifier = AARCH64_OPND_QLF_S_B;
459 /* Index encoded in "Q:S:size". */
460 info->reglist.index = QSsize;
461 break;
462 case 0x1:
463 if (QSsize & 0x1)
464 /* UND. */
465 return 0;
466 info->qualifier = AARCH64_OPND_QLF_S_H;
467 /* Index encoded in "Q:S:size<1>". */
468 info->reglist.index = QSsize >> 1;
469 break;
470 case 0x2:
471 if ((QSsize >> 1) & 0x1)
472 /* UND. */
473 return 0;
474 if ((QSsize & 0x1) == 0)
475 {
476 info->qualifier = AARCH64_OPND_QLF_S_S;
477 /* Index encoded in "Q:S". */
478 info->reglist.index = QSsize >> 2;
479 }
480 else
481 {
482 if (extract_field (FLD_S, code, 0))
483 /* UND */
484 return 0;
485 info->qualifier = AARCH64_OPND_QLF_S_D;
486 /* Index encoded in "Q". */
487 info->reglist.index = QSsize >> 3;
488 }
489 break;
490 default:
491 return 0;
492 }
493
494 info->reglist.has_index = 1;
495 info->reglist.num_regs = 0;
496 /* Number of registers is equal to the number of elements in
497 each structure to be loaded/stored. */
498 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
499 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
500
501 return 1;
502 }
503
504 /* Decode fields immh:immb and/or Q for e.g.
505 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
506 or SSHR <V><d>, <V><n>, #<shift>. */
507
508 int
509 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
510 aarch64_opnd_info *info, const aarch64_insn code,
511 const aarch64_inst *inst)
512 {
513 int pos;
514 aarch64_insn Q, imm, immh;
515 enum aarch64_insn_class iclass = inst->opcode->iclass;
516
517 immh = extract_field (FLD_immh, code, 0);
518 if (immh == 0)
519 return 0;
520 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
521 pos = 4;
522 /* Get highest set bit in immh. */
523 while (--pos >= 0 && (immh & 0x8) == 0)
524 immh <<= 1;
525
526 assert ((iclass == asimdshf || iclass == asisdshf)
527 && (info->type == AARCH64_OPND_IMM_VLSR
528 || info->type == AARCH64_OPND_IMM_VLSL));
529
530 if (iclass == asimdshf)
531 {
532 Q = extract_field (FLD_Q, code, 0);
533 /* immh Q <T>
534 0000 x SEE AdvSIMD modified immediate
535 0001 0 8B
536 0001 1 16B
537 001x 0 4H
538 001x 1 8H
539 01xx 0 2S
540 01xx 1 4S
541 1xxx 0 RESERVED
542 1xxx 1 2D */
543 info->qualifier =
544 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
545 }
546 else
547 info->qualifier = get_sreg_qualifier_from_value (pos);
548
549 if (info->type == AARCH64_OPND_IMM_VLSR)
550 /* immh <shift>
551 0000 SEE AdvSIMD modified immediate
552 0001 (16-UInt(immh:immb))
553 001x (32-UInt(immh:immb))
554 01xx (64-UInt(immh:immb))
555 1xxx (128-UInt(immh:immb)) */
556 info->imm.value = (16 << pos) - imm;
557 else
558 /* immh:immb
559 immh <shift>
560 0000 SEE AdvSIMD modified immediate
561 0001 (UInt(immh:immb)-8)
562 001x (UInt(immh:immb)-16)
563 01xx (UInt(immh:immb)-32)
564 1xxx (UInt(immh:immb)-64) */
565 info->imm.value = imm - (8 << pos);
566
567 return 1;
568 }
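/* For example, for the vector form SSHR <Vd>.<T>, <Vn>.<T>, #<shift> with
   immh:immb = 0001:101 and Q = 0, the highest set bit of immh gives
   pos = 0, so <T> is 8B and the decoded shift is (16 << 0) - 13 = 3.  */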
569
570 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
571 int
572 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
573 aarch64_opnd_info *info, const aarch64_insn code,
574 const aarch64_inst *inst ATTRIBUTE_UNUSED)
575 {
576 int64_t imm;
577 aarch64_insn val;
578 val = extract_field (FLD_size, code, 0);
579 switch (val)
580 {
581 case 0: imm = 8; break;
582 case 1: imm = 16; break;
583 case 2: imm = 32; break;
584 default: return 0;
585 }
586 info->imm.value = imm;
587 return 1;
588 }
589
590 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
591 The value in the field(s) is extracted as an unsigned immediate value. */
592 int
593 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
594 const aarch64_insn code,
595 const aarch64_inst *inst ATTRIBUTE_UNUSED)
596 {
597 int64_t imm;
598
599 imm = extract_all_fields (self, code);
600
601 if (operand_need_sign_extension (self))
602 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
603
604 if (operand_need_shift_by_two (self))
605 imm <<= 2;
606
607 if (info->type == AARCH64_OPND_ADDR_ADRP)
608 imm <<= 12;
609
610 info->imm.value = imm;
611 return 1;
612 }
613
614 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
615 int
616 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
617 const aarch64_insn code,
618 const aarch64_inst *inst ATTRIBUTE_UNUSED)
619 {
620 aarch64_ext_imm (self, info, code, inst);
621 info->shifter.kind = AARCH64_MOD_LSL;
622 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
623 return 1;
624 }
625
626 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
627 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
628 int
629 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
630 aarch64_opnd_info *info,
631 const aarch64_insn code,
632 const aarch64_inst *inst ATTRIBUTE_UNUSED)
633 {
634 uint64_t imm;
635 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
636 aarch64_field field = {0, 0};
637
638 assert (info->idx == 1);
639
640 if (info->type == AARCH64_OPND_SIMD_FPIMM)
641 info->imm.is_fp = 1;
642
643 /* a:b:c:d:e:f:g:h */
644 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
645 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
646 {
647 /* Either MOVI <Dd>, #<imm>
648 or MOVI <Vd>.2D, #<imm>.
649 <imm> is a 64-bit immediate
650 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
651 encoded in "a:b:c:d:e:f:g:h". */
652 int i;
653 unsigned abcdefgh = imm;
654 for (imm = 0ull, i = 0; i < 8; i++)
655 if (((abcdefgh >> i) & 0x1) != 0)
656 imm |= 0xffull << (8 * i);
657 }
658 info->imm.value = imm;
659
660 /* cmode */
661 info->qualifier = get_expected_qualifier (inst, info->idx);
662 switch (info->qualifier)
663 {
664 case AARCH64_OPND_QLF_NIL:
665 /* no shift */
666 info->shifter.kind = AARCH64_MOD_NONE;
667 return 1;
668 case AARCH64_OPND_QLF_LSL:
669 /* shift zeros */
670 info->shifter.kind = AARCH64_MOD_LSL;
671 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
672 {
673 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
674 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
675 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
676 default: assert (0); return 0;
677 }
678 /* 00: 0; 01: 8; 10:16; 11:24. */
679 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
680 break;
681 case AARCH64_OPND_QLF_MSL:
682 /* shift ones */
683 info->shifter.kind = AARCH64_MOD_MSL;
684 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
685 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
686 break;
687 default:
688 assert (0);
689 return 0;
690 }
691
692 return 1;
693 }
694
695 /* Decode an 8-bit floating-point immediate. */
696 int
697 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
698 const aarch64_insn code,
699 const aarch64_inst *inst ATTRIBUTE_UNUSED)
700 {
701 info->imm.value = extract_all_fields (self, code);
702 info->imm.is_fp = 1;
703 return 1;
704 }
705
706 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
707 int
708 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
709 aarch64_opnd_info *info, const aarch64_insn code,
710 const aarch64_inst *inst ATTRIBUTE_UNUSED)
711 {
712 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
713 return 1;
714 }
715
716 /* Decode arithmetic immediate for e.g.
717 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
718 int
719 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
720 aarch64_opnd_info *info, const aarch64_insn code,
721 const aarch64_inst *inst ATTRIBUTE_UNUSED)
722 {
723 aarch64_insn value;
724
725 info->shifter.kind = AARCH64_MOD_LSL;
726 /* shift */
727 value = extract_field (FLD_shift, code, 0);
728 if (value >= 2)
729 return 0;
730 info->shifter.amount = value ? 12 : 0;
731 /* imm12 (unsigned) */
732 info->imm.value = extract_field (FLD_imm12, code, 0);
733
734 return 1;
735 }
736
737 /* Decode logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
738
739 int
740 aarch64_ext_limm (const aarch64_operand *self ATTRIBUTE_UNUSED,
741 aarch64_opnd_info *info, const aarch64_insn code,
742 const aarch64_inst *inst ATTRIBUTE_UNUSED)
743 {
744 uint64_t imm, mask;
745 uint32_t sf;
746 uint32_t N, R, S;
747 unsigned simd_size;
748 aarch64_insn value;
749
750 value = extract_fields (code, 0, 3, FLD_N, FLD_immr, FLD_imms);
751 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_W
752 || inst->operands[0].qualifier == AARCH64_OPND_QLF_X);
753 sf = aarch64_get_qualifier_esize (inst->operands[0].qualifier) != 4;
754
755 /* value is N:immr:imms. */
756 S = value & 0x3f;
757 R = (value >> 6) & 0x3f;
758 N = (value >> 12) & 0x1;
759
760 if (sf == 0 && N == 1)
761 return 0;
762
763 /* The immediate value is S+1 bits set to 1, rotated left by SIMDsize - R
764 (in other words, rotated right by R), then replicated. */
765 if (N != 0)
766 {
767 simd_size = 64;
768 mask = 0xffffffffffffffffull;
769 }
770 else
771 {
772 switch (S)
773 {
774 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
775 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
776 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
777 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
778 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
779 default: return 0;
780 }
781 mask = (1ull << simd_size) - 1;
782 /* Top bits are IGNORED. */
783 R &= simd_size - 1;
784 }
785 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
786 if (S == simd_size - 1)
787 return 0;
788 /* S+1 consecutive bits to 1. */
789 /* NOTE: S can't be 63 due to detection above. */
790 imm = (1ull << (S + 1)) - 1;
791 /* Rotate to the left by simd_size - R. */
792 if (R != 0)
793 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
794 /* Replicate the value according to SIMD size. */
795 switch (simd_size)
796 {
797 case 2: imm = (imm << 2) | imm;
798 case 4: imm = (imm << 4) | imm;
799 case 8: imm = (imm << 8) | imm;
800 case 16: imm = (imm << 16) | imm;
801 case 32: imm = (imm << 32) | imm;
802 case 64: break;
803 default: assert (0); return 0;
804 }
805
806 info->imm.value = sf ? imm : imm & 0xffffffff;
807
808 return 1;
809 }
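/* Worked example: N = 0, immr = 1, imms = 1 selects a 32-bit element
   (simd_size = 32) with S + 1 = 2 set bits, rotated right by R = 1 to give
   0x80000001; the deliberately falling-through switch above then
   replicates this to 0x8000000180000001 for the 64-bit form, of which the
   low 32 bits are kept for the W form.  */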
810
811 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
812 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
813 int
814 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
815 aarch64_opnd_info *info,
816 const aarch64_insn code, const aarch64_inst *inst)
817 {
818 aarch64_insn value;
819
820 /* Rt */
821 info->reg.regno = extract_field (FLD_Rt, code, 0);
822
823 /* size */
824 value = extract_field (FLD_ldst_size, code, 0);
825 if (inst->opcode->iclass == ldstpair_indexed
826 || inst->opcode->iclass == ldstnapair_offs
827 || inst->opcode->iclass == ldstpair_off
828 || inst->opcode->iclass == loadlit)
829 {
830 enum aarch64_opnd_qualifier qualifier;
831 switch (value)
832 {
833 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
834 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
835 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
836 default: return 0;
837 }
838 info->qualifier = qualifier;
839 }
840 else
841 {
842 /* opc1:size */
843 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
844 if (value > 0x4)
845 return 0;
846 info->qualifier = get_sreg_qualifier_from_value (value);
847 }
848
849 return 1;
850 }
851
852 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
853 int
854 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
855 aarch64_opnd_info *info,
856 aarch64_insn code,
857 const aarch64_inst *inst ATTRIBUTE_UNUSED)
858 {
859 /* Rn */
860 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
861 return 1;
862 }
863
864 /* Decode the address operand for e.g.
865 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
866 int
867 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
868 aarch64_opnd_info *info,
869 aarch64_insn code, const aarch64_inst *inst)
870 {
871 aarch64_insn S, value;
872
873 /* Rn */
874 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
875 /* Rm */
876 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
877 /* option */
878 value = extract_field (FLD_option, code, 0);
879 info->shifter.kind =
880 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
881 /* Fix-up the shifter kind; although the table-driven approach is
882 efficient, it is slightly inflexible, thus needing this fix-up. */
883 if (info->shifter.kind == AARCH64_MOD_UXTX)
884 info->shifter.kind = AARCH64_MOD_LSL;
885 /* S */
886 S = extract_field (FLD_S, code, 0);
887 if (S == 0)
888 {
889 info->shifter.amount = 0;
890 info->shifter.amount_present = 0;
891 }
892 else
893 {
894 int size;
895 /* Need information in other operand(s) to help achieve the decoding
896 from the 'S' field. */
897 info->qualifier = get_expected_qualifier (inst, info->idx);
898 /* Get the size of the data element that is accessed, which may be
899 different from that of the source register size, e.g. in strb/ldrb. */
900 size = aarch64_get_qualifier_esize (info->qualifier);
901 info->shifter.amount = get_logsz (size);
902 info->shifter.amount_present = 1;
903 }
904
905 return 1;
906 }
907
908 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
909 int
910 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
911 aarch64_insn code, const aarch64_inst *inst)
912 {
913 aarch64_insn imm;
914 info->qualifier = get_expected_qualifier (inst, info->idx);
915
916 /* Rn */
917 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
918 /* simm (imm9 or imm7) */
919 imm = extract_field (self->fields[0], code, 0);
920 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
921 if (self->fields[0] == FLD_imm7)
922 /* scaled immediate in ld/st pair instructions. */
923 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
924 /* qualifier */
925 if (inst->opcode->iclass == ldst_unscaled
926 || inst->opcode->iclass == ldstnapair_offs
927 || inst->opcode->iclass == ldstpair_off
928 || inst->opcode->iclass == ldst_unpriv)
929 info->addr.writeback = 0;
930 else
931 {
932 /* pre/post- index */
933 info->addr.writeback = 1;
934 if (extract_field (self->fields[1], code, 0) == 1)
935 info->addr.preind = 1;
936 else
937 info->addr.postind = 1;
938 }
939
940 return 1;
941 }
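/* For example, for LDP <Xt1>, <Xt2>, [<Xn|SP>], #<imm>, an imm7 field of
   0b1111111 sign-extends to -1 and, scaled by the 8-byte element size,
   gives an offset of -8.  */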
942
943 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
944 int
945 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
946 aarch64_insn code,
947 const aarch64_inst *inst ATTRIBUTE_UNUSED)
948 {
949 int shift;
950 info->qualifier = get_expected_qualifier (inst, info->idx);
951 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
952 /* Rn */
953 info->addr.base_regno = extract_field (self->fields[0], code, 0);
954 /* uimm12 */
955 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
956 return 1;
957 }
958
959 /* Decode the address operand for e.g.
960 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
961 int
962 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
963 aarch64_opnd_info *info,
964 aarch64_insn code, const aarch64_inst *inst)
965 {
966 /* The opcode dependent area stores the number of elements in
967 each structure to be loaded/stored. */
968 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
969
970 /* Rn */
971 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
972 /* Rm | #<amount> */
973 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
974 if (info->addr.offset.regno == 31)
975 {
976 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
977 /* Special handling of loading a single structure to all lanes. */
978 info->addr.offset.imm = (is_ld1r ? 1
979 : inst->operands[0].reglist.num_regs)
980 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
981 else
982 info->addr.offset.imm = inst->operands[0].reglist.num_regs
983 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
984 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
985 }
986 else
987 info->addr.offset.is_reg = 1;
988 info->addr.writeback = 1;
989
990 return 1;
991 }
992
993 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
994 int
995 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
996 aarch64_opnd_info *info,
997 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
998 {
999 aarch64_insn value;
1000 /* cond */
1001 value = extract_field (FLD_cond, code, 0);
1002 info->cond = get_cond_from_value (value);
1003 return 1;
1004 }
1005
1006 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1007 int
1008 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1009 aarch64_opnd_info *info,
1010 aarch64_insn code,
1011 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1012 {
1013 /* op0:op1:CRn:CRm:op2 */
1014 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1015 FLD_CRm, FLD_op2);
1016 return 1;
1017 }
1018
1019 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1020 int
1021 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1022 aarch64_opnd_info *info, aarch64_insn code,
1023 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1024 {
1025 int i;
1026 /* op1:op2 */
1027 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1028 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1029 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1030 return 1;
1031 /* Reserved value in <pstatefield>. */
1032 return 0;
1033 }
1034
1035 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1036 int
1037 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1038 aarch64_opnd_info *info,
1039 aarch64_insn code,
1040 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1041 {
1042 int i;
1043 aarch64_insn value;
1044 const aarch64_sys_ins_reg *sysins_ops;
1045 /* op0:op1:CRn:CRm:op2 */
1046 value = extract_fields (code, 0, 5,
1047 FLD_op0, FLD_op1, FLD_CRn,
1048 FLD_CRm, FLD_op2);
1049
1050 switch (info->type)
1051 {
1052 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1053 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1054 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1055 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1056 default: assert (0); return 0;
1057 }
1058
1059 for (i = 0; sysins_ops[i].name != NULL; ++i)
1060 if (sysins_ops[i].value == value)
1061 {
1062 info->sysins_op = sysins_ops + i;
1063 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1064 info->sysins_op->name,
1065 (unsigned)info->sysins_op->value,
1066 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1067 return 1;
1068 }
1069
1070 return 0;
1071 }
1072
1073 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1074
1075 int
1076 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1077 aarch64_opnd_info *info,
1078 aarch64_insn code,
1079 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1080 {
1081 /* CRm */
1082 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1083 return 1;
1084 }
1085
1086 /* Decode the prefetch operation option operand for e.g.
1087 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1088
1089 int
1090 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1091 aarch64_opnd_info *info,
1092 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1093 {
1094 /* prfop in Rt */
1095 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1096 return 1;
1097 }
1098
1099 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1100 to the matching name/value pair in aarch64_hint_options. */
1101
1102 int
1103 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1104 aarch64_opnd_info *info,
1105 aarch64_insn code,
1106 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1107 {
1108 /* CRm:op2. */
1109 unsigned hint_number;
1110 int i;
1111
1112 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1113
1114 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1115 {
1116 if (hint_number == aarch64_hint_options[i].value)
1117 {
1118 info->hint_option = &(aarch64_hint_options[i]);
1119 return 1;
1120 }
1121 }
1122
1123 return 0;
1124 }
1125
1126 /* Decode the extended register operand for e.g.
1127 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1128 int
1129 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1130 aarch64_opnd_info *info,
1131 aarch64_insn code,
1132 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1133 {
1134 aarch64_insn value;
1135
1136 /* Rm */
1137 info->reg.regno = extract_field (FLD_Rm, code, 0);
1138 /* option */
1139 value = extract_field (FLD_option, code, 0);
1140 info->shifter.kind =
1141 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1142 /* imm3 */
1143 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1144
1145 /* This makes the constraint checking happy. */
1146 info->shifter.operator_present = 1;
1147
1148 /* Assume inst->operands[0].qualifier has been resolved. */
1149 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1150 info->qualifier = AARCH64_OPND_QLF_W;
1151 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1152 && (info->shifter.kind == AARCH64_MOD_UXTX
1153 || info->shifter.kind == AARCH64_MOD_SXTX))
1154 info->qualifier = AARCH64_OPND_QLF_X;
1155
1156 return 1;
1157 }
1158
1159 /* Decode the shifted register operand for e.g.
1160 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1161 int
1162 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1163 aarch64_opnd_info *info,
1164 aarch64_insn code,
1165 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1166 {
1167 aarch64_insn value;
1168
1169 /* Rm */
1170 info->reg.regno = extract_field (FLD_Rm, code, 0);
1171 /* shift */
1172 value = extract_field (FLD_shift, code, 0);
1173 info->shifter.kind =
1174 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1175 if (info->shifter.kind == AARCH64_MOD_ROR
1176 && inst->opcode->iclass != log_shift)
1177 /* ROR is not available for the shifted register operand in arithmetic
1178 instructions. */
1179 return 0;
1180 /* imm6 */
1181 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1182
1183 /* This makes the constraint checking happy. */
1184 info->shifter.operator_present = 1;
1185
1186 return 1;
1187 }
1188
1189 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1190 where <offset> is given by the OFFSET parameter and where <factor> is
1191 1 plus SELF's operand-dependent value. fields[0] specifies the field
1192 that holds <base>. */
1193 static int
1194 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1195 aarch64_opnd_info *info, aarch64_insn code,
1196 int64_t offset)
1197 {
1198 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1199 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1200 info->addr.offset.is_reg = FALSE;
1201 info->addr.writeback = FALSE;
1202 info->addr.preind = TRUE;
1203 if (offset != 0)
1204 info->shifter.kind = AARCH64_MOD_MUL_VL;
1205 info->shifter.amount = 1;
1206 info->shifter.operator_present = (info->addr.offset.imm != 0);
1207 info->shifter.amount_present = FALSE;
1208 return 1;
1209 }
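/* For example, with an operand-dependent value of 0 (<factor> = 1) and a
   decoded OFFSET of -1, this records an immediate offset of -1 with
   AARCH64_MOD_MUL_VL as the shifter kind, i.e. the address form
   [<base>, #-1, MUL VL]; a zero offset leaves operator_present clear,
   so the MUL VL operator can be omitted.  */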
1210
1211 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1212 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1213 SELF's operand-dependent value. fields[0] specifies the field that
1214 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1215 int
1216 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1217 aarch64_opnd_info *info, aarch64_insn code,
1218 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1219 {
1220 int offset;
1221
1222 offset = extract_field (FLD_SVE_imm4, code, 0);
1223 offset = ((offset + 8) & 15) - 8;
1224 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1225 }
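/* The masking arithmetic above maps the 4-bit field onto the signed range
   [-8, 7]; for example an SVE_imm4 value of 0b1111 gives
   ((15 + 8) & 15) - 8 = -1.  */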
1226
1227 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1228 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1229 SELF's operand-dependent value. fields[0] specifies the field that
1230 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1231 int
1232 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1233 aarch64_opnd_info *info, aarch64_insn code,
1234 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1235 {
1236 int offset;
1237
1238 offset = extract_field (FLD_SVE_imm6, code, 0);
1239 offset = (((offset + 32) & 63) - 32);
1240 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1241 }
1242
1243 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1244 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1245 SELF's operand-dependent value. fields[0] specifies the field that
1246 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1247 and imm3 fields, with imm3 being the less-significant part. */
1248 int
1249 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1250 aarch64_opnd_info *info,
1251 aarch64_insn code,
1252 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1253 {
1254 int offset;
1255
1256 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1257 offset = (((offset + 256) & 511) - 256);
1258 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1259 }
1260
1261 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1262 is given by the OFFSET parameter and where <shift> is SELF's operand-
1263 dependent value. fields[0] specifies the base register field <base>. */
1264 static int
1265 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1266 aarch64_opnd_info *info, aarch64_insn code,
1267 int64_t offset)
1268 {
1269 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1270 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1271 info->addr.offset.is_reg = FALSE;
1272 info->addr.writeback = FALSE;
1273 info->addr.preind = TRUE;
1274 info->shifter.operator_present = FALSE;
1275 info->shifter.amount_present = FALSE;
1276 return 1;
1277 }
1278
1279 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1280 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1281 value. fields[0] specifies the base register field. */
1282 int
1283 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1284 aarch64_opnd_info *info, aarch64_insn code,
1285 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1286 {
1287 int offset = extract_field (FLD_SVE_imm6, code, 0);
1288 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1289 }
1290
1291 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1292 is SELF's operand-dependent value. fields[0] specifies the base
1293 register field and fields[1] specifies the offset register field. */
1294 int
1295 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1296 aarch64_opnd_info *info, aarch64_insn code,
1297 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1298 {
1299 int index;
1300
1301 index = extract_field (self->fields[1], code, 0);
1302 if (index == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1303 return 0;
1304
1305 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1306 info->addr.offset.regno = index;
1307 info->addr.offset.is_reg = TRUE;
1308 info->addr.writeback = FALSE;
1309 info->addr.preind = TRUE;
1310 info->shifter.kind = AARCH64_MOD_LSL;
1311 info->shifter.amount = get_operand_specific_data (self);
1312 info->shifter.operator_present = (info->shifter.amount != 0);
1313 info->shifter.amount_present = (info->shifter.amount != 0);
1314 return 1;
1315 }
1316
1317 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1318 <shift> is SELF's operand-dependent value. fields[0] specifies the
1319 base register field, fields[1] specifies the offset register field and
1320 fields[2] is a single-bit field that selects SXTW over UXTW. */
1321 int
1322 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1323 aarch64_opnd_info *info, aarch64_insn code,
1324 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1325 {
1326 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1327 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1328 info->addr.offset.is_reg = TRUE;
1329 info->addr.writeback = FALSE;
1330 info->addr.preind = TRUE;
1331 if (extract_field (self->fields[2], code, 0))
1332 info->shifter.kind = AARCH64_MOD_SXTW;
1333 else
1334 info->shifter.kind = AARCH64_MOD_UXTW;
1335 info->shifter.amount = get_operand_specific_data (self);
1336 info->shifter.operator_present = TRUE;
1337 info->shifter.amount_present = (info->shifter.amount != 0);
1338 return 1;
1339 }
1340
1341 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1342 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1343 fields[0] specifies the base register field. */
1344 int
1345 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1346 aarch64_opnd_info *info, aarch64_insn code,
1347 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1348 {
1349 int offset = extract_field (FLD_imm5, code, 0);
1350 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1351 }
1352
1353 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1354 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1355 number. fields[0] specifies the base register field and fields[1]
1356 specifies the offset register field. */
1357 static int
1358 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1359 aarch64_insn code, enum aarch64_modifier_kind kind)
1360 {
1361 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1362 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1363 info->addr.offset.is_reg = TRUE;
1364 info->addr.writeback = FALSE;
1365 info->addr.preind = TRUE;
1366 info->shifter.kind = kind;
1367 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1368 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1369 || info->shifter.amount != 0);
1370 info->shifter.amount_present = (info->shifter.amount != 0);
1371 return 1;
1372 }
1373
1374 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1375 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1376 field and fields[1] specifies the offset register field. */
1377 int
1378 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1379 aarch64_opnd_info *info, aarch64_insn code,
1380 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1381 {
1382 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1383 }
1384
1385 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1386 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1387 field and fields[1] specifies the offset register field. */
1388 int
1389 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1390 aarch64_opnd_info *info, aarch64_insn code,
1391 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1392 {
1393 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1394 }
1395
1396 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1397 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1398 field and fields[1] specifies the offset register field. */
1399 int
1400 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1401 aarch64_opnd_info *info, aarch64_insn code,
1402 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1403 {
1404 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1405 }
1406
1407 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1408 array specifies which field to use for Zn. MM is encoded in the
1409 concatenation of imm5 and SVE_tszh, with imm5 being the less
1410 significant part. */
1411 int
1412 aarch64_ext_sve_index (const aarch64_operand *self,
1413 aarch64_opnd_info *info, aarch64_insn code,
1414 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1415 {
1416 int val;
1417
1418 info->reglane.regno = extract_field (self->fields[0], code, 0);
1419 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1420 if ((val & 15) == 0)
1421 return 0;
1422 while ((val & 1) == 0)
1423 val /= 2;
1424 info->reglane.index = val / 2;
1425 return 1;
1426 }
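/* For example, a combined SVE_tszh:imm5 value of 0b0100110 has its lowest
   set bit at position 1, so the loop above strips one trailing zero and
   the decoded index is 0b10011 / 2 = 9.  */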
1427
1428 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1429 to use for Zn. The opcode-dependent value specifies the number
1430 of registers in the list. */
1431 int
1432 aarch64_ext_sve_reglist (const aarch64_operand *self,
1433 aarch64_opnd_info *info, aarch64_insn code,
1434 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1435 {
1436 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1437 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1438 return 1;
1439 }
1440
1441 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1442 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1443 field. */
1444 int
1445 aarch64_ext_sve_scale (const aarch64_operand *self,
1446 aarch64_opnd_info *info, aarch64_insn code,
1447 const aarch64_inst *inst)
1448 {
1449 int val;
1450
1451 if (!aarch64_ext_imm (self, info, code, inst))
1452 return 0;
1453 val = extract_field (FLD_SVE_imm4, code, 0);
1454 info->shifter.kind = AARCH64_MOD_MUL;
1455 info->shifter.amount = val + 1;
1456 info->shifter.operator_present = (val != 0);
1457 info->shifter.amount_present = (val != 0);
1458 return 1;
1459 }
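/* For example, an SVE_imm4 value of 3 gives MUL #4, while a value of 0
   gives an amount of 1 with operator_present and amount_present clear,
   so the MUL operator can be omitted.  */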
1460 \f
1461 /* Bitfields that are commonly used to encode certain operands' information
1462 may be partially used as part of the base opcode in some instructions.
1463 For example, the bit 1 of the field 'size' in
1464 FCVTXN <Vb><d>, <Va><n>
1465 is actually part of the base opcode, while only size<0> is available
1466 for encoding the register type. Another example is the AdvSIMD
1467 instruction ORR (register), in which the field 'size' is also used for
1468 the base opcode, leaving only the field 'Q' available to encode the
1469 vector register arrangement specifier '8B' or '16B'.
1470
1471 This function tries to deduce the qualifier from the value of partially
1472 constrained field(s). Given the VALUE of such a field or fields, the
1473 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1474 operand encoding), the function returns the matching qualifier or
1475 AARCH64_OPND_QLF_NIL if nothing matches.
1476
1477 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1478 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1479 may end with AARCH64_OPND_QLF_NIL. */
1480
1481 static enum aarch64_opnd_qualifier
1482 get_qualifier_from_partial_encoding (aarch64_insn value,
1483 const enum aarch64_opnd_qualifier* \
1484 candidates,
1485 aarch64_insn mask)
1486 {
1487 int i;
1488 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1489 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1490 {
1491 aarch64_insn standard_value;
1492 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1493 break;
1494 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1495 if ((standard_value & mask) == (value & mask))
1496 return candidates[i];
1497 }
1498 return AARCH64_OPND_QLF_NIL;
1499 }
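/* For example, for the AdvSIMD ORR (register) case described above, only
   the Q bit is free, so MASK covers just that bit; with Q = 1 the
   candidate list { 8B, 16B } matches 16B, whose standard value differs
   from that of 8B only in the Q bit.  */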
1500
1501 /* Given a list of qualifier sequences, return all possible valid qualifiers
1502 for operand IDX in QUALIFIERS.
1503 Assume QUALIFIERS is an array whose length is large enough. */
1504
1505 static void
1506 get_operand_possible_qualifiers (int idx,
1507 const aarch64_opnd_qualifier_seq_t *list,
1508 enum aarch64_opnd_qualifier *qualifiers)
1509 {
1510 int i;
1511 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1512 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1513 break;
1514 }
1515
1516 /* Decode the size:Q fields for e.g. SHADD.
1517 We tag one operand with the qualifier according to the code;
1518 whether the qualifier is valid for this opcode or not is the
1519 duty of the semantic checking. */
1520
1521 static int
1522 decode_sizeq (aarch64_inst *inst)
1523 {
1524 int idx;
1525 enum aarch64_opnd_qualifier qualifier;
1526 aarch64_insn code;
1527 aarch64_insn value, mask;
1528 enum aarch64_field_kind fld_sz;
1529 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1530
1531 if (inst->opcode->iclass == asisdlse
1532 || inst->opcode->iclass == asisdlsep
1533 || inst->opcode->iclass == asisdlso
1534 || inst->opcode->iclass == asisdlsop)
1535 fld_sz = FLD_vldst_size;
1536 else
1537 fld_sz = FLD_size;
1538
1539 code = inst->value;
1540 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1541 /* Work out which bits of the fields Q and size are actually
1542 available for operand encoding. Opcodes like FMAXNM and FMLA have
1543 size[1] unavailable. */
1544 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1545
1546 /* The index of the operand to be tagged with a qualifier, and the qualifier
1547 itself, are deduced from the value of the size and Q fields and the
1548 possible valid qualifier lists. */
1549 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1550 DEBUG_TRACE ("key idx: %d", idx);
1551
1552 /* For most related instructions, size:Q is fully available for operand
1553 encoding. */
1554 if (mask == 0x7)
1555 {
1556 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1557 return 1;
1558 }
1559
1560 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1561 candidates);
1562 #ifdef DEBUG_AARCH64
1563 if (debug_dump)
1564 {
1565 int i;
1566 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM
1567 && candidates[i] != AARCH64_OPND_QLF_NIL; ++i)
1568 DEBUG_TRACE ("qualifier %d: %s", i,
1569 aarch64_get_qualifier_name(candidates[i]));
1570 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1571 }
1572 #endif /* DEBUG_AARCH64 */
1573
1574 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1575
1576 if (qualifier == AARCH64_OPND_QLF_NIL)
1577 return 0;
1578
1579 inst->operands[idx].qualifier = qualifier;
1580 return 1;
1581 }
1582
1583 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1584 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1585
1586 static int
1587 decode_asimd_fcvt (aarch64_inst *inst)
1588 {
1589 aarch64_field field = {0, 0};
1590 aarch64_insn value;
1591 enum aarch64_opnd_qualifier qualifier;
1592
1593 gen_sub_field (FLD_size, 0, 1, &field);
1594 value = extract_field_2 (&field, inst->value, 0);
1595 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1596 : AARCH64_OPND_QLF_V_2D;
1597 switch (inst->opcode->op)
1598 {
1599 case OP_FCVTN:
1600 case OP_FCVTN2:
1601 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1602 inst->operands[1].qualifier = qualifier;
1603 break;
1604 case OP_FCVTL:
1605 case OP_FCVTL2:
1606 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1607 inst->operands[0].qualifier = qualifier;
1608 break;
1609 default:
1610 assert (0);
1611 return 0;
1612 }
1613
1614 return 1;
1615 }
1616
1617 /* Decode size[0], i.e. bit 22, for
1618 e.g. FCVTXN <Vb><d>, <Va><n>. */
1619
1620 static int
1621 decode_asisd_fcvtxn (aarch64_inst *inst)
1622 {
1623 aarch64_field field = {0, 0};
1624 gen_sub_field (FLD_size, 0, 1, &field);
1625 if (!extract_field_2 (&field, inst->value, 0))
1626 return 0;
1627 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1628 return 1;
1629 }
1630
1631 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1632 static int
1633 decode_fcvt (aarch64_inst *inst)
1634 {
1635 enum aarch64_opnd_qualifier qualifier;
1636 aarch64_insn value;
1637 const aarch64_field field = {15, 2};
1638
1639 /* opc dstsize */
1640 value = extract_field_2 (&field, inst->value, 0);
1641 switch (value)
1642 {
1643 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1644 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1645 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1646 default: return 0;
1647 }
1648 inst->operands[0].qualifier = qualifier;
1649
1650 return 1;
1651 }
1652
1653 /* Do miscellaneous decodings that are not common enough to be driven by
1654 flags. */
1655
1656 static int
1657 do_misc_decoding (aarch64_inst *inst)
1658 {
1659 switch (inst->opcode->op)
1660 {
1661 case OP_FCVT:
1662 return decode_fcvt (inst);
1663 case OP_FCVTN:
1664 case OP_FCVTN2:
1665 case OP_FCVTL:
1666 case OP_FCVTL2:
1667 return decode_asimd_fcvt (inst);
1668 case OP_FCVTXN_S:
1669 return decode_asisd_fcvtxn (inst);
1670 default:
1671 return 0;
1672 }
1673 }
1674
1675 /* Opcodes that have fields shared by multiple operands are usually flagged;
1676 in this function we detect such flags, decode the related
1677 field(s) and store the information in one of the related operands. The
1678 'one' operand is not an arbitrary operand, but one of the operands that can
1679 accommodate all the information that has been decoded. */
1680
1681 static int
1682 do_special_decoding (aarch64_inst *inst)
1683 {
1684 int idx;
1685 aarch64_insn value;
1686 /* Condition for truly conditionally executed instructions, e.g. b.cond. */
1687 if (inst->opcode->flags & F_COND)
1688 {
1689 value = extract_field (FLD_cond2, inst->value, 0);
1690 inst->cond = get_cond_from_value (value);
1691 }
1692 /* 'sf' field. */
1693 if (inst->opcode->flags & F_SF)
1694 {
1695 idx = select_operand_for_sf_field_coding (inst->opcode);
1696 value = extract_field (FLD_sf, inst->value, 0);
1697 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1698 if ((inst->opcode->flags & F_N)
1699 && extract_field (FLD_N, inst->value, 0) != value)
1700 return 0;
1701 }
1702 /* 'sf' field. */
1703 if (inst->opcode->flags & F_LSE_SZ)
1704 {
1705 idx = select_operand_for_sf_field_coding (inst->opcode);
1706 value = extract_field (FLD_lse_sz, inst->value, 0);
1707 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1708 }
1709 /* size:Q fields. */
1710 if (inst->opcode->flags & F_SIZEQ)
1711 return decode_sizeq (inst);
1712
1713 if (inst->opcode->flags & F_FPTYPE)
1714 {
1715 idx = select_operand_for_fptype_field_coding (inst->opcode);
1716 value = extract_field (FLD_type, inst->value, 0);
1717 switch (value)
1718 {
1719 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
1720 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
1721 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
1722 default: return 0;
1723 }
1724 }
1725
1726 if (inst->opcode->flags & F_SSIZE)
1727 {
1728 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
1729 of the base opcode. */
1730 aarch64_insn mask;
1731 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1732 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1733 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
1734 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
1735 /* For most related instructions, the 'size' field is fully available for
1736 operand encoding. */
1737 if (mask == 0x3)
1738 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
1739 else
1740 {
1741 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1742 candidates);
1743 inst->operands[idx].qualifier
1744 = get_qualifier_from_partial_encoding (value, candidates, mask);
1745 }
1746 }
1747
1748 if (inst->opcode->flags & F_T)
1749 {
1750 /* Num of consecutive '0's on the right side of imm5<3:0>. */
1751 int num = 0;
1752 unsigned val, Q;
1753 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1754 == AARCH64_OPND_CLASS_SIMD_REG);
1755 /* imm5<3:0> q <t>
1756 0000 x reserved
1757 xxx1 0 8b
1758 xxx1 1 16b
1759 xx10 0 4h
1760 xx10 1 8h
1761 x100 0 2s
1762 x100 1 4s
1763 1000 0 reserved
1764 1000 1 2d */
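/* For example, imm5 = xx10 with Q == 1 gives num == 1, so
   (num << 1) | Q == 3, i.e. the 8H arrangement in the table above.  */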
1765 val = extract_field (FLD_imm5, inst->value, 0);
1766 while ((val & 0x1) == 0 && ++num <= 3)
1767 val >>= 1;
1768 if (num > 3)
1769 return 0;
1770 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
1771 inst->operands[0].qualifier =
1772 get_vreg_qualifier_from_value ((num << 1) | Q);
1773 }
1774
1775 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1776 {
1777 /* Use Rt to encode in the case of e.g.
1778 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1779 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1780 if (idx == -1)
1781 {
1782 /* Otherwise use the result operand, which has to be an integer
1783 register. */
1784 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1785 == AARCH64_OPND_CLASS_INT_REG);
1786 idx = 0;
1787 }
1788 assert (idx == 0 || idx == 1);
1789 value = extract_field (FLD_Q, inst->value, 0);
1790 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1791 }
1792
1793 if (inst->opcode->flags & F_LDS_SIZE)
1794 {
1795 aarch64_field field = {0, 0};
1796 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1797 == AARCH64_OPND_CLASS_INT_REG);
1798 gen_sub_field (FLD_opc, 0, 1, &field);
1799 value = extract_field_2 (&field, inst->value, 0);
1800 inst->operands[0].qualifier
1801 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
1802 }
1803
1804 /* Miscellaneous decoding; done as the last step. */
1805 if (inst->opcode->flags & F_MISC)
1806 return do_misc_decoding (inst);
1807
1808 return 1;
1809 }
1810
1811 /* Converters that change a real opcode instruction into its alias form. */
1812
1813 /* ROR <Wd>, <Ws>, #<shift>
1814 is equivalent to:
1815 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
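/* For example, EXTR W0, W1, W1, #7 is disassembled as ROR W0, W1, #7;
   when the two source registers differ, EXTR is printed as-is.  */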
1816 static int
1817 convert_extr_to_ror (aarch64_inst *inst)
1818 {
1819 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1820 {
1821 copy_operand_info (inst, 2, 3);
1822 inst->operands[3].type = AARCH64_OPND_NIL;
1823 return 1;
1824 }
1825 return 0;
1826 }
1827
1828 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1829 is equivalent to:
1830 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
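/* For example, USHLL V0.8H, V1.8B, #0 is disassembled as UXTL V0.8H, V1.8B,
   and SSHLL2 V0.4S, V1.8H, #0 as SXTL2 V0.4S, V1.8H.  */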
1831 static int
1832 convert_shll_to_xtl (aarch64_inst *inst)
1833 {
1834 if (inst->operands[2].imm.value == 0)
1835 {
1836 inst->operands[2].type = AARCH64_OPND_NIL;
1837 return 1;
1838 }
1839 return 0;
1840 }
1841
1842 /* Convert
1843 UBFM <Xd>, <Xn>, #<shift>, #63
1844 to
1845 LSR <Xd>, <Xn>, #<shift>. */
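/* For example, UBFM X0, X1, #8, #63 is disassembled as LSR X0, X1, #8;
   the same conversion turns SBFM into ASR (see convert_to_alias).  */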
1846 static int
1847 convert_bfm_to_sr (aarch64_inst *inst)
1848 {
1849 int64_t imms, val;
1850
1851 imms = inst->operands[3].imm.value;
1852 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1853 if (imms == val)
1854 {
1855 inst->operands[3].type = AARCH64_OPND_NIL;
1856 return 1;
1857 }
1858
1859 return 0;
1860 }
1861
1862 /* Convert MOV to ORR. */
1863 static int
1864 convert_orr_to_mov (aarch64_inst *inst)
1865 {
1866 /* MOV <Vd>.<T>, <Vn>.<T>
1867 is equivalent to:
1868 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1869 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1870 {
1871 inst->operands[2].type = AARCH64_OPND_NIL;
1872 return 1;
1873 }
1874 return 0;
1875 }
1876
1877 /* When <imms> >= <immr>, the instruction written:
1878 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1879 is equivalent to:
1880 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
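/* For example, SBFM X0, X1, #4, #11 is disassembled as SBFX X0, X1, #4, #8
   (lsb 4, width 11 - 4 + 1 = 8).  */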
1881
1882 static int
1883 convert_bfm_to_bfx (aarch64_inst *inst)
1884 {
1885 int64_t immr, imms;
1886
1887 immr = inst->operands[2].imm.value;
1888 imms = inst->operands[3].imm.value;
1889 if (imms >= immr)
1890 {
1891 int64_t lsb = immr;
1892 inst->operands[2].imm.value = lsb;
1893 inst->operands[3].imm.value = imms + 1 - lsb;
1894 /* The two opcodes have different qualifiers for
1895 the immediate operands; reset to help the checking. */
1896 reset_operand_qualifier (inst, 2);
1897 reset_operand_qualifier (inst, 3);
1898 return 1;
1899 }
1900
1901 return 0;
1902 }
1903
1904 /* When <imms> < <immr>, the instruction written:
1905 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1906 is equivalent to:
1907 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
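/* For example, SBFM X0, X1, #60, #3 is disassembled as SBFIZ X0, X1, #4, #4,
   since (64 - 4) & 0x3f == 60 and 4 - 1 == 3.  */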
1908
1909 static int
1910 convert_bfm_to_bfi (aarch64_inst *inst)
1911 {
1912 int64_t immr, imms, val;
1913
1914 immr = inst->operands[2].imm.value;
1915 imms = inst->operands[3].imm.value;
1916 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
1917 if (imms < immr)
1918 {
1919 inst->operands[2].imm.value = (val - immr) & (val - 1);
1920 inst->operands[3].imm.value = imms + 1;
1921 /* The two opcodes have different qualifiers for
1922 the immediate operands; reset to help the checking. */
1923 reset_operand_qualifier (inst, 2);
1924 reset_operand_qualifier (inst, 3);
1925 return 1;
1926 }
1927
1928 return 0;
1929 }
1930
1931 /* The instruction written:
1932 BFC <Xd>, #<lsb>, #<width>
1933 is equivalent to:
1934 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
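/* For example, BFM X0, XZR, #60, #3 is disassembled as BFC X0, #4, #4.  */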
1935
1936 static int
1937 convert_bfm_to_bfc (aarch64_inst *inst)
1938 {
1939 int64_t immr, imms, val;
1940
1941 /* Should have been assured by the base opcode value. */
1942 assert (inst->operands[1].reg.regno == 0x1f);
1943
1944 immr = inst->operands[2].imm.value;
1945 imms = inst->operands[3].imm.value;
1946 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
1947 if (imms < immr)
1948 {
1949 /* Drop XZR from the second operand. */
1950 copy_operand_info (inst, 1, 2);
1951 copy_operand_info (inst, 2, 3);
1952 inst->operands[3].type = AARCH64_OPND_NIL;
1953
1954 /* Recalculate the immediates. */
1955 inst->operands[1].imm.value = (val - immr) & (val - 1);
1956 inst->operands[2].imm.value = imms + 1;
1957
1958 /* The two opcodes have different qualifiers for the operands; reset to
1959 help the checking. */
1960 reset_operand_qualifier (inst, 1);
1961 reset_operand_qualifier (inst, 2);
1962 reset_operand_qualifier (inst, 3);
1963
1964 return 1;
1965 }
1966
1967 return 0;
1968 }
1969
1970 /* The instruction written:
1971 LSL <Xd>, <Xn>, #<shift>
1972 is equivalent to:
1973 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
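/* For example, UBFM X0, X1, #60, #59 is disassembled as LSL X0, X1, #4.  */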
1974
1975 static int
1976 convert_ubfm_to_lsl (aarch64_inst *inst)
1977 {
1978 int64_t immr = inst->operands[2].imm.value;
1979 int64_t imms = inst->operands[3].imm.value;
1980 int64_t val
1981 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1982
1983 if ((immr == 0 && imms == val) || immr == imms + 1)
1984 {
1985 inst->operands[3].type = AARCH64_OPND_NIL;
1986 inst->operands[2].imm.value = val - imms;
1987 return 1;
1988 }
1989
1990 return 0;
1991 }
1992
1993 /* CINC <Wd>, <Wn>, <cond>
1994 is equivalent to:
1995 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
1996 where <cond> is not AL or NV. */
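/* For example, CSINC W0, W1, W1, NE is disassembled as CINC W0, W1, EQ;
   CINV and CNEG are handled the same way for CSINV and CSNEG.  */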
1997
1998 static int
1999 convert_from_csel (aarch64_inst *inst)
2000 {
2001 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2002 && (inst->operands[3].cond->value & 0xe) != 0xe)
2003 {
2004 copy_operand_info (inst, 2, 3);
2005 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2006 inst->operands[3].type = AARCH64_OPND_NIL;
2007 return 1;
2008 }
2009 return 0;
2010 }
2011
2012 /* CSET <Wd>, <cond>
2013 is equivalent to:
2014 CSINC <Wd>, WZR, WZR, invert(<cond>)
2015 where <cond> is not AL or NV. */
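/* For example, CSINC W0, WZR, WZR, NE is disassembled as CSET W0, EQ;
   CSETM is handled the same way for CSINV.  */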
2016
2017 static int
2018 convert_csinc_to_cset (aarch64_inst *inst)
2019 {
2020 if (inst->operands[1].reg.regno == 0x1f
2021 && inst->operands[2].reg.regno == 0x1f
2022 && (inst->operands[3].cond->value & 0xe) != 0xe)
2023 {
2024 copy_operand_info (inst, 1, 3);
2025 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2026 inst->operands[3].type = AARCH64_OPND_NIL;
2027 inst->operands[2].type = AARCH64_OPND_NIL;
2028 return 1;
2029 }
2030 return 0;
2031 }
2032
2033 /* MOV <Wd>, #<imm>
2034 is equivalent to:
2035 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2036
2037 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2038 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2039 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2040 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2041 machine-instruction mnemonic must be used. */
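/* For example, MOVZ X0, #0x2a, LSL #16 is disassembled as MOV X0, #0x2a0000,
   whereas MOVN X0, #0x0, LSL #16 keeps the MOVN mnemonic.  */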
2042
2043 static int
2044 convert_movewide_to_mov (aarch64_inst *inst)
2045 {
2046 uint64_t value = inst->operands[1].imm.value;
2047 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2048 if (value == 0 && inst->operands[1].shifter.amount != 0)
2049 return 0;
2050 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2051 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2052 value <<= inst->operands[1].shifter.amount;
2053 /* As this is an alias converter, bear in mind that INST->OPCODE
2054 is the opcode of the real instruction. */
2055 if (inst->opcode->op == OP_MOVN)
2056 {
2057 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2058 value = ~value;
2059 /* A MOVN has an immediate that could be encoded by MOVZ. */
2060 if (aarch64_wide_constant_p (value, is32, NULL) == TRUE)
2061 return 0;
2062 }
2063 inst->operands[1].imm.value = value;
2064 inst->operands[1].shifter.amount = 0;
2065 return 1;
2066 }
2067
2068 /* MOV <Wd>, #<imm>
2069 is equivalent to:
2070 ORR <Wd>, WZR, #<imm>.
2071
2072 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2073 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2074 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2075 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2076 machine-instruction mnemonic must be used. */
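/* For example, ORR X0, XZR, #0xff00ff00ff00ff00 is disassembled as
   MOV X0, #0xff00ff00ff00ff00, since that immediate cannot be produced by a
   single MOVZ or MOVN.  */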
2077
2078 static int
2079 convert_movebitmask_to_mov (aarch64_inst *inst)
2080 {
2081 int is32;
2082 uint64_t value;
2083
2084 /* Should have been assured by the base opcode value. */
2085 assert (inst->operands[1].reg.regno == 0x1f);
2086 copy_operand_info (inst, 1, 2);
2087 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2088 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2089 value = inst->operands[1].imm.value;
2090 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2091 instruction. */
2092 if (inst->operands[0].reg.regno != 0x1f
2093 && (aarch64_wide_constant_p (value, is32, NULL) == TRUE
2094 || aarch64_wide_constant_p (~value, is32, NULL) == TRUE))
2095 return 0;
2096
2097 inst->operands[2].type = AARCH64_OPND_NIL;
2098 return 1;
2099 }
2100
2101 /* Some alias opcodes are disassembled by being converted from their real form.
2102 N.B. INST->OPCODE is the real opcode rather than the alias. */
2103
2104 static int
2105 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2106 {
2107 switch (alias->op)
2108 {
2109 case OP_ASR_IMM:
2110 case OP_LSR_IMM:
2111 return convert_bfm_to_sr (inst);
2112 case OP_LSL_IMM:
2113 return convert_ubfm_to_lsl (inst);
2114 case OP_CINC:
2115 case OP_CINV:
2116 case OP_CNEG:
2117 return convert_from_csel (inst);
2118 case OP_CSET:
2119 case OP_CSETM:
2120 return convert_csinc_to_cset (inst);
2121 case OP_UBFX:
2122 case OP_BFXIL:
2123 case OP_SBFX:
2124 return convert_bfm_to_bfx (inst);
2125 case OP_SBFIZ:
2126 case OP_BFI:
2127 case OP_UBFIZ:
2128 return convert_bfm_to_bfi (inst);
2129 case OP_BFC:
2130 return convert_bfm_to_bfc (inst);
2131 case OP_MOV_V:
2132 return convert_orr_to_mov (inst);
2133 case OP_MOV_IMM_WIDE:
2134 case OP_MOV_IMM_WIDEN:
2135 return convert_movewide_to_mov (inst);
2136 case OP_MOV_IMM_LOG:
2137 return convert_movebitmask_to_mov (inst);
2138 case OP_ROR_IMM:
2139 return convert_extr_to_ror (inst);
2140 case OP_SXTL:
2141 case OP_SXTL2:
2142 case OP_UXTL:
2143 case OP_UXTL2:
2144 return convert_shll_to_xtl (inst);
2145 default:
2146 return 0;
2147 }
2148 }
2149
2150 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2151 aarch64_inst *, int);
2152
2153 /* Given the instruction information in *INST, check if the instruction has
2154 any alias form that can be used to represent *INST. If the answer is yes,
2155 update *INST to be in the form of the determined alias. */
2156
2157 /* In the opcode description table, the following flags are used in opcode
2158 entries to help establish the relations between the real and alias opcodes:
2159
2160 F_ALIAS: opcode is an alias
2161 F_HAS_ALIAS: opcode has alias(es)
2162 F_P1
2163 F_P2
2164 F_P3: Disassembly preference priority 1-3 (the larger the number,
2165 the higher the priority).  If nothing is specified, the priority
2166 defaults to 0, i.e. the lowest priority.
2167
2168 Although the relation between the machine and the alias instructions is not
2169 explicitly described, it can be easily determined from the base opcode
2170 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2171 description entries:
2172
2173 The mask of an alias opcode must be equal to or a super-set (i.e. more
2174 constrained) of that of the aliased opcode; the same holds for the base opcode value.
2175
2176 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2177 && (opcode->mask & real->mask) == real->mask
2178 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2179 then OPCODE is an alias of, and only of, the REAL instruction
2180
2181 The alias relationship is forced to be flat-structured to keep the related
2182 algorithms simple; an opcode entry cannot be flagged with both F_ALIAS
2183 and F_HAS_ALIAS.
2184
2185 During disassembly, the decoding decision tree (in
2186 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2187 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2188 not specified), the disassembler will check whether any alias instruction
2189 exists for this real instruction.  If there is, the disassembler will try
2190 to disassemble the 32-bit binary again using the alias's rule, or try to
2191 convert the IR to the form of the alias.  In the case of multiple aliases,
2192 the aliases are tried one by one from the highest priority (currently the
2193 flag F_P3) to the lowest priority (no priority flag), and the first one that succeeds is adopted.
2194
2195 You may ask why there is a need to convert the IR from one form to
2196 another when handling certain aliases.  On the one hand, it avoids adding
2197 more operand code to handle unusual encoding/decoding; on the other hand,
2198 during disassembly the conversion is an effective way to check the
2199 conditions of an alias (as an alias may be adopted only if certain
2200 conditions are met).
2201
2202 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2203 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2204 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
2205
2206 static void
2207 determine_disassembling_preference (struct aarch64_inst *inst)
2208 {
2209 const aarch64_opcode *opcode;
2210 const aarch64_opcode *alias;
2211
2212 opcode = inst->opcode;
2213
2214 /* This opcode does not have an alias, so use itself. */
2215 if (opcode_has_alias (opcode) == FALSE)
2216 return;
2217
2218 alias = aarch64_find_alias_opcode (opcode);
2219 assert (alias);
2220
2221 #ifdef DEBUG_AARCH64
2222 if (debug_dump)
2223 {
2224 const aarch64_opcode *tmp = alias;
2225 printf ("#### LIST ordered: ");
2226 while (tmp)
2227 {
2228 printf ("%s, ", tmp->name);
2229 tmp = aarch64_find_next_alias_opcode (tmp);
2230 }
2231 printf ("\n");
2232 }
2233 #endif /* DEBUG_AARCH64 */
2234
2235 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2236 {
2237 DEBUG_TRACE ("try %s", alias->name);
2238 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2239
2240 /* An alias can be a pseudo opcode which will never be used in the
2241 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2242 aliasing AND. */
2243 if (pseudo_opcode_p (alias))
2244 {
2245 DEBUG_TRACE ("skip pseudo %s", alias->name);
2246 continue;
2247 }
2248
2249 if ((inst->value & alias->mask) != alias->opcode)
2250 {
2251 DEBUG_TRACE ("skip %s as base opcode does not match", alias->name);
2252 continue;
2253 }
2254 /* No need to do any complicated transformation on operands, if the alias
2255 opcode does not have any operand. */
2256 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2257 {
2258 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2259 aarch64_replace_opcode (inst, alias);
2260 return;
2261 }
2262 if (alias->flags & F_CONV)
2263 {
2264 aarch64_inst copy;
2265 memcpy (&copy, inst, sizeof (aarch64_inst));
2266 /* ALIAS is the preference as long as the instruction can be
2267 successfully converted to the form of ALIAS. */
2268 if (convert_to_alias (&copy, alias) == 1)
2269 {
2270 aarch64_replace_opcode (&copy, alias);
2271 assert (aarch64_match_operands_constraint (&copy, NULL));
2272 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2273 memcpy (inst, &copy, sizeof (aarch64_inst));
2274 return;
2275 }
2276 }
2277 else
2278 {
2279 /* Directly decode the alias opcode. */
2280 aarch64_inst temp;
2281 memset (&temp, '\0', sizeof (aarch64_inst));
2282 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
2283 {
2284 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2285 memcpy (inst, &temp, sizeof (aarch64_inst));
2286 return;
2287 }
2288 }
2289 }
2290 }
2291
2292 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2293 fails, which means that CODE is not an instruction of OPCODE; otherwise
2294 return 1.
2295
2296 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2297 determined and used to disassemble CODE; this is done just before the
2298 return. */
2299
2300 static int
2301 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2302 aarch64_inst *inst, int noaliases_p)
2303 {
2304 int i;
2305
2306 DEBUG_TRACE ("enter with %s", opcode->name);
2307
2308 assert (opcode && inst);
2309
2310 /* Check the base opcode. */
2311 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2312 {
2313 DEBUG_TRACE ("base opcode match FAIL");
2314 goto decode_fail;
2315 }
2316
2317 /* Clear inst. */
2318 memset (inst, '\0', sizeof (aarch64_inst));
2319
2320 inst->opcode = opcode;
2321 inst->value = code;
2322
2323 /* Assign operand codes and indexes. */
2324 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2325 {
2326 if (opcode->operands[i] == AARCH64_OPND_NIL)
2327 break;
2328 inst->operands[i].type = opcode->operands[i];
2329 inst->operands[i].idx = i;
2330 }
2331
2332 /* Call the opcode decoder indicated by flags. */
2333 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2334 {
2335 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2336 goto decode_fail;
2337 }
2338
2339 /* Call operand decoders. */
2340 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2341 {
2342 const aarch64_operand *opnd;
2343 enum aarch64_opnd type;
2344
2345 type = opcode->operands[i];
2346 if (type == AARCH64_OPND_NIL)
2347 break;
2348 opnd = &aarch64_operands[type];
2349 if (operand_has_extractor (opnd)
2350 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
2351 {
2352 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2353 goto decode_fail;
2354 }
2355 }
2356
2357 /* If the opcode has a verifier, then check it now. */
2358 if (opcode->verifier && ! opcode->verifier (opcode, code))
2359 {
2360 DEBUG_TRACE ("operand verifier FAIL");
2361 goto decode_fail;
2362 }
2363
2364 /* Match the qualifiers. */
2365 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2366 {
2367 /* Arriving here, the CODE has been determined as a valid instruction
2368 of OPCODE and *INST has been filled with information of this OPCODE
2369 instruction. Before the return, check if the instruction has any
2370 alias and should be disassembled in the form of its alias instead.
2371 If the answer is yes, *INST will be updated. */
2372 if (!noaliases_p)
2373 determine_disassembling_preference (inst);
2374 DEBUG_TRACE ("SUCCESS");
2375 return 1;
2376 }
2377 else
2378 {
2379 DEBUG_TRACE ("constraint matching FAIL");
2380 }
2381
2382 decode_fail:
2383 return 0;
2384 }
2385 \f
2386 /* This does some user-friendly fix-up to *INST.  It currently focuses on
2387 adjusting the qualifiers to help the printed instruction be
2388 recognized/understood more easily. */
2389
2390 static void
2391 user_friendly_fixup (aarch64_inst *inst)
2392 {
2393 switch (inst->opcode->iclass)
2394 {
2395 case testbranch:
2396 /* TBNZ Xn|Wn, #uimm6, label
2397 Test and Branch Not Zero: conditionally jumps to label if bit number
2398 uimm6 in register Xn is not zero. The bit number implies the width of
2399 the register, which may be written and should be disassembled as Wn if
2400 uimm6 is less than 32. Limited to a branch offset range of +/- 32KiB.
2401 */
2402 if (inst->operands[1].imm.value < 32)
2403 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2404 break;
2405 default: break;
2406 }
2407 }
2408
2409 /* Decode INSN and fill *INST with the instruction information.  An alias
2410 opcode may be filled in *INST if NOALIASES_P is FALSE.  Return zero on
2411 success. */
2412
2413 int
2414 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2415 bfd_boolean noaliases_p)
2416 {
2417 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2418
2419 #ifdef DEBUG_AARCH64
2420 if (debug_dump)
2421 {
2422 const aarch64_opcode *tmp = opcode;
2423 printf ("\n");
2424 DEBUG_TRACE ("opcode lookup:");
2425 while (tmp != NULL)
2426 {
2427 aarch64_verbose (" %s", tmp->name);
2428 tmp = aarch64_find_next_opcode (tmp);
2429 }
2430 }
2431 #endif /* DEBUG_AARCH64 */
2432
2433 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2434 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2435 opcode field and value; the difference is that one of them has an extra
2436 field as part of the opcode, while that field is used for operand encoding
2437 in the other opcode(s) ('immh' in the case of the example). */
2438 while (opcode != NULL)
2439 {
2440 /* But only one opcode can be decoded successfully, as the
2441 decoding routine checks the constraints carefully. */
2442 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p) == 1)
2443 return ERR_OK;
2444 opcode = aarch64_find_next_opcode (opcode);
2445 }
2446
2447 return ERR_UND;
2448 }
2449
2450 /* Print operands. */
2451
2452 static void
2453 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2454 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2455 {
2456 int i, pcrel_p, num_printed;
2457 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2458 {
2459 char str[128];
2460 /* We give priority to the operand info in the opcode, however we also
2461 look into the inst->operands to support the disassembling of the
2462 optional operand.
2463 The two operand codes should be the same in all cases, apart from
2464 when the operand can be optional. */
2465 if (opcode->operands[i] == AARCH64_OPND_NIL
2466 || opnds[i].type == AARCH64_OPND_NIL)
2467 break;
2468
2469 /* Generate the operand string in STR. */
2470 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
2471 &info->target);
2472
2473 /* Print the delimiter (taking account of omitted operand(s)). */
2474 if (str[0] != '\0')
2475 (*info->fprintf_func) (info->stream, "%s",
2476 num_printed++ == 0 ? "\t" : ", ");
2477
2478 /* Print the operand. */
2479 if (pcrel_p)
2480 (*info->print_address_func) (info->target, info);
2481 else
2482 (*info->fprintf_func) (info->stream, "%s", str);
2483 }
2484 }
2485
2486 /* Print the instruction mnemonic name. */
2487
2488 static void
2489 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2490 {
2491 if (inst->opcode->flags & F_COND)
2492 {
2493 /* For instructions that are truly conditionally executed, e.g. b.cond,
2494 prepare the full mnemonic name with the corresponding condition
2495 suffix. */
2496 char name[8], *ptr;
2497 size_t len;
2498
2499 ptr = strchr (inst->opcode->name, '.');
2500 assert (ptr && inst->cond);
2501 len = ptr - inst->opcode->name;
2502 assert (len < 8);
2503 strncpy (name, inst->opcode->name, len);
2504 name [len] = '\0';
2505 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2506 }
2507 else
2508 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2509 }
2510
2511 /* Print the instruction according to *INST. */
2512
2513 static void
2514 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2515 struct disassemble_info *info)
2516 {
2517 print_mnemonic_name (inst, info);
2518 print_operands (pc, inst->opcode, inst->operands, info);
2519 }
2520
2521 /* Entry-point of the instruction disassembler and printer. */
2522
2523 static void
2524 print_insn_aarch64_word (bfd_vma pc,
2525 uint32_t word,
2526 struct disassemble_info *info)
2527 {
2528 static const char *err_msg[6] =
2529 {
2530 [ERR_OK] = "_",
2531 [-ERR_UND] = "undefined",
2532 [-ERR_UNP] = "unpredictable",
2533 [-ERR_NYI] = "NYI"
2534 };
2535
2536 int ret;
2537 aarch64_inst inst;
2538
2539 info->insn_info_valid = 1;
2540 info->branch_delay_insns = 0;
2541 info->data_size = 0;
2542 info->target = 0;
2543 info->target2 = 0;
2544
2545 if (info->flags & INSN_HAS_RELOC)
2546 /* If the instruction has a reloc associated with it, then
2547 the offset field in the instruction will actually be the
2548 addend for the reloc. (If we are using REL type relocs).
2549 In such cases, we can ignore the pc when computing
2550 addresses, since the addend is not currently pc-relative. */
2551 pc = 0;
2552
2553 ret = aarch64_decode_insn (word, &inst, no_aliases);
2554
2555 if (((word >> 21) & 0x3ff) == 1)
2556 {
2557 /* RESERVED for ALES. */
2558 assert (ret != ERR_OK);
2559 ret = ERR_NYI;
2560 }
2561
2562 switch (ret)
2563 {
2564 case ERR_UND:
2565 case ERR_UNP:
2566 case ERR_NYI:
2567 /* Handle undefined instructions. */
2568 info->insn_type = dis_noninsn;
2569 (*info->fprintf_func) (info->stream, ".inst\t0x%08x ; %s",
2570 word, err_msg[-ret]);
2571 break;
2572 case ERR_OK:
2573 user_friendly_fixup (&inst);
2574 print_aarch64_insn (pc, &inst, info);
2575 break;
2576 default:
2577 abort ();
2578 }
2579 }
2580
2581 /* Disallow mapping symbols ($x, $d etc) from
2582 being displayed in symbol relative addresses. */
2583
2584 bfd_boolean
2585 aarch64_symbol_is_valid (asymbol * sym,
2586 struct disassemble_info * info ATTRIBUTE_UNUSED)
2587 {
2588 const char * name;
2589
2590 if (sym == NULL)
2591 return FALSE;
2592
2593 name = bfd_asymbol_name (sym);
2594
2595 return name
2596 && (name[0] != '$'
2597 || (name[1] != 'x' && name[1] != 'd')
2598 || (name[2] != '\0' && name[2] != '.'));
2599 }
2600
2601 /* Print data bytes on INFO->STREAM. */
2602
2603 static void
2604 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
2605 uint32_t word,
2606 struct disassemble_info *info)
2607 {
2608 switch (info->bytes_per_chunk)
2609 {
2610 case 1:
2611 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
2612 break;
2613 case 2:
2614 info->fprintf_func (info->stream, ".short\t0x%04x", word);
2615 break;
2616 case 4:
2617 info->fprintf_func (info->stream, ".word\t0x%08x", word);
2618 break;
2619 default:
2620 abort ();
2621 }
2622 }
2623
2624 /* Try to infer the code or data type from a symbol.
2625 Returns nonzero if *MAP_TYPE was set. */
2626
2627 static int
2628 get_sym_code_type (struct disassemble_info *info, int n,
2629 enum map_type *map_type)
2630 {
2631 elf_symbol_type *es;
2632 unsigned int type;
2633 const char *name;
2634
2635 es = *(elf_symbol_type **)(info->symtab + n);
2636 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
2637
2638 /* If the symbol has function type then use that. */
2639 if (type == STT_FUNC)
2640 {
2641 *map_type = MAP_INSN;
2642 return TRUE;
2643 }
2644
2645 /* Check for mapping symbols. */
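/* AArch64 ELF mapping symbols are "$x" (start of A64 code) and "$d"
   (start of data), optionally followed by a dot and further characters.  */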
2646 name = bfd_asymbol_name(info->symtab[n]);
2647 if (name[0] == '$'
2648 && (name[1] == 'x' || name[1] == 'd')
2649 && (name[2] == '\0' || name[2] == '.'))
2650 {
2651 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
2652 return TRUE;
2653 }
2654
2655 return FALSE;
2656 }
2657
2658 /* Entry-point of the AArch64 disassembler. */
2659
2660 int
2661 print_insn_aarch64 (bfd_vma pc,
2662 struct disassemble_info *info)
2663 {
2664 bfd_byte buffer[INSNLEN];
2665 int status;
2666 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
2667 bfd_boolean found = FALSE;
2668 unsigned int size = 4;
2669 unsigned long data;
2670
2671 if (info->disassembler_options)
2672 {
2673 set_default_aarch64_dis_options (info);
2674
2675 parse_aarch64_dis_options (info->disassembler_options);
2676
2677 /* To avoid repeated parsing of these options, we remove them here. */
2678 info->disassembler_options = NULL;
2679 }
2680
2681 /* AArch64 instructions are always little-endian. */
2682 info->endian_code = BFD_ENDIAN_LITTLE;
2683
2684 /* First check the full symtab for a mapping symbol, even if there
2685 are no usable non-mapping symbols for this address. */
2686 if (info->symtab_size != 0
2687 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
2688 {
2689 enum map_type type = MAP_INSN;
2690 int last_sym = -1;
2691 bfd_vma addr;
2692 int n;
2693
2694 if (pc <= last_mapping_addr)
2695 last_mapping_sym = -1;
2696
2697 /* Start scanning at the start of the function, or wherever
2698 we finished last time. */
2699 n = info->symtab_pos + 1;
2700 if (n < last_mapping_sym)
2701 n = last_mapping_sym;
2702
2703 /* Scan up to the location being disassembled. */
2704 for (; n < info->symtab_size; n++)
2705 {
2706 addr = bfd_asymbol_value (info->symtab[n]);
2707 if (addr > pc)
2708 break;
2709 if ((info->section == NULL
2710 || info->section == info->symtab[n]->section)
2711 && get_sym_code_type (info, n, &type))
2712 {
2713 last_sym = n;
2714 found = TRUE;
2715 }
2716 }
2717
2718 if (!found)
2719 {
2720 n = info->symtab_pos;
2721 if (n < last_mapping_sym)
2722 n = last_mapping_sym;
2723
2724 /* No mapping symbol found at this address. Look backwards
2725 for a preceding one. */
2726 for (; n >= 0; n--)
2727 {
2728 if (get_sym_code_type (info, n, &type))
2729 {
2730 last_sym = n;
2731 found = TRUE;
2732 break;
2733 }
2734 }
2735 }
2736
2737 last_mapping_sym = last_sym;
2738 last_type = type;
2739
2740 /* Look a little bit ahead to see if we should print out
2741 less than four bytes of data. If there's a symbol,
2742 mapping or otherwise, after two bytes then don't
2743 print more. */
2744 if (last_type == MAP_DATA)
2745 {
2746 size = 4 - (pc & 3);
2747 for (n = last_sym + 1; n < info->symtab_size; n++)
2748 {
2749 addr = bfd_asymbol_value (info->symtab[n]);
2750 if (addr > pc)
2751 {
2752 if (addr - pc < size)
2753 size = addr - pc;
2754 break;
2755 }
2756 }
2757 /* If the next symbol is after three bytes, we need to
2758 print only part of the data, so that we can use either
2759 .byte or .short. */
2760 if (size == 3)
2761 size = (pc & 1) ? 1 : 2;
2762 }
2763 }
2764
2765 if (last_type == MAP_DATA)
2766 {
2767 /* size was set above. */
2768 info->bytes_per_chunk = size;
2769 info->display_endian = info->endian;
2770 printer = print_insn_data;
2771 }
2772 else
2773 {
2774 info->bytes_per_chunk = size = INSNLEN;
2775 info->display_endian = info->endian_code;
2776 printer = print_insn_aarch64_word;
2777 }
2778
2779 status = (*info->read_memory_func) (pc, buffer, size, info);
2780 if (status != 0)
2781 {
2782 (*info->memory_error_func) (status, pc, info);
2783 return -1;
2784 }
2785
2786 data = bfd_get_bits (buffer, size * 8,
2787 info->display_endian == BFD_ENDIAN_BIG);
2788
2789 (*printer) (pc, data, info);
2790
2791 return size;
2792 }
2793 \f
2794 void
2795 print_aarch64_disassembler_options (FILE *stream)
2796 {
2797 fprintf (stream, _("\n\
2798 The following AARCH64 specific disassembler options are supported for use\n\
2799 with the -M switch (multiple options should be separated by commas):\n"));
2800
2801 fprintf (stream, _("\n\
2802 no-aliases Don't print instruction aliases.\n"));
2803
2804 fprintf (stream, _("\n\
2805 aliases Do print instruction aliases.\n"));
2806
2807 #ifdef DEBUG_AARCH64
2808 fprintf (stream, _("\n\
2809 debug_dump Temp switch for debug trace.\n"));
2810 #endif /* DEBUG_AARCH64 */
2811
2812 fprintf (stream, _("\n"));
2813 }