[AArch64] Add ARMv8.3 combined pointer authentication load instructions
[deliverable/binutils-gdb.git] opcodes/aarch64-dis.c
1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "dis-asm.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define ERR_OK 0
30 #define ERR_UND -1
31 #define ERR_UNP -3
32 #define ERR_NYI -5
33
34 #define INSNLEN 4
35
36 /* Cached mapping symbol state. */
37 enum map_type
38 {
39 MAP_INSN,
40 MAP_DATA
41 };
42
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
46
47 /* Other options */
48 static int no_aliases = 0; /* If set disassemble as most general inst. */
49 \f
50
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
61 {
62 no_aliases = 1;
63 return;
64 }
65
66 if (CONST_STRNEQ (option, "aliases"))
67 {
68 no_aliases = 0;
69 return;
70 }
71
72 #ifdef DEBUG_AARCH64
73 if (CONST_STRNEQ (option, "debug_dump"))
74 {
75 debug_dump = 1;
76 return;
77 }
78 #endif /* DEBUG_AARCH64 */
79
80 /* Invalid option. */
81 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
82 }
83
84 static void
85 parse_aarch64_dis_options (const char *options)
86 {
87 const char *option_end;
88
89 if (options == NULL)
90 return;
91
92 while (*options != '\0')
93 {
94 /* Skip empty options. */
95 if (*options == ',')
96 {
97 options++;
98 continue;
99 }
100
101 /* We know that *options is neither NUL nor a comma. */
102 option_end = options + 1;
103 while (*option_end != ',' && *option_end != '\0')
104 option_end++;
105
106 parse_aarch64_dis_option (options, option_end - options);
107
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
111 }
112 }
113 \f
114 /* Functions doing the instruction disassembling. */
115
116 /* The unnamed arguments consist of the number of fields and information about
117 these fields where the VALUE will be extracted from CODE and returned.
118 MASK can be zero or the base mask of the opcode.
119
120 N.B. the fields are required to be in such an order that the most significant
121 field for VALUE comes first, e.g. the <index> in
122 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123 is encoded in H:L:M; in that case the fields should be passed in
124 the order H, L, M. */
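/* For example, an index encoded in the H:L:M bits (most significant first)
   is extracted elsewhere in this file as:
     index = extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);  */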
125
126 aarch64_insn
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
128 {
129 uint32_t num;
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
132 va_list va;
133
134 va_start (va, mask);
135 num = va_arg (va, uint32_t);
136 assert (num <= 5);
137 aarch64_insn value = 0x0;
138 while (num--)
139 {
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
144 }
145 return value;
146 }
147
148 /* Extract the value of all fields in SELF->fields from instruction CODE.
149 The least significant bit comes from the final field. */
150
151 static aarch64_insn
152 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
153 {
154 aarch64_insn value;
155 unsigned int i;
156 enum aarch64_field_kind kind;
157
158 value = 0;
159 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
160 {
161 kind = self->fields[i];
162 value <<= fields[kind].width;
163 value |= extract_field (kind, code, 0);
164 }
165 return value;
166 }
167
168 /* Sign-extend bit I of VALUE. */
169 static inline int32_t
170 sign_extend (aarch64_insn value, unsigned i)
171 {
172 uint32_t ret = value;
173
174 assert (i < 32);
175 if ((value >> i) & 0x1)
176 {
177 uint32_t val = (uint32_t)(-1) << i;
178 ret = ret | val;
179 }
180 return (int32_t) ret;
181 }
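/* For example, sign_extend (0x1ff, 8) yields -1, while sign_extend (0x0ff, 8)
   yields 255, since bit 8 of the latter value is clear.  */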
182
183 /* N.B. the following inline helper functions create a dependency on the
184 order of operand qualifier enumerators. */
185
186 /* Given VALUE, return qualifier for a general purpose register. */
187 static inline enum aarch64_opnd_qualifier
188 get_greg_qualifier_from_value (aarch64_insn value)
189 {
190 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
191 assert (value <= 0x1
192 && aarch64_get_qualifier_standard_value (qualifier) == value);
193 return qualifier;
194 }
195
196 /* Given VALUE, return qualifier for a vector register. This does not support
197 decoding instructions that accept the 2H vector type. */
198
199 static inline enum aarch64_opnd_qualifier
200 get_vreg_qualifier_from_value (aarch64_insn value)
201 {
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
203
204 /* Instructions using vector type 2H should not call this function. Skip over
205 the 2H qualifier. */
206 if (qualifier >= AARCH64_OPND_QLF_V_2H)
207 qualifier += 1;
208
209 assert (value <= 0x8
210 && aarch64_get_qualifier_standard_value (qualifier) == value);
211 return qualifier;
212 }
213
214 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
215 static inline enum aarch64_opnd_qualifier
216 get_sreg_qualifier_from_value (aarch64_insn value)
217 {
218 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
219
220 assert (value <= 0x4
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
222 return qualifier;
223 }
224
225 /* Given the instruction in *INST, which is probably half way through the
226 decoding, our caller wants to know the expected qualifier for operand
227 I. Return such a qualifier if we can establish it; otherwise return
228 AARCH64_OPND_QLF_NIL. */
229
230 static aarch64_opnd_qualifier_t
231 get_expected_qualifier (const aarch64_inst *inst, int i)
232 {
233 aarch64_opnd_qualifier_seq_t qualifiers;
234 /* Should not be called if the qualifier is known. */
235 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
236 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
237 i, qualifiers))
238 return qualifiers[i];
239 else
240 return AARCH64_OPND_QLF_NIL;
241 }
242
243 /* Operand extractors. */
244
245 int
246 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
247 const aarch64_insn code,
248 const aarch64_inst *inst ATTRIBUTE_UNUSED)
249 {
250 info->reg.regno = extract_field (self->fields[0], code, 0);
251 return 1;
252 }
253
254 int
255 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
256 const aarch64_insn code ATTRIBUTE_UNUSED,
257 const aarch64_inst *inst ATTRIBUTE_UNUSED)
258 {
259 assert (info->idx == 1
260 || info->idx == 3);
261 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
262 return 1;
263 }
264
265 /* e.g. IC <ic_op>{, <Xt>}. */
266 int
267 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
268 const aarch64_insn code,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED)
270 {
271 info->reg.regno = extract_field (self->fields[0], code, 0);
272 assert (info->idx == 1
273 && (aarch64_get_operand_class (inst->operands[0].type)
274 == AARCH64_OPND_CLASS_SYSTEM));
275 /* This will make the constraint checking happy and more importantly will
276 help the disassembler determine whether this operand is optional or
277 not. */
278 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
279
280 return 1;
281 }
282
283 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
284 int
285 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
286 const aarch64_insn code,
287 const aarch64_inst *inst ATTRIBUTE_UNUSED)
288 {
289 /* regno */
290 info->reglane.regno = extract_field (self->fields[0], code,
291 inst->opcode->mask);
292
293 /* Index and/or type. */
294 if (inst->opcode->iclass == asisdone
295 || inst->opcode->iclass == asimdins)
296 {
297 if (info->type == AARCH64_OPND_En
298 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
299 {
300 unsigned shift;
301 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
302 assert (info->idx == 1); /* Vn */
303 aarch64_insn value = extract_field (FLD_imm4, code, 0);
304 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
305 info->qualifier = get_expected_qualifier (inst, info->idx);
306 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
307 info->reglane.index = value >> shift;
308 }
309 else
310 {
311 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
312 imm5<3:0> <V>
313 0000 RESERVED
314 xxx1 B
315 xx10 H
316 x100 S
317 1000 D */
318 int pos = -1;
319 aarch64_insn value = extract_field (FLD_imm5, code, 0);
320 while (++pos <= 3 && (value & 0x1) == 0)
321 value >>= 1;
322 if (pos > 3)
323 return 0;
324 info->qualifier = get_sreg_qualifier_from_value (pos);
325 info->reglane.index = (unsigned) (value >> 1);
326 }
327 }
328 else
329 {
330 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
332
333 /* Need information in other operand(s) to help decoding. */
334 info->qualifier = get_expected_qualifier (inst, info->idx);
335 switch (info->qualifier)
336 {
337 case AARCH64_OPND_QLF_S_H:
338 /* h:l:m */
339 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
340 FLD_M);
341 info->reglane.regno &= 0xf;
342 break;
343 case AARCH64_OPND_QLF_S_S:
344 /* h:l */
345 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
346 break;
347 case AARCH64_OPND_QLF_S_D:
348 /* H */
349 info->reglane.index = extract_field (FLD_H, code, 0);
350 break;
351 default:
352 return 0;
353 }
354 }
355
356 return 1;
357 }
358
359 int
360 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
361 const aarch64_insn code,
362 const aarch64_inst *inst ATTRIBUTE_UNUSED)
363 {
364 /* R */
365 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
366 /* len */
367 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
368 return 1;
369 }
370
371 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
372 int
373 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
374 aarch64_opnd_info *info, const aarch64_insn code,
375 const aarch64_inst *inst)
376 {
377 aarch64_insn value;
378 /* Number of elements in each structure to be loaded/stored. */
379 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
380
381 struct
382 {
383 unsigned is_reserved;
384 unsigned num_regs;
385 unsigned num_elements;
386 } data [] =
387 { {0, 4, 4},
388 {1, 4, 4},
389 {0, 4, 1},
390 {0, 4, 2},
391 {0, 3, 3},
392 {1, 3, 3},
393 {0, 3, 1},
394 {0, 1, 1},
395 {0, 2, 2},
396 {1, 2, 2},
397 {0, 2, 1},
398 };
399
400 /* Rt */
401 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
402 /* opcode */
403 value = extract_field (FLD_opcode, code, 0);
404 if (expected_num != data[value].num_elements || data[value].is_reserved)
405 return 0;
406 info->reglist.num_regs = data[value].num_regs;
407
408 return 1;
409 }
410
411 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
412 lanes instructions. */
413 int
414 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
415 aarch64_opnd_info *info, const aarch64_insn code,
416 const aarch64_inst *inst)
417 {
418 aarch64_insn value;
419
420 /* Rt */
421 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
422 /* S */
423 value = extract_field (FLD_S, code, 0);
424
425 /* Number of registers is equal to the number of elements in
426 each structure to be loaded/stored. */
427 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
428 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
429
430 /* Except when it is LD1R. */
431 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
432 info->reglist.num_regs = 2;
433
434 return 1;
435 }
436
437 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
438 load/store single element instructions. */
439 int
440 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
441 aarch64_opnd_info *info, const aarch64_insn code,
442 const aarch64_inst *inst ATTRIBUTE_UNUSED)
443 {
444 aarch64_field field = {0, 0};
445 aarch64_insn QSsize; /* fields Q:S:size. */
446 aarch64_insn opcodeh2; /* opcode<2:1> */
447
448 /* Rt */
449 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
450
451 /* Decode the index, opcode<2:1> and size. */
452 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
453 opcodeh2 = extract_field_2 (&field, code, 0);
454 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
455 switch (opcodeh2)
456 {
457 case 0x0:
458 info->qualifier = AARCH64_OPND_QLF_S_B;
459 /* Index encoded in "Q:S:size". */
460 info->reglist.index = QSsize;
461 break;
462 case 0x1:
463 if (QSsize & 0x1)
464 /* UND. */
465 return 0;
466 info->qualifier = AARCH64_OPND_QLF_S_H;
467 /* Index encoded in "Q:S:size<1>". */
468 info->reglist.index = QSsize >> 1;
469 break;
470 case 0x2:
471 if ((QSsize >> 1) & 0x1)
472 /* UND. */
473 return 0;
474 if ((QSsize & 0x1) == 0)
475 {
476 info->qualifier = AARCH64_OPND_QLF_S_S;
477 /* Index encoded in "Q:S". */
478 info->reglist.index = QSsize >> 2;
479 }
480 else
481 {
482 if (extract_field (FLD_S, code, 0))
483 /* UND */
484 return 0;
485 info->qualifier = AARCH64_OPND_QLF_S_D;
486 /* Index encoded in "Q". */
487 info->reglist.index = QSsize >> 3;
488 }
489 break;
490 default:
491 return 0;
492 }
493
494 info->reglist.has_index = 1;
495 info->reglist.num_regs = 0;
496 /* Number of registers is equal to the number of elements in
497 each structure to be loaded/stored. */
498 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
499 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
500
501 return 1;
502 }
503
504 /* Decode fields immh:immb and/or Q for e.g.
505 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
506 or SSHR <V><d>, <V><n>, #<shift>. */
507
508 int
509 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
510 aarch64_opnd_info *info, const aarch64_insn code,
511 const aarch64_inst *inst)
512 {
513 int pos;
514 aarch64_insn Q, imm, immh;
515 enum aarch64_insn_class iclass = inst->opcode->iclass;
516
517 immh = extract_field (FLD_immh, code, 0);
518 if (immh == 0)
519 return 0;
520 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
521 pos = 4;
522 /* Get highest set bit in immh. */
523 while (--pos >= 0 && (immh & 0x8) == 0)
524 immh <<= 1;
525
526 assert ((iclass == asimdshf || iclass == asisdshf)
527 && (info->type == AARCH64_OPND_IMM_VLSR
528 || info->type == AARCH64_OPND_IMM_VLSL));
529
530 if (iclass == asimdshf)
531 {
532 Q = extract_field (FLD_Q, code, 0);
533 /* immh Q <T>
534 0000 x SEE AdvSIMD modified immediate
535 0001 0 8B
536 0001 1 16B
537 001x 0 4H
538 001x 1 8H
539 01xx 0 2S
540 01xx 1 4S
541 1xxx 0 RESERVED
542 1xxx 1 2D */
543 info->qualifier =
544 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
545 }
546 else
547 info->qualifier = get_sreg_qualifier_from_value (pos);
548
549 if (info->type == AARCH64_OPND_IMM_VLSR)
550 /* immh <shift>
551 0000 SEE AdvSIMD modified immediate
552 0001 (16-UInt(immh:immb))
553 001x (32-UInt(immh:immb))
554 01xx (64-UInt(immh:immb))
555 1xxx (128-UInt(immh:immb)) */
556 info->imm.value = (16 << pos) - imm;
557 else
558 /* immh:immb
559 immh <shift>
560 0000 SEE AdvSIMD modified immediate
561 0001 (UInt(immh:immb)-8)
562 001x (UInt(immh:immb)-16)
563 01xx (UInt(immh:immb)-32)
564 1xxx (UInt(immh:immb)-64) */
565 info->imm.value = imm - (8 << pos);
566
567 return 1;
568 }
569
570 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
571 int
572 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
573 aarch64_opnd_info *info, const aarch64_insn code,
574 const aarch64_inst *inst ATTRIBUTE_UNUSED)
575 {
576 int64_t imm;
577 aarch64_insn val;
578 val = extract_field (FLD_size, code, 0);
579 switch (val)
580 {
581 case 0: imm = 8; break;
582 case 1: imm = 16; break;
583 case 2: imm = 32; break;
584 default: return 0;
585 }
586 info->imm.value = imm;
587 return 1;
588 }
589
590 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
591 The value in the field(s) will be extracted as an unsigned immediate value. */
592 int
593 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
594 const aarch64_insn code,
595 const aarch64_inst *inst ATTRIBUTE_UNUSED)
596 {
597 int64_t imm;
598
599 imm = extract_all_fields (self, code);
600
601 if (operand_need_sign_extension (self))
602 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
603
604 if (operand_need_shift_by_two (self))
605 imm <<= 2;
606
607 if (info->type == AARCH64_OPND_ADDR_ADRP)
608 imm <<= 12;
609
610 info->imm.value = imm;
611 return 1;
612 }
613
614 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
615 int
616 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
617 const aarch64_insn code,
618 const aarch64_inst *inst ATTRIBUTE_UNUSED)
619 {
620 aarch64_ext_imm (self, info, code, inst);
621 info->shifter.kind = AARCH64_MOD_LSL;
622 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
623 return 1;
624 }
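/* For the wide-immediate moves (MOVZ/MOVN/MOVK) the 2-bit "hw" field selects
   the shift: hw values 0..3 decode to LSL #0, #16, #32 and #48 respectively,
   hence the "hw << 4" above.  */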
625
626 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
627 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
628 int
629 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
630 aarch64_opnd_info *info,
631 const aarch64_insn code,
632 const aarch64_inst *inst ATTRIBUTE_UNUSED)
633 {
634 uint64_t imm;
635 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
636 aarch64_field field = {0, 0};
637
638 assert (info->idx == 1);
639
640 if (info->type == AARCH64_OPND_SIMD_FPIMM)
641 info->imm.is_fp = 1;
642
643 /* a:b:c:d:e:f:g:h */
644 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
645 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
646 {
647 /* Either MOVI <Dd>, #<imm>
648 or MOVI <Vd>.2D, #<imm>.
649 <imm> is a 64-bit immediate
650 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
651 encoded in "a:b:c:d:e:f:g:h". */
652 int i;
653 unsigned abcdefgh = imm;
654 for (imm = 0ull, i = 0; i < 8; i++)
655 if (((abcdefgh >> i) & 0x1) != 0)
656 imm |= 0xffull << (8 * i);
657 }
658 info->imm.value = imm;
659
660 /* cmode */
661 info->qualifier = get_expected_qualifier (inst, info->idx);
662 switch (info->qualifier)
663 {
664 case AARCH64_OPND_QLF_NIL:
665 /* no shift */
666 info->shifter.kind = AARCH64_MOD_NONE;
667 return 1;
668 case AARCH64_OPND_QLF_LSL:
669 /* shift zeros */
670 info->shifter.kind = AARCH64_MOD_LSL;
671 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
672 {
673 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
674 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
675 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
676 default: assert (0); return 0;
677 }
678 /* 00: 0; 01: 8; 10:16; 11:24. */
679 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
680 break;
681 case AARCH64_OPND_QLF_MSL:
682 /* shift ones */
683 info->shifter.kind = AARCH64_MOD_MSL;
684 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
685 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
686 break;
687 default:
688 assert (0);
689 return 0;
690 }
691
692 return 1;
693 }
694
695 /* Decode an 8-bit floating-point immediate. */
696 int
697 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
698 const aarch64_insn code,
699 const aarch64_inst *inst ATTRIBUTE_UNUSED)
700 {
701 info->imm.value = extract_all_fields (self, code);
702 info->imm.is_fp = 1;
703 return 1;
704 }
705
706 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
707 int
708 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
709 aarch64_opnd_info *info, const aarch64_insn code,
710 const aarch64_inst *inst ATTRIBUTE_UNUSED)
711 {
712 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
713 return 1;
714 }
715
716 /* Decode arithmetic immediate for e.g.
717 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
718 int
719 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
720 aarch64_opnd_info *info, const aarch64_insn code,
721 const aarch64_inst *inst ATTRIBUTE_UNUSED)
722 {
723 aarch64_insn value;
724
725 info->shifter.kind = AARCH64_MOD_LSL;
726 /* shift */
727 value = extract_field (FLD_shift, code, 0);
728 if (value >= 2)
729 return 0;
730 info->shifter.amount = value ? 12 : 0;
731 /* imm12 (unsigned) */
732 info->imm.value = extract_field (FLD_imm12, code, 0);
733
734 return 1;
735 }
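/* For example, an arithmetic immediate with imm12 == 1 and the shift field
   set to 1 decodes as the value 1 shifted by LSL #12, i.e. an effective
   addend of 4096.  */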
736
737 /* Return true if VALUE is a valid logical immediate encoding, storing the
738 decoded value in *RESULT if so. ESIZE is the number of bytes in the
739 decoded immediate. */
740 static int
741 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
742 {
743 uint64_t imm, mask;
744 uint32_t N, R, S;
745 unsigned simd_size;
746
747 /* value is N:immr:imms. */
748 S = value & 0x3f;
749 R = (value >> 6) & 0x3f;
750 N = (value >> 12) & 0x1;
751
752 /* The immediate value is S+1 bits set to 1, left rotated by SIMDsize - R
753 (in other words, right rotated by R), then replicated. */
754 if (N != 0)
755 {
756 simd_size = 64;
757 mask = 0xffffffffffffffffull;
758 }
759 else
760 {
761 switch (S)
762 {
763 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
764 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
765 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
766 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
767 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
768 default: return 0;
769 }
770 mask = (1ull << simd_size) - 1;
771 /* Top bits are IGNORED. */
772 R &= simd_size - 1;
773 }
774
775 if (simd_size > esize * 8)
776 return 0;
777
778 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
779 if (S == simd_size - 1)
780 return 0;
781 /* S+1 consecutive bits to 1. */
782 /* NOTE: S can't be 63 due to detection above. */
783 imm = (1ull << (S + 1)) - 1;
784 /* Rotate to the left by simd_size - R. */
785 if (R != 0)
786 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
787 /* Replicate the value according to SIMD size. */
788 switch (simd_size)
789 {
790 case 2: imm = (imm << 2) | imm;
791 /* Fall through. */
792 case 4: imm = (imm << 4) | imm;
793 /* Fall through. */
794 case 8: imm = (imm << 8) | imm;
795 /* Fall through. */
796 case 16: imm = (imm << 16) | imm;
797 /* Fall through. */
798 case 32: imm = (imm << 32) | imm;
799 /* Fall through. */
800 case 64: break;
801 default: assert (0); return 0;
802 }
803
804 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
805
806 return 1;
807 }
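/* Worked example: with ESIZE == 4 and VALUE == N:immr:imms == 0:000000:000111,
   S is 7 and R is 0, so simd_size is 32 and the unreplicated pattern is eight
   consecutive ones; the replicated, masked result is therefore 0x000000ff.  */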
808
809 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
810 int
811 aarch64_ext_limm (const aarch64_operand *self,
812 aarch64_opnd_info *info, const aarch64_insn code,
813 const aarch64_inst *inst)
814 {
815 uint32_t esize;
816 aarch64_insn value;
817
818 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
819 self->fields[2]);
820 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
821 return decode_limm (esize, value, &info->imm.value);
822 }
823
824 /* Decode a logical immediate for the BIC alias of AND (etc.). */
825 int
826 aarch64_ext_inv_limm (const aarch64_operand *self,
827 aarch64_opnd_info *info, const aarch64_insn code,
828 const aarch64_inst *inst)
829 {
830 if (!aarch64_ext_limm (self, info, code, inst))
831 return 0;
832 info->imm.value = ~info->imm.value;
833 return 1;
834 }
835
836 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
837 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
838 int
839 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
840 aarch64_opnd_info *info,
841 const aarch64_insn code, const aarch64_inst *inst)
842 {
843 aarch64_insn value;
844
845 /* Rt */
846 info->reg.regno = extract_field (FLD_Rt, code, 0);
847
848 /* size */
849 value = extract_field (FLD_ldst_size, code, 0);
850 if (inst->opcode->iclass == ldstpair_indexed
851 || inst->opcode->iclass == ldstnapair_offs
852 || inst->opcode->iclass == ldstpair_off
853 || inst->opcode->iclass == loadlit)
854 {
855 enum aarch64_opnd_qualifier qualifier;
856 switch (value)
857 {
858 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
859 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
860 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
861 default: return 0;
862 }
863 info->qualifier = qualifier;
864 }
865 else
866 {
867 /* opc1:size */
868 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
869 if (value > 0x4)
870 return 0;
871 info->qualifier = get_sreg_qualifier_from_value (value);
872 }
873
874 return 1;
875 }
876
877 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
878 int
879 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
880 aarch64_opnd_info *info,
881 aarch64_insn code,
882 const aarch64_inst *inst ATTRIBUTE_UNUSED)
883 {
884 /* Rn */
885 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
886 return 1;
887 }
888
889 /* Decode the address operand for e.g.
890 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
891 int
892 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
893 aarch64_opnd_info *info,
894 aarch64_insn code, const aarch64_inst *inst)
895 {
896 aarch64_insn S, value;
897
898 /* Rn */
899 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
900 /* Rm */
901 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
902 /* option */
903 value = extract_field (FLD_option, code, 0);
904 info->shifter.kind =
905 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
906 /* Fix-up the shifter kind; although the table-driven approach is
907 efficient, it is slightly inflexible, thus needing this fix-up. */
908 if (info->shifter.kind == AARCH64_MOD_UXTX)
909 info->shifter.kind = AARCH64_MOD_LSL;
910 /* S */
911 S = extract_field (FLD_S, code, 0);
912 if (S == 0)
913 {
914 info->shifter.amount = 0;
915 info->shifter.amount_present = 0;
916 }
917 else
918 {
919 int size;
920 /* Need information in other operand(s) to help achieve the decoding
921 from 'S' field. */
922 info->qualifier = get_expected_qualifier (inst, info->idx);
923 /* Get the size of the data element that is accessed, which may be
924 different from that of the source register size, e.g. in strb/ldrb. */
925 size = aarch64_get_qualifier_esize (info->qualifier);
926 info->shifter.amount = get_logsz (size);
927 info->shifter.amount_present = 1;
928 }
929
930 return 1;
931 }
932
933 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
934 int
935 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
936 aarch64_insn code, const aarch64_inst *inst)
937 {
938 aarch64_insn imm;
939 info->qualifier = get_expected_qualifier (inst, info->idx);
940
941 /* Rn */
942 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
943 /* simm (imm9 or imm7) */
944 imm = extract_field (self->fields[0], code, 0);
945 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
946 if (self->fields[0] == FLD_imm7)
947 /* scaled immediate in ld/st pair instructions. */
948 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
949 /* qualifier */
950 if (inst->opcode->iclass == ldst_unscaled
951 || inst->opcode->iclass == ldstnapair_offs
952 || inst->opcode->iclass == ldstpair_off
953 || inst->opcode->iclass == ldst_unpriv)
954 info->addr.writeback = 0;
955 else
956 {
957 /* pre/post- index */
958 info->addr.writeback = 1;
959 if (extract_field (self->fields[1], code, 0) == 1)
960 info->addr.preind = 1;
961 else
962 info->addr.postind = 1;
963 }
964
965 return 1;
966 }
967
968 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
969 int
970 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
971 aarch64_insn code,
972 const aarch64_inst *inst ATTRIBUTE_UNUSED)
973 {
974 int shift;
975 info->qualifier = get_expected_qualifier (inst, info->idx);
976 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
977 /* Rn */
978 info->addr.base_regno = extract_field (self->fields[0], code, 0);
979 /* uimm12 */
980 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
981 return 1;
982 }
983
984 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
985 int
986 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
987 aarch64_insn code,
988 const aarch64_inst *inst ATTRIBUTE_UNUSED)
989 {
990 aarch64_insn imm;
991
992 info->qualifier = get_expected_qualifier (inst, info->idx);
993 /* Rn */
994 info->addr.base_regno = extract_field (self->fields[0], code, 0);
995 /* simm10 */
996 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
997 info->addr.offset.imm = sign_extend (imm, 9) << 3;
998 if (extract_field (self->fields[3], code, 0) == 1) {
999 info->addr.writeback = 1;
1000 info->addr.preind = 1;
1001 }
1002 return 1;
1003 }
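/* The scaled 10-bit signed immediate above gives LDRAA/LDRAB byte offsets in
   the range -4096 to 4088 in steps of 8; the fourth field in SELF->fields
   selects the pre-indexed writeback form.  */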
1004
1005 /* Decode the address operand for e.g.
1006 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1007 int
1008 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1009 aarch64_opnd_info *info,
1010 aarch64_insn code, const aarch64_inst *inst)
1011 {
1012 /* The opcode dependent area stores the number of elements in
1013 each structure to be loaded/stored. */
1014 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1015
1016 /* Rn */
1017 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1018 /* Rm | #<amount> */
1019 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1020 if (info->addr.offset.regno == 31)
1021 {
1022 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1023 /* Special handling of loading single structure to all lanes. */
1024 info->addr.offset.imm = (is_ld1r ? 1
1025 : inst->operands[0].reglist.num_regs)
1026 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1027 else
1028 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1029 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1030 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1031 }
1032 else
1033 info->addr.offset.is_reg = 1;
1034 info->addr.writeback = 1;
1035
1036 return 1;
1037 }
1038
1039 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1040 int
1041 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1042 aarch64_opnd_info *info,
1043 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1044 {
1045 aarch64_insn value;
1046 /* cond */
1047 value = extract_field (FLD_cond, code, 0);
1048 info->cond = get_cond_from_value (value);
1049 return 1;
1050 }
1051
1052 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1053 int
1054 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1055 aarch64_opnd_info *info,
1056 aarch64_insn code,
1057 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1058 {
1059 /* op0:op1:CRn:CRm:op2 */
1060 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1061 FLD_CRm, FLD_op2);
1062 return 1;
1063 }
1064
1065 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1066 int
1067 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1068 aarch64_opnd_info *info, aarch64_insn code,
1069 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1070 {
1071 int i;
1072 /* op1:op2 */
1073 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1074 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1075 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1076 return 1;
1077 /* Reserved value in <pstatefield>. */
1078 return 0;
1079 }
1080
1081 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1082 int
1083 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1084 aarch64_opnd_info *info,
1085 aarch64_insn code,
1086 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1087 {
1088 int i;
1089 aarch64_insn value;
1090 const aarch64_sys_ins_reg *sysins_ops;
1091 /* op0:op1:CRn:CRm:op2 */
1092 value = extract_fields (code, 0, 5,
1093 FLD_op0, FLD_op1, FLD_CRn,
1094 FLD_CRm, FLD_op2);
1095
1096 switch (info->type)
1097 {
1098 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1099 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1100 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1101 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1102 default: assert (0); return 0;
1103 }
1104
1105 for (i = 0; sysins_ops[i].name != NULL; ++i)
1106 if (sysins_ops[i].value == value)
1107 {
1108 info->sysins_op = sysins_ops + i;
1109 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1110 info->sysins_op->name,
1111 (unsigned)info->sysins_op->value,
1112 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1113 return 1;
1114 }
1115
1116 return 0;
1117 }
1118
1119 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1120
1121 int
1122 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1123 aarch64_opnd_info *info,
1124 aarch64_insn code,
1125 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1126 {
1127 /* CRm */
1128 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1129 return 1;
1130 }
1131
1132 /* Decode the prefetch operation option operand for e.g.
1133 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1134
1135 int
1136 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1137 aarch64_opnd_info *info,
1138 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1139 {
1140 /* prfop in Rt */
1141 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1142 return 1;
1143 }
1144
1145 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1146 to the matching name/value pair in aarch64_hint_options. */
1147
1148 int
1149 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1150 aarch64_opnd_info *info,
1151 aarch64_insn code,
1152 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1153 {
1154 /* CRm:op2. */
1155 unsigned hint_number;
1156 int i;
1157
1158 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1159
1160 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1161 {
1162 if (hint_number == aarch64_hint_options[i].value)
1163 {
1164 info->hint_option = &(aarch64_hint_options[i]);
1165 return 1;
1166 }
1167 }
1168
1169 return 0;
1170 }
1171
1172 /* Decode the extended register operand for e.g.
1173 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1174 int
1175 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1176 aarch64_opnd_info *info,
1177 aarch64_insn code,
1178 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1179 {
1180 aarch64_insn value;
1181
1182 /* Rm */
1183 info->reg.regno = extract_field (FLD_Rm, code, 0);
1184 /* option */
1185 value = extract_field (FLD_option, code, 0);
1186 info->shifter.kind =
1187 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1188 /* imm3 */
1189 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1190
1191 /* This makes the constraint checking happy. */
1192 info->shifter.operator_present = 1;
1193
1194 /* Assume inst->operands[0].qualifier has been resolved. */
1195 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1196 info->qualifier = AARCH64_OPND_QLF_W;
1197 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1198 && (info->shifter.kind == AARCH64_MOD_UXTX
1199 || info->shifter.kind == AARCH64_MOD_SXTX))
1200 info->qualifier = AARCH64_OPND_QLF_X;
1201
1202 return 1;
1203 }
1204
1205 /* Decode the shifted register operand for e.g.
1206 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1207 int
1208 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1209 aarch64_opnd_info *info,
1210 aarch64_insn code,
1211 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1212 {
1213 aarch64_insn value;
1214
1215 /* Rm */
1216 info->reg.regno = extract_field (FLD_Rm, code, 0);
1217 /* shift */
1218 value = extract_field (FLD_shift, code, 0);
1219 info->shifter.kind =
1220 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1221 if (info->shifter.kind == AARCH64_MOD_ROR
1222 && inst->opcode->iclass != log_shift)
1223 /* ROR is not available for the shifted register operand in arithmetic
1224 instructions. */
1225 return 0;
1226 /* imm6 */
1227 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1228
1229 /* This makes the constraint checking happy. */
1230 info->shifter.operator_present = 1;
1231
1232 return 1;
1233 }
1234
1235 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1236 where <offset> is given by the OFFSET parameter and where <factor> is
1237 1 plus SELF's operand-dependent value. fields[0] specifies the field
1238 that holds <base>. */
1239 static int
1240 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1241 aarch64_opnd_info *info, aarch64_insn code,
1242 int64_t offset)
1243 {
1244 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1245 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1246 info->addr.offset.is_reg = FALSE;
1247 info->addr.writeback = FALSE;
1248 info->addr.preind = TRUE;
1249 if (offset != 0)
1250 info->shifter.kind = AARCH64_MOD_MUL_VL;
1251 info->shifter.amount = 1;
1252 info->shifter.operator_present = (info->addr.offset.imm != 0);
1253 info->shifter.amount_present = FALSE;
1254 return 1;
1255 }
1256
1257 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1258 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1259 SELF's operand-dependent value. fields[0] specifies the field that
1260 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1261 int
1262 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1263 aarch64_opnd_info *info, aarch64_insn code,
1264 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1265 {
1266 int offset;
1267
1268 offset = extract_field (FLD_SVE_imm4, code, 0);
1269 offset = ((offset + 8) & 15) - 8;
1270 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1271 }
1272
1273 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1274 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1275 SELF's operand-dependent value. fields[0] specifies the field that
1276 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1277 int
1278 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1279 aarch64_opnd_info *info, aarch64_insn code,
1280 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1281 {
1282 int offset;
1283
1284 offset = extract_field (FLD_SVE_imm6, code, 0);
1285 offset = (((offset + 32) & 63) - 32);
1286 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1287 }
1288
1289 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1290 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1291 SELF's operand-dependent value. fields[0] specifies the field that
1292 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1293 and imm3 fields, with imm3 being the less-significant part. */
1294 int
1295 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1296 aarch64_opnd_info *info,
1297 aarch64_insn code,
1298 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1299 {
1300 int offset;
1301
1302 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1303 offset = (((offset + 256) & 511) - 256);
1304 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1305 }
1306
1307 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1308 is given by the OFFSET parameter and where <shift> is SELF's operand-
1309 dependent value. fields[0] specifies the base register field <base>. */
1310 static int
1311 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1312 aarch64_opnd_info *info, aarch64_insn code,
1313 int64_t offset)
1314 {
1315 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1316 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1317 info->addr.offset.is_reg = FALSE;
1318 info->addr.writeback = FALSE;
1319 info->addr.preind = TRUE;
1320 info->shifter.operator_present = FALSE;
1321 info->shifter.amount_present = FALSE;
1322 return 1;
1323 }
1324
1325 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1326 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1327 value. fields[0] specifies the base register field. */
1328 int
1329 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1330 aarch64_opnd_info *info, aarch64_insn code,
1331 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1332 {
1333 int offset = extract_field (FLD_SVE_imm6, code, 0);
1334 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1335 }
1336
1337 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1338 is SELF's operand-dependent value. fields[0] specifies the base
1339 register field and fields[1] specifies the offset register field. */
1340 int
1341 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1342 aarch64_opnd_info *info, aarch64_insn code,
1343 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1344 {
1345 int index_regno;
1346
1347 index_regno = extract_field (self->fields[1], code, 0);
1348 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1349 return 0;
1350
1351 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1352 info->addr.offset.regno = index_regno;
1353 info->addr.offset.is_reg = TRUE;
1354 info->addr.writeback = FALSE;
1355 info->addr.preind = TRUE;
1356 info->shifter.kind = AARCH64_MOD_LSL;
1357 info->shifter.amount = get_operand_specific_data (self);
1358 info->shifter.operator_present = (info->shifter.amount != 0);
1359 info->shifter.amount_present = (info->shifter.amount != 0);
1360 return 1;
1361 }
1362
1363 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1364 <shift> is SELF's operand-dependent value. fields[0] specifies the
1365 base register field, fields[1] specifies the offset register field and
1366 fields[2] is a single-bit field that selects SXTW over UXTW. */
1367 int
1368 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1369 aarch64_opnd_info *info, aarch64_insn code,
1370 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1371 {
1372 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1373 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1374 info->addr.offset.is_reg = TRUE;
1375 info->addr.writeback = FALSE;
1376 info->addr.preind = TRUE;
1377 if (extract_field (self->fields[2], code, 0))
1378 info->shifter.kind = AARCH64_MOD_SXTW;
1379 else
1380 info->shifter.kind = AARCH64_MOD_UXTW;
1381 info->shifter.amount = get_operand_specific_data (self);
1382 info->shifter.operator_present = TRUE;
1383 info->shifter.amount_present = (info->shifter.amount != 0);
1384 return 1;
1385 }
1386
1387 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1388 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1389 fields[0] specifies the base register field. */
1390 int
1391 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1392 aarch64_opnd_info *info, aarch64_insn code,
1393 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1394 {
1395 int offset = extract_field (FLD_imm5, code, 0);
1396 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1397 }
1398
1399 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1400 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1401 number. fields[0] specifies the base register field and fields[1]
1402 specifies the offset register field. */
1403 static int
1404 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1405 aarch64_insn code, enum aarch64_modifier_kind kind)
1406 {
1407 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1408 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1409 info->addr.offset.is_reg = TRUE;
1410 info->addr.writeback = FALSE;
1411 info->addr.preind = TRUE;
1412 info->shifter.kind = kind;
1413 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1414 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1415 || info->shifter.amount != 0);
1416 info->shifter.amount_present = (info->shifter.amount != 0);
1417 return 1;
1418 }
1419
1420 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1421 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1422 field and fields[1] specifies the offset register field. */
1423 int
1424 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1425 aarch64_opnd_info *info, aarch64_insn code,
1426 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1427 {
1428 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1429 }
1430
1431 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1432 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1433 field and fields[1] specifies the offset register field. */
1434 int
1435 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1436 aarch64_opnd_info *info, aarch64_insn code,
1437 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1438 {
1439 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1440 }
1441
1442 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1443 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1444 field and fields[1] specifies the offset register field. */
1445 int
1446 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1447 aarch64_opnd_info *info, aarch64_insn code,
1448 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1449 {
1450 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1451 }
1452
1453 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1454 has the raw field value and that the low 8 bits decode to VALUE. */
1455 static int
1456 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1457 {
1458 info->shifter.kind = AARCH64_MOD_LSL;
1459 info->shifter.amount = 0;
1460 if (info->imm.value & 0x100)
1461 {
1462 if (value == 0)
1463 /* Decode 0x100 as #0, LSL #8. */
1464 info->shifter.amount = 8;
1465 else
1466 value *= 256;
1467 }
1468 info->shifter.operator_present = (info->shifter.amount != 0);
1469 info->shifter.amount_present = (info->shifter.amount != 0);
1470 info->imm.value = value;
1471 return 1;
1472 }
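/* For example, a raw 9-bit field value of 0x100 decodes as #0 with LSL #8,
   while 0x1fe decodes as the already-shifted value 65024 (254 * 256).  */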
1473
1474 /* Decode an SVE ADD/SUB immediate. */
1475 int
1476 aarch64_ext_sve_aimm (const aarch64_operand *self,
1477 aarch64_opnd_info *info, const aarch64_insn code,
1478 const aarch64_inst *inst)
1479 {
1480 return (aarch64_ext_imm (self, info, code, inst)
1481 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1482 }
1483
1484 /* Decode an SVE CPY/DUP immediate. */
1485 int
1486 aarch64_ext_sve_asimm (const aarch64_operand *self,
1487 aarch64_opnd_info *info, const aarch64_insn code,
1488 const aarch64_inst *inst)
1489 {
1490 return (aarch64_ext_imm (self, info, code, inst)
1491 && decode_sve_aimm (info, (int8_t) info->imm.value));
1492 }
1493
1494 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1495 The fields array specifies which field to use. */
1496 int
1497 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1498 aarch64_opnd_info *info, aarch64_insn code,
1499 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1500 {
1501 if (extract_field (self->fields[0], code, 0))
1502 info->imm.value = 0x3f800000;
1503 else
1504 info->imm.value = 0x3f000000;
1505 info->imm.is_fp = TRUE;
1506 return 1;
1507 }
1508
1509 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1510 The fields array specifies which field to use. */
1511 int
1512 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1513 aarch64_opnd_info *info, aarch64_insn code,
1514 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1515 {
1516 if (extract_field (self->fields[0], code, 0))
1517 info->imm.value = 0x40000000;
1518 else
1519 info->imm.value = 0x3f000000;
1520 info->imm.is_fp = TRUE;
1521 return 1;
1522 }
1523
1524 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1525 The fields array specifies which field to use. */
1526 int
1527 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1528 aarch64_opnd_info *info, aarch64_insn code,
1529 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1530 {
1531 if (extract_field (self->fields[0], code, 0))
1532 info->imm.value = 0x3f800000;
1533 else
1534 info->imm.value = 0x0;
1535 info->imm.is_fp = TRUE;
1536 return 1;
1537 }
1538
1539 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1540 array specifies which field to use for Zn. MM is encoded in the
1541 concatenation of imm5 and SVE_tszh, with imm5 being the less
1542 significant part. */
1543 int
1544 aarch64_ext_sve_index (const aarch64_operand *self,
1545 aarch64_opnd_info *info, aarch64_insn code,
1546 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1547 {
1548 int val;
1549
1550 info->reglane.regno = extract_field (self->fields[0], code, 0);
1551 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1552 if ((val & 15) == 0)
1553 return 0;
1554 while ((val & 1) == 0)
1555 val /= 2;
1556 info->reglane.index = val / 2;
1557 return 1;
1558 }
1559
1560 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1561 int
1562 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1563 aarch64_opnd_info *info, const aarch64_insn code,
1564 const aarch64_inst *inst)
1565 {
1566 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1567 return (aarch64_ext_limm (self, info, code, inst)
1568 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1569 }
1570
1571 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1572 to use for Zn. The opcode-dependent value specifies the number
1573 of registers in the list. */
1574 int
1575 aarch64_ext_sve_reglist (const aarch64_operand *self,
1576 aarch64_opnd_info *info, aarch64_insn code,
1577 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1578 {
1579 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1580 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1581 return 1;
1582 }
1583
1584 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1585 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1586 field. */
1587 int
1588 aarch64_ext_sve_scale (const aarch64_operand *self,
1589 aarch64_opnd_info *info, aarch64_insn code,
1590 const aarch64_inst *inst)
1591 {
1592 int val;
1593
1594 if (!aarch64_ext_imm (self, info, code, inst))
1595 return 0;
1596 val = extract_field (FLD_SVE_imm4, code, 0);
1597 info->shifter.kind = AARCH64_MOD_MUL;
1598 info->shifter.amount = val + 1;
1599 info->shifter.operator_present = (val != 0);
1600 info->shifter.amount_present = (val != 0);
1601 return 1;
1602 }
1603
1604 /* Return the top set bit in VALUE, which is expected to be relatively
1605 small. */
1606 static uint64_t
1607 get_top_bit (uint64_t value)
1608 {
1609 while ((value & -value) != value)
1610 value -= value & -value;
1611 return value;
1612 }
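/* For example, get_top_bit (0x16) repeatedly clears the lowest set bit
   (value & -value) until only the top bit, 0x10, remains.  */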
1613
1614 /* Decode an SVE shift-left immediate. */
1615 int
1616 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1617 aarch64_opnd_info *info, const aarch64_insn code,
1618 const aarch64_inst *inst)
1619 {
1620 if (!aarch64_ext_imm (self, info, code, inst)
1621 || info->imm.value == 0)
1622 return 0;
1623
1624 info->imm.value -= get_top_bit (info->imm.value);
1625 return 1;
1626 }
1627
1628 /* Decode an SVE shift-right immediate. */
1629 int
1630 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1631 aarch64_opnd_info *info, const aarch64_insn code,
1632 const aarch64_inst *inst)
1633 {
1634 if (!aarch64_ext_imm (self, info, code, inst)
1635 || info->imm.value == 0)
1636 return 0;
1637
1638 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1639 return 1;
1640 }
1641 \f
1642 /* Bitfields that are commonly used to encode certain operands' information
1643 may be partially used as part of the base opcode in some instructions.
1644 For example, the bit 1 of the field 'size' in
1645 FCVTXN <Vb><d>, <Va><n>
1646 is actually part of the base opcode, while only size<0> is available
1647 for encoding the register type. Another example is the AdvSIMD
1648 instruction ORR (register), in which the field 'size' is also used for
1649 the base opcode, leaving only the field 'Q' available to encode the
1650 vector register arrangement specifier '8B' or '16B'.
1651
1652 This function tries to deduce the qualifier from the value of partially
1653 constrained field(s). Given the VALUE of such a field or fields, the
1654 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1655 operand encoding), the function returns the matching qualifier or
1656 AARCH64_OPND_QLF_NIL if nothing matches.
1657
1658 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1659 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1660 may end with AARCH64_OPND_QLF_NIL. */
1661
1662 static enum aarch64_opnd_qualifier
1663 get_qualifier_from_partial_encoding (aarch64_insn value,
1664 const enum aarch64_opnd_qualifier* \
1665 candidates,
1666 aarch64_insn mask)
1667 {
1668 int i;
1669 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1670 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1671 {
1672 aarch64_insn standard_value;
1673 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1674 break;
1675 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1676 if ((standard_value & mask) == (value & mask))
1677 return candidates[i];
1678 }
1679 return AARCH64_OPND_QLF_NIL;
1680 }
1681
1682 /* Given a list of qualifier sequences, return all possible valid qualifiers
1683 for operand IDX in QUALIFIERS.
1684 Assume QUALIFIERS is an array whose length is large enough. */
1685
1686 static void
1687 get_operand_possible_qualifiers (int idx,
1688 const aarch64_opnd_qualifier_seq_t *list,
1689 enum aarch64_opnd_qualifier *qualifiers)
1690 {
1691 int i;
1692 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1693 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1694 break;
1695 }
1696
1697 /* Decode the size and Q fields for e.g. SHADD.
1698 We tag one operand with the qualifier according to the code;
1699 whether the qualifier is valid for this opcode or not is the
1700 duty of the semantic checking. */
1701
1702 static int
1703 decode_sizeq (aarch64_inst *inst)
1704 {
1705 int idx;
1706 enum aarch64_opnd_qualifier qualifier;
1707 aarch64_insn code;
1708 aarch64_insn value, mask;
1709 enum aarch64_field_kind fld_sz;
1710 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1711
1712 if (inst->opcode->iclass == asisdlse
1713 || inst->opcode->iclass == asisdlsep
1714 || inst->opcode->iclass == asisdlso
1715 || inst->opcode->iclass == asisdlsop)
1716 fld_sz = FLD_vldst_size;
1717 else
1718 fld_sz = FLD_size;
1719
1720 code = inst->value;
1721 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1722 /* Obtain information about which bits of the fields Q and size are actually
1723 available for operand encoding. Opcodes like FMAXNM and FMLA have
1724 size[1] unavailable. */
1725 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1726
1727 /* The index of the operand we are going to tag with a qualifier, and the
1728 qualifier itself, are deduced from the value of the size and Q fields and
1729 the possible valid qualifier lists. */
1730 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1731 DEBUG_TRACE ("key idx: %d", idx);
1732
1733 /* For most related instructions, size:Q is fully available for operand
1734 encoding. */
1735 if (mask == 0x7)
1736 {
1737 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1738 return 1;
1739 }
1740
1741 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1742 candidates);
1743 #ifdef DEBUG_AARCH64
1744 if (debug_dump)
1745 {
1746 int i;
1747 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM
1748 && candidates[i] != AARCH64_OPND_QLF_NIL; ++i)
1749 DEBUG_TRACE ("qualifier %d: %s", i,
1750 aarch64_get_qualifier_name(candidates[i]));
1751 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1752 }
1753 #endif /* DEBUG_AARCH64 */
1754
1755 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1756
1757 if (qualifier == AARCH64_OPND_QLF_NIL)
1758 return 0;
1759
1760 inst->operands[idx].qualifier = qualifier;
1761 return 1;
1762 }
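/* Illustrative example (assuming the usual (size << 1) | Q arrangement used
   by get_vreg_qualifier_from_value): for a plain AdvSIMD SHADD, size:Q is
   fully available (mask == 0x7), so size:Q == 0b010 tags the operand with
   '4H', 0b011 with '8H', 0b101 with '4S', and so on.  */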
1763
1764 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1765 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1766
1767 static int
1768 decode_asimd_fcvt (aarch64_inst *inst)
1769 {
1770 aarch64_field field = {0, 0};
1771 aarch64_insn value;
1772 enum aarch64_opnd_qualifier qualifier;
1773
1774 gen_sub_field (FLD_size, 0, 1, &field);
1775 value = extract_field_2 (&field, inst->value, 0);
1776 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1777 : AARCH64_OPND_QLF_V_2D;
1778 switch (inst->opcode->op)
1779 {
1780 case OP_FCVTN:
1781 case OP_FCVTN2:
1782 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1783 inst->operands[1].qualifier = qualifier;
1784 break;
1785 case OP_FCVTL:
1786 case OP_FCVTL2:
1787 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1788 inst->operands[0].qualifier = qualifier;
1789 break;
1790 default:
1791 assert (0);
1792 return 0;
1793 }
1794
1795 return 1;
1796 }
1797
1798 /* Decode size[0], i.e. bit 22, for
1799 e.g. FCVTXN <Vb><d>, <Va><n>. */
1800
1801 static int
1802 decode_asisd_fcvtxn (aarch64_inst *inst)
1803 {
1804 aarch64_field field = {0, 0};
1805 gen_sub_field (FLD_size, 0, 1, &field);
1806 if (!extract_field_2 (&field, inst->value, 0))
1807 return 0;
1808 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1809 return 1;
1810 }
1811
1812 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1813 static int
1814 decode_fcvt (aarch64_inst *inst)
1815 {
1816 enum aarch64_opnd_qualifier qualifier;
1817 aarch64_insn value;
1818 const aarch64_field field = {15, 2};
1819
1820 /* opc dstsize */
1821 value = extract_field_2 (&field, inst->value, 0);
1822 switch (value)
1823 {
1824 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1825 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1826 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1827 default: return 0;
1828 }
1829 inst->operands[0].qualifier = qualifier;
1830
1831 return 1;
1832 }
1833
1834 /* Do miscellaneous decodings that are not common enough to be driven by
1835 flags. */
1836
1837 static int
1838 do_misc_decoding (aarch64_inst *inst)
1839 {
1840 unsigned int value;
1841 switch (inst->opcode->op)
1842 {
1843 case OP_FCVT:
1844 return decode_fcvt (inst);
1845
1846 case OP_FCVTN:
1847 case OP_FCVTN2:
1848 case OP_FCVTL:
1849 case OP_FCVTL2:
1850 return decode_asimd_fcvt (inst);
1851
1852 case OP_FCVTXN_S:
1853 return decode_asisd_fcvtxn (inst);
1854
1855 case OP_MOV_P_P:
1856 case OP_MOVS_P_P:
1857 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1858 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
1859 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1860
1861 case OP_MOV_Z_P_Z:
1862 return (extract_field (FLD_SVE_Zd, inst->value, 0)
1863 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1864
1865 case OP_MOV_Z_V:
1866 /* Index must be zero. */
1867 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1868 return value == 1 || value == 2 || value == 4 || value == 8;
1869
1870 case OP_MOV_Z_Z:
1871 return (extract_field (FLD_SVE_Zn, inst->value, 0)
1872 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1873
1874 case OP_MOV_Z_Zi:
1875 /* Index must be nonzero. */
1876 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1877 return value != 1 && value != 2 && value != 4 && value != 8;
1878
1879 case OP_MOVM_P_P_P:
1880 return (extract_field (FLD_SVE_Pd, inst->value, 0)
1881 == extract_field (FLD_SVE_Pm, inst->value, 0));
1882
1883 case OP_MOVZS_P_P_P:
1884 case OP_MOVZ_P_P_P:
1885 return (extract_field (FLD_SVE_Pn, inst->value, 0)
1886 == extract_field (FLD_SVE_Pm, inst->value, 0));
1887
1888 case OP_NOTS_P_P_P_Z:
1889 case OP_NOT_P_P_P_Z:
1890 return (extract_field (FLD_SVE_Pm, inst->value, 0)
1891 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1892
1893 default:
1894 return 0;
1895 }
1896 }
1897
1898 /* Opcodes that have fields shared by multiple operands are usually marked
1899 with flags. In this function, we detect such flags, decode the related
1900 field(s) and store the information in one of the related operands. The
1901 'one' operand is not an arbitrary operand, but one of the operands that
1902 can accommodate all the information that has been decoded. */
1903
1904 static int
1905 do_special_decoding (aarch64_inst *inst)
1906 {
1907 int idx;
1908 aarch64_insn value;
1909 /* Condition for truly conditionally executed instructions, e.g. b.cond. */
1910 if (inst->opcode->flags & F_COND)
1911 {
1912 value = extract_field (FLD_cond2, inst->value, 0);
1913 inst->cond = get_cond_from_value (value);
1914 }
1915 /* 'sf' field. */
1916 if (inst->opcode->flags & F_SF)
1917 {
1918 idx = select_operand_for_sf_field_coding (inst->opcode);
1919 value = extract_field (FLD_sf, inst->value, 0);
1920 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1921 if ((inst->opcode->flags & F_N)
1922 && extract_field (FLD_N, inst->value, 0) != value)
1923 return 0;
1924 }
1925 /* The 'lse_sz' size field for LSE instructions. */
1926 if (inst->opcode->flags & F_LSE_SZ)
1927 {
1928 idx = select_operand_for_sf_field_coding (inst->opcode);
1929 value = extract_field (FLD_lse_sz, inst->value, 0);
1930 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1931 }
1932 /* size:Q fields. */
1933 if (inst->opcode->flags & F_SIZEQ)
1934 return decode_sizeq (inst);
1935
1936 if (inst->opcode->flags & F_FPTYPE)
1937 {
1938 idx = select_operand_for_fptype_field_coding (inst->opcode);
1939 value = extract_field (FLD_type, inst->value, 0);
1940 switch (value)
1941 {
1942 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
1943 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
1944 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
1945 default: return 0;
1946 }
1947 }
1948
1949 if (inst->opcode->flags & F_SSIZE)
1950 {
1951 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
1952 of the base opcode. */
1953 aarch64_insn mask;
1954 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1955 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1956 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
1957 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
1958 /* For most related instructions, the 'size' field is fully available for
1959 operand encoding. */
1960 if (mask == 0x3)
1961 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
1962 else
1963 {
1964 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1965 candidates);
1966 inst->operands[idx].qualifier
1967 = get_qualifier_from_partial_encoding (value, candidates, mask);
1968 }
1969 }
1970
1971 if (inst->opcode->flags & F_T)
1972 {
1973 /* Number of trailing zero bits in imm5<3:0>. */
1974 int num = 0;
1975 unsigned val, Q;
1976 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1977 == AARCH64_OPND_CLASS_SIMD_REG);
1978 /* imm5<3:0> q <t>
1979 0000 x reserved
1980 xxx1 0 8b
1981 xxx1 1 16b
1982 xx10 0 4h
1983 xx10 1 8h
1984 x100 0 2s
1985 x100 1 4s
1986 1000 0 reserved
1987 1000 1 2d */
1988 val = extract_field (FLD_imm5, inst->value, 0);
1989 while ((val & 0x1) == 0 && ++num <= 3)
1990 val >>= 1;
1991 if (num > 3)
1992 return 0;
1993 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
1994 inst->operands[0].qualifier =
1995 get_vreg_qualifier_from_value ((num << 1) | Q);
1996 }
1997
1998 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1999 {
2000 /* Use Rt to encode in the case of e.g.
2001 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2002 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2003 if (idx == -1)
2004 {
2005 /* Otherwise use the result operand, which has to be an integer
2006 register. */
2007 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2008 == AARCH64_OPND_CLASS_INT_REG);
2009 idx = 0;
2010 }
2011 assert (idx == 0 || idx == 1);
2012 value = extract_field (FLD_Q, inst->value, 0);
2013 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2014 }
2015
2016 if (inst->opcode->flags & F_LDS_SIZE)
2017 {
2018 aarch64_field field = {0, 0};
2019 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2020 == AARCH64_OPND_CLASS_INT_REG);
2021 gen_sub_field (FLD_opc, 0, 1, &field);
2022 value = extract_field_2 (&field, inst->value, 0);
2023 inst->operands[0].qualifier
2024 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2025 }
2026
2027 /* Miscellaneous decoding; done as the last step. */
2028 if (inst->opcode->flags & F_MISC)
2029 return do_misc_decoding (inst);
2030
2031 return 1;
2032 }
2033
2034 /* Converters converting a real opcode instruction to its alias form. */
2035
2036 /* ROR <Wd>, <Ws>, #<shift>
2037 is equivalent to:
2038 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2039 static int
2040 convert_extr_to_ror (aarch64_inst *inst)
2041 {
2042 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2043 {
2044 copy_operand_info (inst, 2, 3);
2045 inst->operands[3].type = AARCH64_OPND_NIL;
2046 return 1;
2047 }
2048 return 0;
2049 }
2050
2051 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2052 is equivalent to:
2053 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2054 static int
2055 convert_shll_to_xtl (aarch64_inst *inst)
2056 {
2057 if (inst->operands[2].imm.value == 0)
2058 {
2059 inst->operands[2].type = AARCH64_OPND_NIL;
2060 return 1;
2061 }
2062 return 0;
2063 }
2064
2065 /* Convert
2066 UBFM <Xd>, <Xn>, #<shift>, #63.
2067 to
2068 LSR <Xd>, <Xn>, #<shift>. */
2069 static int
2070 convert_bfm_to_sr (aarch64_inst *inst)
2071 {
2072 int64_t imms, val;
2073
2074 imms = inst->operands[3].imm.value;
2075 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2076 if (imms == val)
2077 {
2078 inst->operands[3].type = AARCH64_OPND_NIL;
2079 return 1;
2080 }
2081
2082 return 0;
2083 }
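/* For illustration: UBFM X0, X1, #4, #63 has imms equal to 63, so it is
   converted to LSR X0, X1, #4, i.e. the fourth operand is dropped and the
   remaining immediate is the shift amount.  */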
2084
2085 /* Convert MOV to ORR. */
2086 static int
2087 convert_orr_to_mov (aarch64_inst *inst)
2088 {
2089 /* MOV <Vd>.<T>, <Vn>.<T>
2090 is equivalent to:
2091 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2092 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2093 {
2094 inst->operands[2].type = AARCH64_OPND_NIL;
2095 return 1;
2096 }
2097 return 0;
2098 }
2099
2100 /* When <imms> >= <immr>, the instruction written:
2101 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2102 is equivalent to:
2103 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2104
2105 static int
2106 convert_bfm_to_bfx (aarch64_inst *inst)
2107 {
2108 int64_t immr, imms;
2109
2110 immr = inst->operands[2].imm.value;
2111 imms = inst->operands[3].imm.value;
2112 if (imms >= immr)
2113 {
2114 int64_t lsb = immr;
2115 inst->operands[2].imm.value = lsb;
2116 inst->operands[3].imm.value = imms + 1 - lsb;
2117 /* The two opcodes have different qualifiers for
2118 the immediate operands; reset to help the checking. */
2119 reset_operand_qualifier (inst, 2);
2120 reset_operand_qualifier (inst, 3);
2121 return 1;
2122 }
2123
2124 return 0;
2125 }
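/* For illustration: SBFM X0, X1, #4, #11 has imms (11) >= immr (4), so it is
   converted to SBFX X0, X1, #4, #8, i.e. lsb = 4 and width = 11 + 1 - 4.  */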
2126
2127 /* When <imms> < <immr>, the instruction written:
2128 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2129 is equivalent to:
2130 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2131
2132 static int
2133 convert_bfm_to_bfi (aarch64_inst *inst)
2134 {
2135 int64_t immr, imms, val;
2136
2137 immr = inst->operands[2].imm.value;
2138 imms = inst->operands[3].imm.value;
2139 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2140 if (imms < immr)
2141 {
2142 inst->operands[2].imm.value = (val - immr) & (val - 1);
2143 inst->operands[3].imm.value = imms + 1;
2144 /* The two opcodes have different qualifiers for
2145 the immediate operands; reset to help the checking. */
2146 reset_operand_qualifier (inst, 2);
2147 reset_operand_qualifier (inst, 3);
2148 return 1;
2149 }
2150
2151 return 0;
2152 }
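/* For illustration: SBFM X0, X1, #60, #3 has imms (3) < immr (60), so it is
   converted to SBFIZ X0, X1, #4, #4, i.e. lsb = (64 - 60) & 0x3f and
   width = 3 + 1.  */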
2153
2154 /* The instruction written:
2155 BFC <Xd>, #<lsb>, #<width>
2156 is equivalent to:
2157 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2158
2159 static int
2160 convert_bfm_to_bfc (aarch64_inst *inst)
2161 {
2162 int64_t immr, imms, val;
2163
2164 /* Should have been assured by the base opcode value. */
2165 assert (inst->operands[1].reg.regno == 0x1f);
2166
2167 immr = inst->operands[2].imm.value;
2168 imms = inst->operands[3].imm.value;
2169 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2170 if (imms < immr)
2171 {
2172 /* Drop XZR from the second operand. */
2173 copy_operand_info (inst, 1, 2);
2174 copy_operand_info (inst, 2, 3);
2175 inst->operands[3].type = AARCH64_OPND_NIL;
2176
2177 /* Recalculate the immediates. */
2178 inst->operands[1].imm.value = (val - immr) & (val - 1);
2179 inst->operands[2].imm.value = imms + 1;
2180
2181 /* The two opcodes have different qualifiers for the operands; reset to
2182 help the checking. */
2183 reset_operand_qualifier (inst, 1);
2184 reset_operand_qualifier (inst, 2);
2185 reset_operand_qualifier (inst, 3);
2186
2187 return 1;
2188 }
2189
2190 return 0;
2191 }
2192
2193 /* The instruction written:
2194 LSL <Xd>, <Xn>, #<shift>
2195 is equivalent to:
2196 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2197
2198 static int
2199 convert_ubfm_to_lsl (aarch64_inst *inst)
2200 {
2201 int64_t immr = inst->operands[2].imm.value;
2202 int64_t imms = inst->operands[3].imm.value;
2203 int64_t val
2204 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2205
2206 if ((immr == 0 && imms == val) || immr == imms + 1)
2207 {
2208 inst->operands[3].type = AARCH64_OPND_NIL;
2209 inst->operands[2].imm.value = val - imms;
2210 return 1;
2211 }
2212
2213 return 0;
2214 }
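/* For illustration: UBFM X0, X1, #61, #60 satisfies immr == imms + 1, so it
   is converted to LSL X0, X1, #3, i.e. shift = 63 - 60.  */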
2215
2216 /* CINC <Wd>, <Wn>, <cond>
2217 is equivalent to:
2218 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2219 where <cond> is not AL or NV. */
2220
2221 static int
2222 convert_from_csel (aarch64_inst *inst)
2223 {
2224 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2225 && (inst->operands[3].cond->value & 0xe) != 0xe)
2226 {
2227 copy_operand_info (inst, 2, 3);
2228 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2229 inst->operands[3].type = AARCH64_OPND_NIL;
2230 return 1;
2231 }
2232 return 0;
2233 }
2234
2235 /* CSET <Wd>, <cond>
2236 is equivalent to:
2237 CSINC <Wd>, WZR, WZR, invert(<cond>)
2238 where <cond> is not AL or NV. */
2239
2240 static int
2241 convert_csinc_to_cset (aarch64_inst *inst)
2242 {
2243 if (inst->operands[1].reg.regno == 0x1f
2244 && inst->operands[2].reg.regno == 0x1f
2245 && (inst->operands[3].cond->value & 0xe) != 0xe)
2246 {
2247 copy_operand_info (inst, 1, 3);
2248 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2249 inst->operands[3].type = AARCH64_OPND_NIL;
2250 inst->operands[2].type = AARCH64_OPND_NIL;
2251 return 1;
2252 }
2253 return 0;
2254 }
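/* For illustration: CSINC W0, WZR, WZR, NE has WZR for both source registers
   and a condition other than AL/NV, so it is converted to CSET W0, EQ
   (EQ being the inverse of NE).  */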
2255
2256 /* MOV <Wd>, #<imm>
2257 is equivalent to:
2258 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2259
2260 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2261 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2262 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2263 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2264 machine-instruction mnemonic must be used. */
2265
2266 static int
2267 convert_movewide_to_mov (aarch64_inst *inst)
2268 {
2269 uint64_t value = inst->operands[1].imm.value;
2270 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2271 if (value == 0 && inst->operands[1].shifter.amount != 0)
2272 return 0;
2273 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2274 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2275 value <<= inst->operands[1].shifter.amount;
2276 /* As an alias converter, bear in mind that INST->OPCODE
2277 is the opcode of the real instruction. */
2278 if (inst->opcode->op == OP_MOVN)
2279 {
2280 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2281 value = ~value;
2282 /* A MOVN has an immediate that could be encoded by MOVZ. */
2283 if (aarch64_wide_constant_p (value, is32, NULL) == TRUE)
2284 return 0;
2285 }
2286 inst->operands[1].imm.value = value;
2287 inst->operands[1].shifter.amount = 0;
2288 return 1;
2289 }
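/* For illustration: MOVZ W0, #0x12, LSL #16 is converted to MOV W0, #0x120000,
   while MOVZ W0, #0x0, LSL #16 is kept as-is because a zero immediate with a
   non-zero shift must use the machine mnemonic.  */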
2290
2291 /* MOV <Wd>, #<imm>
2292 is equivalent to:
2293 ORR <Wd>, WZR, #<imm>.
2294
2295 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2296 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2297 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2298 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2299 machine-instruction mnemonic must be used. */
2300
2301 static int
2302 convert_movebitmask_to_mov (aarch64_inst *inst)
2303 {
2304 int is32;
2305 uint64_t value;
2306
2307 /* Should have been assured by the base opcode value. */
2308 assert (inst->operands[1].reg.regno == 0x1f);
2309 copy_operand_info (inst, 1, 2);
2310 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2311 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2312 value = inst->operands[1].imm.value;
2313 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2314 instruction. */
2315 if (inst->operands[0].reg.regno != 0x1f
2316 && (aarch64_wide_constant_p (value, is32, NULL) == TRUE
2317 || aarch64_wide_constant_p (~value, is32, NULL) == TRUE))
2318 return 0;
2319
2320 inst->operands[2].type = AARCH64_OPND_NIL;
2321 return 1;
2322 }
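/* For illustration (a plausible example, not an exhaustive rule):
   ORR W0, WZR, #0xf0f0f0f0 is converted to MOV W0, #0xf0f0f0f0, since neither
   the immediate nor its complement can be produced by a single MOVZ/MOVN.  */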
2323
2324 /* Some alias opcodes are disassembled by being converted from their real-form.
2325 N.B. INST->OPCODE is the real opcode rather than the alias. */
2326
2327 static int
2328 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2329 {
2330 switch (alias->op)
2331 {
2332 case OP_ASR_IMM:
2333 case OP_LSR_IMM:
2334 return convert_bfm_to_sr (inst);
2335 case OP_LSL_IMM:
2336 return convert_ubfm_to_lsl (inst);
2337 case OP_CINC:
2338 case OP_CINV:
2339 case OP_CNEG:
2340 return convert_from_csel (inst);
2341 case OP_CSET:
2342 case OP_CSETM:
2343 return convert_csinc_to_cset (inst);
2344 case OP_UBFX:
2345 case OP_BFXIL:
2346 case OP_SBFX:
2347 return convert_bfm_to_bfx (inst);
2348 case OP_SBFIZ:
2349 case OP_BFI:
2350 case OP_UBFIZ:
2351 return convert_bfm_to_bfi (inst);
2352 case OP_BFC:
2353 return convert_bfm_to_bfc (inst);
2354 case OP_MOV_V:
2355 return convert_orr_to_mov (inst);
2356 case OP_MOV_IMM_WIDE:
2357 case OP_MOV_IMM_WIDEN:
2358 return convert_movewide_to_mov (inst);
2359 case OP_MOV_IMM_LOG:
2360 return convert_movebitmask_to_mov (inst);
2361 case OP_ROR_IMM:
2362 return convert_extr_to_ror (inst);
2363 case OP_SXTL:
2364 case OP_SXTL2:
2365 case OP_UXTL:
2366 case OP_UXTL2:
2367 return convert_shll_to_xtl (inst);
2368 default:
2369 return 0;
2370 }
2371 }
2372
2373 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2374 aarch64_inst *, int);
2375
2376 /* Given the instruction information in *INST, check if the instruction has
2377 any alias form that can be used to represent *INST. If the answer is yes,
2378 update *INST to be in the form of the determined alias. */
2379
2380 /* In the opcode description table, the following flags are used in opcode
2381 entries to help establish the relations between the real and alias opcodes:
2382
2383 F_ALIAS: opcode is an alias
2384 F_HAS_ALIAS: opcode has alias(es)
2385 F_P1
2386 F_P2
2387 F_P3: Disassembly preference priority 1-3 (the larger the
2388 higher). If nothing is specified, it is the priority
2389 0 by default, i.e. the lowest priority.
2390
2391 Although the relation between the machine and the alias instructions is not
2392 explicitly described, it can be easily determined from the base opcode
2393 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2394 description entries:
2395
2396 The mask of an alias opcode must be equal to, or a super-set of (i.e. more
2397 constrained than), that of the aliased opcode, as must the base opcode value.
2398
2399 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2400 && (opcode->mask & real->mask) == real->mask
2401 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2402 then OPCODE is an alias of, and only of, the REAL instruction
2403
2404 The alias relationship is kept flat-structured to keep the related algorithms
2405 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2406
2407 During disassembling, the decoding decision tree (in
2408 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2409 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2410 not specified), the disassembler will check whether any alias instruction
2411 exists for this real instruction. If there is, the disassembler will try
2412 to disassemble the 32-bit binary again using the alias's rule, or try to
2413 convert the IR to the form of the alias. In the case of multiple aliases,
2414 the aliases are tried one by one from the highest priority (currently the
2415 flag F_P3) to the lowest priority (no priority flag), and the first one
2416 that succeeds is adopted.
2417
2418 You may ask why there is a need to convert the IR from one form to
2419 another when handling certain aliases. On the one hand, it avoids
2420 adding more operand code to handle unusual encoding/decoding; on the other
2421 hand, during disassembling, the conversion is an effective way to
2422 check the conditions of an alias (as an alias may be adopted only if certain
2423 conditions are met).
2424
2425 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2426 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2427 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
2428
2429 static void
2430 determine_disassembling_preference (struct aarch64_inst *inst)
2431 {
2432 const aarch64_opcode *opcode;
2433 const aarch64_opcode *alias;
2434
2435 opcode = inst->opcode;
2436
2437 /* This opcode does not have an alias, so use itself. */
2438 if (opcode_has_alias (opcode) == FALSE)
2439 return;
2440
2441 alias = aarch64_find_alias_opcode (opcode);
2442 assert (alias);
2443
2444 #ifdef DEBUG_AARCH64
2445 if (debug_dump)
2446 {
2447 const aarch64_opcode *tmp = alias;
2448 printf ("#### LIST ordered: ");
2449 while (tmp)
2450 {
2451 printf ("%s, ", tmp->name);
2452 tmp = aarch64_find_next_alias_opcode (tmp);
2453 }
2454 printf ("\n");
2455 }
2456 #endif /* DEBUG_AARCH64 */
2457
2458 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2459 {
2460 DEBUG_TRACE ("try %s", alias->name);
2461 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2462
2463 /* An alias can be a pseudo opcode which will never be used in the
2464 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2465 aliasing AND. */
2466 if (pseudo_opcode_p (alias))
2467 {
2468 DEBUG_TRACE ("skip pseudo %s", alias->name);
2469 continue;
2470 }
2471
2472 if ((inst->value & alias->mask) != alias->opcode)
2473 {
2474 DEBUG_TRACE ("skip %s as base opcode does not match", alias->name);
2475 continue;
2476 }
2477 /* No need to do any complicated transformation on operands if the alias
2478 opcode does not have any operands. */
2479 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2480 {
2481 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2482 aarch64_replace_opcode (inst, alias);
2483 return;
2484 }
2485 if (alias->flags & F_CONV)
2486 {
2487 aarch64_inst copy;
2488 memcpy (&copy, inst, sizeof (aarch64_inst));
2489 /* ALIAS is the preference as long as the instruction can be
2490 successfully converted to the form of ALIAS. */
2491 if (convert_to_alias (&copy, alias) == 1)
2492 {
2493 aarch64_replace_opcode (&copy, alias);
2494 assert (aarch64_match_operands_constraint (&copy, NULL));
2495 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2496 memcpy (inst, &copy, sizeof (aarch64_inst));
2497 return;
2498 }
2499 }
2500 else
2501 {
2502 /* Directly decode the alias opcode. */
2503 aarch64_inst temp;
2504 memset (&temp, '\0', sizeof (aarch64_inst));
2505 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
2506 {
2507 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2508 memcpy (inst, &temp, sizeof (aarch64_inst));
2509 return;
2510 }
2511 }
2512 }
2513 }
2514
2515 /* Some instructions (including all SVE ones) use the instruction class
2516 to describe how a qualifiers_list index is represented in the instruction
2517 encoding. If INST is such an instruction, decode the appropriate fields
2518 and fill in the operand qualifiers accordingly. Return true if no
2519 problems are found. */
2520
2521 static bfd_boolean
2522 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2523 {
2524 int i, variant;
2525
2526 variant = 0;
2527 switch (inst->opcode->iclass)
2528 {
2529 case sve_cpy:
2530 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2531 break;
2532
2533 case sve_index:
2534 i = extract_field (FLD_SVE_tsz, inst->value, 0);
2535 if (i == 0)
2536 return FALSE;
2537 while ((i & 1) == 0)
2538 {
2539 i >>= 1;
2540 variant += 1;
2541 }
2542 break;
2543
2544 case sve_limm:
2545 /* Pick the smallest applicable element size. */
2546 if ((inst->value & 0x20600) == 0x600)
2547 variant = 0;
2548 else if ((inst->value & 0x20400) == 0x400)
2549 variant = 1;
2550 else if ((inst->value & 0x20000) == 0)
2551 variant = 2;
2552 else
2553 variant = 3;
2554 break;
2555
2556 case sve_misc:
2557 /* sve_misc instructions have only a single variant. */
2558 break;
2559
2560 case sve_movprfx:
2561 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2562 break;
2563
2564 case sve_pred_zm:
2565 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2566 break;
2567
2568 case sve_shift_pred:
2569 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2570 sve_shift:
2571 if (i == 0)
2572 return FALSE;
2573 while (i != 1)
2574 {
2575 i >>= 1;
2576 variant += 1;
2577 }
2578 break;
2579
2580 case sve_shift_unpred:
2581 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2582 goto sve_shift;
2583
2584 case sve_size_bhs:
2585 variant = extract_field (FLD_size, inst->value, 0);
2586 if (variant >= 3)
2587 return FALSE;
2588 break;
2589
2590 case sve_size_bhsd:
2591 variant = extract_field (FLD_size, inst->value, 0);
2592 break;
2593
2594 case sve_size_hsd:
2595 i = extract_field (FLD_size, inst->value, 0);
2596 if (i < 1)
2597 return FALSE;
2598 variant = i - 1;
2599 break;
2600
2601 case sve_size_sd:
2602 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2603 break;
2604
2605 default:
2606 /* No mapping between instruction class and qualifiers. */
2607 return TRUE;
2608 }
2609
2610 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2611 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2612 return TRUE;
2613 }
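/* A rough sketch of the effect (assuming the conventional ordering of the
   qualifiers_list rows): for an sve_size_bhsd instruction, a 'size' field of
   2 selects qualifiers_list[2], which conventionally describes the .S
   variant, and those qualifiers are copied into every operand of INST.  */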
2614 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2615 fails, which means that CODE is not an instruction of OPCODE; otherwise
2616 return 1.
2617
2618 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2619 determined and used to disassemble CODE; this is done just before the
2620 return. */
2621
2622 static int
2623 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2624 aarch64_inst *inst, int noaliases_p)
2625 {
2626 int i;
2627
2628 DEBUG_TRACE ("enter with %s", opcode->name);
2629
2630 assert (opcode && inst);
2631
2632 /* Check the base opcode. */
2633 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2634 {
2635 DEBUG_TRACE ("base opcode match FAIL");
2636 goto decode_fail;
2637 }
2638
2639 /* Clear inst. */
2640 memset (inst, '\0', sizeof (aarch64_inst));
2641
2642 inst->opcode = opcode;
2643 inst->value = code;
2644
2645 /* Assign operand codes and indexes. */
2646 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2647 {
2648 if (opcode->operands[i] == AARCH64_OPND_NIL)
2649 break;
2650 inst->operands[i].type = opcode->operands[i];
2651 inst->operands[i].idx = i;
2652 }
2653
2654 /* Call the opcode decoder indicated by flags. */
2655 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2656 {
2657 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2658 goto decode_fail;
2659 }
2660
2661 /* Possibly use the instruction class to determine the correct
2662 qualifier. */
2663 if (!aarch64_decode_variant_using_iclass (inst))
2664 {
2665 DEBUG_TRACE ("iclass-based decoder FAIL");
2666 goto decode_fail;
2667 }
2668
2669 /* Call operand decoders. */
2670 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2671 {
2672 const aarch64_operand *opnd;
2673 enum aarch64_opnd type;
2674
2675 type = opcode->operands[i];
2676 if (type == AARCH64_OPND_NIL)
2677 break;
2678 opnd = &aarch64_operands[type];
2679 if (operand_has_extractor (opnd)
2680 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
2681 {
2682 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2683 goto decode_fail;
2684 }
2685 }
2686
2687 /* If the opcode has a verifier, then check it now. */
2688 if (opcode->verifier && ! opcode->verifier (opcode, code))
2689 {
2690 DEBUG_TRACE ("operand verifier FAIL");
2691 goto decode_fail;
2692 }
2693
2694 /* Match the qualifiers. */
2695 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2696 {
2697 /* Arriving here, the CODE has been determined as a valid instruction
2698 of OPCODE and *INST has been filled with information of this OPCODE
2699 instruction. Before the return, check if the instruction has any
2700 alias and should be disassembled in the form of its alias instead.
2701 If the answer is yes, *INST will be updated. */
2702 if (!noaliases_p)
2703 determine_disassembling_preference (inst);
2704 DEBUG_TRACE ("SUCCESS");
2705 return 1;
2706 }
2707 else
2708 {
2709 DEBUG_TRACE ("constraint matching FAIL");
2710 }
2711
2712 decode_fail:
2713 return 0;
2714 }
2715 \f
2716 /* This does some user-friendly fix-up to *INST. It currently focuses on
2717 the adjustment of qualifiers to help the printed instruction be
2718 recognized/understood more easily. */
2719
2720 static void
2721 user_friendly_fixup (aarch64_inst *inst)
2722 {
2723 switch (inst->opcode->iclass)
2724 {
2725 case testbranch:
2726 /* TBNZ Xn|Wn, #uimm6, label
2727 Test and Branch Not Zero: conditionally jumps to label if bit number
2728 uimm6 in register Xn is not zero. The bit number implies the width of
2729 the register, which may be written and should be disassembled as Wn if
2730 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2731 */
2732 if (inst->operands[1].imm.value < 32)
2733 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2734 break;
2735 default: break;
2736 }
2737 }
2738
2739 /* Decode INSN and fill *INST with the instruction information. An alias
2740 opcode may be filled in *INST if NOALIASES_P is FALSE. Return zero on
2741 success. */
2742
2743 int
2744 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2745 bfd_boolean noaliases_p)
2746 {
2747 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2748
2749 #ifdef DEBUG_AARCH64
2750 if (debug_dump)
2751 {
2752 const aarch64_opcode *tmp = opcode;
2753 printf ("\n");
2754 DEBUG_TRACE ("opcode lookup:");
2755 while (tmp != NULL)
2756 {
2757 aarch64_verbose (" %s", tmp->name);
2758 tmp = aarch64_find_next_opcode (tmp);
2759 }
2760 }
2761 #endif /* DEBUG_AARCH64 */
2762
2763 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2764 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2765 opcode field and value, apart from the difference that one of them has an
2766 extra field as part of the opcode, but such a field is used for operand
2767 encoding in other opcode(s) ('immh' in the case of the example). */
2768 while (opcode != NULL)
2769 {
2770 /* But only one opcode can be decoded successfully, as the
2771 decoding routine will check the constraints carefully. */
2772 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p) == 1)
2773 return ERR_OK;
2774 opcode = aarch64_find_next_opcode (opcode);
2775 }
2776
2777 return ERR_UND;
2778 }
2779
2780 /* Print operands. */
2781
2782 static void
2783 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2784 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2785 {
2786 int i, pcrel_p, num_printed;
2787 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2788 {
2789 char str[128];
2790 /* We rely primarily on the opcode operand info; however, we also look
2791 into the inst->operands to support the disassembling of the optional
2792 operand.
2793 The two operand codes should be the same in all cases, apart from
2794 when the operand can be optional. */
2795 if (opcode->operands[i] == AARCH64_OPND_NIL
2796 || opnds[i].type == AARCH64_OPND_NIL)
2797 break;
2798
2799 /* Generate the operand string in STR. */
2800 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
2801 &info->target);
2802
2803 /* Print the delimiter (taking account of omitted operand(s)). */
2804 if (str[0] != '\0')
2805 (*info->fprintf_func) (info->stream, "%s",
2806 num_printed++ == 0 ? "\t" : ", ");
2807
2808 /* Print the operand. */
2809 if (pcrel_p)
2810 (*info->print_address_func) (info->target, info);
2811 else
2812 (*info->fprintf_func) (info->stream, "%s", str);
2813 }
2814 }
2815
2816 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
2817
2818 static void
2819 remove_dot_suffix (char *name, const aarch64_inst *inst)
2820 {
2821 char *ptr;
2822 size_t len;
2823
2824 ptr = strchr (inst->opcode->name, '.');
2825 assert (ptr && inst->cond);
2826 len = ptr - inst->opcode->name;
2827 assert (len < 8);
2828 strncpy (name, inst->opcode->name, len);
2829 name[len] = '\0';
2830 }
2831
2832 /* Print the instruction mnemonic name. */
2833
2834 static void
2835 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2836 {
2837 if (inst->opcode->flags & F_COND)
2838 {
2839 /* For instructions that are truly conditionally executed, e.g. b.cond,
2840 prepare the full mnemonic name with the corresponding condition
2841 suffix. */
2842 char name[8];
2843
2844 remove_dot_suffix (name, inst);
2845 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2846 }
2847 else
2848 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2849 }
2850
2851 /* Decide whether we need to print a comment after the operands of
2852 instruction INST. */
2853
2854 static void
2855 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
2856 {
2857 if (inst->opcode->flags & F_COND)
2858 {
2859 char name[8];
2860 unsigned int i, num_conds;
2861
2862 remove_dot_suffix (name, inst);
2863 num_conds = ARRAY_SIZE (inst->cond->names);
2864 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
2865 (*info->fprintf_func) (info->stream, "%s %s.%s",
2866 i == 1 ? " //" : ",",
2867 name, inst->cond->names[i]);
2868 }
2869 }
2870
2871 /* Print the instruction according to *INST. */
2872
2873 static void
2874 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2875 struct disassemble_info *info)
2876 {
2877 print_mnemonic_name (inst, info);
2878 print_operands (pc, inst->opcode, inst->operands, info);
2879 print_comment (inst, info);
2880 }
2881
2882 /* Entry-point of the instruction disassembler and printer. */
2883
2884 static void
2885 print_insn_aarch64_word (bfd_vma pc,
2886 uint32_t word,
2887 struct disassemble_info *info)
2888 {
2889 static const char *err_msg[6] =
2890 {
2891 [ERR_OK] = "_",
2892 [-ERR_UND] = "undefined",
2893 [-ERR_UNP] = "unpredictable",
2894 [-ERR_NYI] = "NYI"
2895 };
2896
2897 int ret;
2898 aarch64_inst inst;
2899
2900 info->insn_info_valid = 1;
2901 info->branch_delay_insns = 0;
2902 info->data_size = 0;
2903 info->target = 0;
2904 info->target2 = 0;
2905
2906 if (info->flags & INSN_HAS_RELOC)
2907 /* If the instruction has a reloc associated with it, then
2908 the offset field in the instruction will actually be the
2909 addend for the reloc. (If we are using REL type relocs).
2910 In such cases, we can ignore the pc when computing
2911 addresses, since the addend is not currently pc-relative. */
2912 pc = 0;
2913
2914 ret = aarch64_decode_insn (word, &inst, no_aliases);
2915
2916 if (((word >> 21) & 0x3ff) == 1)
2917 {
2918 /* RESERVED for ALES. */
2919 assert (ret != ERR_OK);
2920 ret = ERR_NYI;
2921 }
2922
2923 switch (ret)
2924 {
2925 case ERR_UND:
2926 case ERR_UNP:
2927 case ERR_NYI:
2928 /* Handle undefined instructions. */
2929 info->insn_type = dis_noninsn;
2930 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
2931 word, err_msg[-ret]);
2932 break;
2933 case ERR_OK:
2934 user_friendly_fixup (&inst);
2935 print_aarch64_insn (pc, &inst, info);
2936 break;
2937 default:
2938 abort ();
2939 }
2940 }
2941
2942 /* Disallow mapping symbols ($x, $d, etc.) from
2943 being displayed in symbol-relative addresses. */
2944
2945 bfd_boolean
2946 aarch64_symbol_is_valid (asymbol * sym,
2947 struct disassemble_info * info ATTRIBUTE_UNUSED)
2948 {
2949 const char * name;
2950
2951 if (sym == NULL)
2952 return FALSE;
2953
2954 name = bfd_asymbol_name (sym);
2955
2956 return name
2957 && (name[0] != '$'
2958 || (name[1] != 'x' && name[1] != 'd')
2959 || (name[2] != '\0' && name[2] != '.'));
2960 }
2961
2962 /* Print data bytes on INFO->STREAM. */
2963
2964 static void
2965 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
2966 uint32_t word,
2967 struct disassemble_info *info)
2968 {
2969 switch (info->bytes_per_chunk)
2970 {
2971 case 1:
2972 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
2973 break;
2974 case 2:
2975 info->fprintf_func (info->stream, ".short\t0x%04x", word);
2976 break;
2977 case 4:
2978 info->fprintf_func (info->stream, ".word\t0x%08x", word);
2979 break;
2980 default:
2981 abort ();
2982 }
2983 }
2984
2985 /* Try to infer the code or data type from a symbol.
2986 Returns nonzero if *MAP_TYPE was set. */
2987
2988 static int
2989 get_sym_code_type (struct disassemble_info *info, int n,
2990 enum map_type *map_type)
2991 {
2992 elf_symbol_type *es;
2993 unsigned int type;
2994 const char *name;
2995
2996 es = *(elf_symbol_type **)(info->symtab + n);
2997 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
2998
2999 /* If the symbol has function type then use that. */
3000 if (type == STT_FUNC)
3001 {
3002 *map_type = MAP_INSN;
3003 return TRUE;
3004 }
3005
3006 /* Check for mapping symbols. */
3007 name = bfd_asymbol_name(info->symtab[n]);
3008 if (name[0] == '$'
3009 && (name[1] == 'x' || name[1] == 'd')
3010 && (name[2] == '\0' || name[2] == '.'))
3011 {
3012 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3013 return TRUE;
3014 }
3015
3016 return FALSE;
3017 }
3018
3019 /* Entry-point of the AArch64 disassembler. */
3020
3021 int
3022 print_insn_aarch64 (bfd_vma pc,
3023 struct disassemble_info *info)
3024 {
3025 bfd_byte buffer[INSNLEN];
3026 int status;
3027 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
3028 bfd_boolean found = FALSE;
3029 unsigned int size = 4;
3030 unsigned long data;
3031
3032 if (info->disassembler_options)
3033 {
3034 set_default_aarch64_dis_options (info);
3035
3036 parse_aarch64_dis_options (info->disassembler_options);
3037
3038 /* To avoid repeated parsing of these options, we remove them here. */
3039 info->disassembler_options = NULL;
3040 }
3041
3042 /* AArch64 instructions are always little-endian. */
3043 info->endian_code = BFD_ENDIAN_LITTLE;
3044
3045 /* First check the full symtab for a mapping symbol, even if there
3046 are no usable non-mapping symbols for this address. */
3047 if (info->symtab_size != 0
3048 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3049 {
3050 enum map_type type = MAP_INSN;
3051 int last_sym = -1;
3052 bfd_vma addr;
3053 int n;
3054
3055 if (pc <= last_mapping_addr)
3056 last_mapping_sym = -1;
3057
3058 /* Start scanning at the start of the function, or wherever
3059 we finished last time. */
3060 n = info->symtab_pos + 1;
3061 if (n < last_mapping_sym)
3062 n = last_mapping_sym;
3063
3064 /* Scan up to the location being disassembled. */
3065 for (; n < info->symtab_size; n++)
3066 {
3067 addr = bfd_asymbol_value (info->symtab[n]);
3068 if (addr > pc)
3069 break;
3070 if ((info->section == NULL
3071 || info->section == info->symtab[n]->section)
3072 && get_sym_code_type (info, n, &type))
3073 {
3074 last_sym = n;
3075 found = TRUE;
3076 }
3077 }
3078
3079 if (!found)
3080 {
3081 n = info->symtab_pos;
3082 if (n < last_mapping_sym)
3083 n = last_mapping_sym;
3084
3085 /* No mapping symbol found at this address. Look backwards
3086 for a preceding one. */
3087 for (; n >= 0; n--)
3088 {
3089 if (get_sym_code_type (info, n, &type))
3090 {
3091 last_sym = n;
3092 found = TRUE;
3093 break;
3094 }
3095 }
3096 }
3097
3098 last_mapping_sym = last_sym;
3099 last_type = type;
3100
3101 /* Look a little bit ahead to see if we should print out
3102 less than four bytes of data. If there's a symbol,
3103 mapping or otherwise, after two bytes then don't
3104 print more. */
3105 if (last_type == MAP_DATA)
3106 {
3107 size = 4 - (pc & 3);
3108 for (n = last_sym + 1; n < info->symtab_size; n++)
3109 {
3110 addr = bfd_asymbol_value (info->symtab[n]);
3111 if (addr > pc)
3112 {
3113 if (addr - pc < size)
3114 size = addr - pc;
3115 break;
3116 }
3117 }
3118 /* If the next symbol is after three bytes, we need to
3119 print only part of the data, so that we can use either
3120 .byte or .short. */
3121 if (size == 3)
3122 size = (pc & 1) ? 1 : 2;
3123 }
3124 }
3125
3126 if (last_type == MAP_DATA)
3127 {
3128 /* size was set above. */
3129 info->bytes_per_chunk = size;
3130 info->display_endian = info->endian;
3131 printer = print_insn_data;
3132 }
3133 else
3134 {
3135 info->bytes_per_chunk = size = INSNLEN;
3136 info->display_endian = info->endian_code;
3137 printer = print_insn_aarch64_word;
3138 }
3139
3140 status = (*info->read_memory_func) (pc, buffer, size, info);
3141 if (status != 0)
3142 {
3143 (*info->memory_error_func) (status, pc, info);
3144 return -1;
3145 }
3146
3147 data = bfd_get_bits (buffer, size * 8,
3148 info->display_endian == BFD_ENDIAN_BIG);
3149
3150 (*printer) (pc, data, info);
3151
3152 return size;
3153 }
3154 \f
3155 void
3156 print_aarch64_disassembler_options (FILE *stream)
3157 {
3158 fprintf (stream, _("\n\
3159 The following AARCH64 specific disassembler options are supported for use\n\
3160 with the -M switch (multiple options should be separated by commas):\n"));
3161
3162 fprintf (stream, _("\n\
3163 no-aliases Don't print instruction aliases.\n"));
3164
3165 fprintf (stream, _("\n\
3166 aliases Do print instruction aliases.\n"));
3167
3168 #ifdef DEBUG_AARCH64
3169 fprintf (stream, _("\n\
3170 debug_dump Temp switch for debug trace.\n"));
3171 #endif /* DEBUG_AARCH64 */
3172
3173 fprintf (stream, _("\n"));
3174 }