1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define INSNLEN 4
30
31 /* Cached mapping symbol state. */
32 enum map_type
33 {
34 MAP_INSN,
35 MAP_DATA
36 };
37
38 static enum map_type last_type;
39 static int last_mapping_sym = -1;
40 static bfd_vma last_stop_offset = 0;
41 static bfd_vma last_mapping_addr = 0;
42
43 /* Other options */
44 static int no_aliases = 0; /* If set disassemble as most general inst. */
45 static int no_notes = 1; /* If set do not print disassemble notes in the
46 output as comments. */
47
48 /* Currently active instruction sequence. */
49 static aarch64_instr_sequence insn_sequence;
50
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
61 {
62 no_aliases = 1;
63 return;
64 }
65
66 if (CONST_STRNEQ (option, "aliases"))
67 {
68 no_aliases = 0;
69 return;
70 }
71
72 if (CONST_STRNEQ (option, "no-notes"))
73 {
74 no_notes = 1;
75 return;
76 }
77
78 if (CONST_STRNEQ (option, "notes"))
79 {
80 no_notes = 0;
81 return;
82 }
83
84 #ifdef DEBUG_AARCH64
85 if (CONST_STRNEQ (option, "debug_dump"))
86 {
87 debug_dump = 1;
88 return;
89 }
90 #endif /* DEBUG_AARCH64 */
91
92 /* Invalid option. */
93 opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
94 }
95
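/* Parse OPTIONS, a comma-separated list of disassembler options such as
   "no-aliases,no-notes" (typically supplied via objdump's -M switch).
   Each entry is handed to parse_aarch64_dis_option above; empty entries
   are skipped.  */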
96 static void
97 parse_aarch64_dis_options (const char *options)
98 {
99 const char *option_end;
100
101 if (options == NULL)
102 return;
103
104 while (*options != '\0')
105 {
106 /* Skip empty options. */
107 if (*options == ',')
108 {
109 options++;
110 continue;
111 }
112
113 /* We know that *options is neither NUL nor a comma. */
114 option_end = options + 1;
115 while (*option_end != ',' && *option_end != '\0')
116 option_end++;
117
118 parse_aarch64_dis_option (options, option_end - options);
119
120 /* Go on to the next one. If option_end points to a comma, it
121 will be skipped above. */
122 options = option_end;
123 }
124 }
125 \f
126 /* Functions doing the instruction disassembling. */
127
128 /* The unnamed arguments consist of the number of fields and information about
129 these fields where the VALUE will be extracted from CODE and returned.
130 MASK can be zero or the base mask of the opcode.
131
132 N.B. the fields are required to be in such an order that the most significant
133 field for VALUE comes first, e.g. the <index> in
134 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
135 is encoded in H:L:M in some cases; the fields must then be passed in
136 the order H, L, M. */
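
/* For example (matching the use in aarch64_ext_reglane below), the
   H:L:M-encoded <index> described above can be recovered with
     extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);
   i.e. three fields, most significant (H) first.  */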
137
138 aarch64_insn
139 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
140 {
141 uint32_t num;
142 const aarch64_field *field;
143 enum aarch64_field_kind kind;
144 va_list va;
145
146 va_start (va, mask);
147 num = va_arg (va, uint32_t);
148 assert (num <= 5);
149 aarch64_insn value = 0x0;
150 while (num--)
151 {
152 kind = va_arg (va, enum aarch64_field_kind);
153 field = &fields[kind];
154 value <<= field->width;
155 value |= extract_field (kind, code, mask);
156 }
157 return value;
158 }
159
160 /* Extract the value of all fields in SELF->fields from instruction CODE.
161 The least significant bit comes from the final field. */
162
163 static aarch64_insn
164 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
165 {
166 aarch64_insn value;
167 unsigned int i;
168 enum aarch64_field_kind kind;
169
170 value = 0;
171 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
172 {
173 kind = self->fields[i];
174 value <<= fields[kind].width;
175 value |= extract_field (kind, code, 0);
176 }
177 return value;
178 }
179
180 /* Sign-extend VALUE, treating bit I as the sign bit. */
181 static inline int32_t
182 sign_extend (aarch64_insn value, unsigned i)
183 {
184 uint32_t ret = value;
185
186 assert (i < 32);
187 if ((value >> i) & 0x1)
188 {
189 uint32_t val = (uint32_t)(-1) << i;
190 ret = ret | val;
191 }
192 return (int32_t) ret;
193 }
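
/* For example, sign_extend (0x1ff, 8) yields -1; this is how the 9-bit
   simm9 offsets are widened in aarch64_ext_addr_offset and
   aarch64_ext_addr_simm below.  */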
194
195 /* N.B. the following inline helper functions create a dependency on the
196 order of operand qualifier enumerators. */
197
198 /* Given VALUE, return qualifier for a general purpose register. */
199 static inline enum aarch64_opnd_qualifier
200 get_greg_qualifier_from_value (aarch64_insn value)
201 {
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
203 assert (value <= 0x1
204 && aarch64_get_qualifier_standard_value (qualifier) == value);
205 return qualifier;
206 }
207
208 /* Given VALUE, return qualifier for a vector register. This does not support
209 decoding instructions that accept the 2H vector type. */
210
211 static inline enum aarch64_opnd_qualifier
212 get_vreg_qualifier_from_value (aarch64_insn value)
213 {
214 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
215
216 /* Instructions using vector type 2H should not call this function. Skip over
217 the 2H qualifier. */
218 if (qualifier >= AARCH64_OPND_QLF_V_2H)
219 qualifier += 1;
220
221 assert (value <= 0x8
222 && aarch64_get_qualifier_standard_value (qualifier) == value);
223 return qualifier;
224 }
225
226 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
227 static inline enum aarch64_opnd_qualifier
228 get_sreg_qualifier_from_value (aarch64_insn value)
229 {
230 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
231
232 assert (value <= 0x4
233 && aarch64_get_qualifier_standard_value (qualifier) == value);
234 return qualifier;
235 }
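
/* For instance, assuming the usual B/H/S/D/Q ordering of the scalar
   qualifiers, a value of 0 maps to AARCH64_OPND_QLF_S_B and a value of 3
   maps to AARCH64_OPND_QLF_S_D; the assert above cross-checks the result
   against the standard qualifier values.  */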
236
237 /* The instruction in *INST is probably half way through its decoding and
238 the caller wants to know the expected qualifier for operand I. Return
239 such a qualifier if we can establish it; otherwise return
240 AARCH64_OPND_QLF_NIL. */
241
242 static aarch64_opnd_qualifier_t
243 get_expected_qualifier (const aarch64_inst *inst, int i)
244 {
245 aarch64_opnd_qualifier_seq_t qualifiers;
246 /* Should not be called if the qualifier is known. */
247 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
248 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
249 i, qualifiers))
250 return qualifiers[i];
251 else
252 return AARCH64_OPND_QLF_NIL;
253 }
254
255 /* Operand extractors. */
256
257 bfd_boolean
258 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
259 const aarch64_insn code,
260 const aarch64_inst *inst ATTRIBUTE_UNUSED,
261 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
262 {
263 info->reg.regno = extract_field (self->fields[0], code, 0);
264 return TRUE;
265 }
266
267 bfd_boolean
268 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
269 const aarch64_insn code ATTRIBUTE_UNUSED,
270 const aarch64_inst *inst ATTRIBUTE_UNUSED,
271 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
272 {
273 assert (info->idx == 1
274 || info->idx == 3);
275 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
276 return TRUE;
277 }
278
279 /* e.g. IC <ic_op>{, <Xt>}. */
280 bfd_boolean
281 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
282 const aarch64_insn code,
283 const aarch64_inst *inst ATTRIBUTE_UNUSED,
284 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
285 {
286 info->reg.regno = extract_field (self->fields[0], code, 0);
287 assert (info->idx == 1
288 && (aarch64_get_operand_class (inst->operands[0].type)
289 == AARCH64_OPND_CLASS_SYSTEM));
290 /* This will make the constraint checking happy and more importantly will
291 help the disassembler determine whether this operand is optional or
292 not. */
293 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
294
295 return TRUE;
296 }
297
298 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
299 bfd_boolean
300 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
301 const aarch64_insn code,
302 const aarch64_inst *inst ATTRIBUTE_UNUSED,
303 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
304 {
305 /* regno */
306 info->reglane.regno = extract_field (self->fields[0], code,
307 inst->opcode->mask);
308
309 /* Index and/or type. */
310 if (inst->opcode->iclass == asisdone
311 || inst->opcode->iclass == asimdins)
312 {
313 if (info->type == AARCH64_OPND_En
314 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
315 {
316 unsigned shift;
317 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
318 assert (info->idx == 1); /* Vn */
319 aarch64_insn value = extract_field (FLD_imm4, code, 0);
320 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
321 info->qualifier = get_expected_qualifier (inst, info->idx);
322 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
323 info->reglane.index = value >> shift;
324 }
325 else
326 {
327 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
328 imm5<3:0> <V>
329 0000 RESERVED
330 xxx1 B
331 xx10 H
332 x100 S
333 1000 D */
334 int pos = -1;
335 aarch64_insn value = extract_field (FLD_imm5, code, 0);
336 while (++pos <= 3 && (value & 0x1) == 0)
337 value >>= 1;
338 if (pos > 3)
339 return FALSE;
340 info->qualifier = get_sreg_qualifier_from_value (pos);
341 info->reglane.index = (unsigned) (value >> 1);
342 }
343 }
344 else if (inst->opcode->iclass == dotproduct)
345 {
346 /* Need information in other operand(s) to help decoding. */
347 info->qualifier = get_expected_qualifier (inst, info->idx);
348 switch (info->qualifier)
349 {
350 case AARCH64_OPND_QLF_S_4B:
351 case AARCH64_OPND_QLF_S_2H:
352 /* L:H */
353 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
354 info->reglane.regno &= 0x1f;
355 break;
356 default:
357 return FALSE;
358 }
359 }
360 else if (inst->opcode->iclass == cryptosm3)
361 {
362 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>.S[<imm2>]. */
363 info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
364 }
365 else
366 {
367 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
368 and the other by-element variants. */
369
370 /* Need information in other operand(s) to help decoding. */
371 info->qualifier = get_expected_qualifier (inst, info->idx);
372 switch (info->qualifier)
373 {
374 case AARCH64_OPND_QLF_S_H:
375 if (info->type == AARCH64_OPND_Em16)
376 {
377 /* h:l:m */
378 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
379 FLD_M);
380 info->reglane.regno &= 0xf;
381 }
382 else
383 {
384 /* h:l */
385 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
386 }
387 break;
388 case AARCH64_OPND_QLF_S_S:
389 /* h:l */
390 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
391 break;
392 case AARCH64_OPND_QLF_S_D:
393 /* H */
394 info->reglane.index = extract_field (FLD_H, code, 0);
395 break;
396 default:
397 return FALSE;
398 }
399
400 if (inst->opcode->op == OP_FCMLA_ELEM
401 && info->qualifier != AARCH64_OPND_QLF_S_H)
402 {
403 /* Complex operand takes two elements. */
404 if (info->reglane.index & 1)
405 return FALSE;
406 info->reglane.index /= 2;
407 }
408 }
409
410 return TRUE;
411 }
412
413 bfd_boolean
414 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
415 const aarch64_insn code,
416 const aarch64_inst *inst ATTRIBUTE_UNUSED,
417 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
418 {
419 /* R */
420 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
421 /* len */
422 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
423 return TRUE;
424 }
425
426 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
427 bfd_boolean
428 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
429 aarch64_opnd_info *info, const aarch64_insn code,
430 const aarch64_inst *inst,
431 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
432 {
433 aarch64_insn value;
434 /* Number of elements in each structure to be loaded/stored. */
435 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
436
437 struct
438 {
439 unsigned is_reserved;
440 unsigned num_regs;
441 unsigned num_elements;
442 } data [] =
443 { {0, 4, 4},
444 {1, 4, 4},
445 {0, 4, 1},
446 {0, 4, 2},
447 {0, 3, 3},
448 {1, 3, 3},
449 {0, 3, 1},
450 {0, 1, 1},
451 {0, 2, 2},
452 {1, 2, 2},
453 {0, 2, 1},
454 };
455
456 /* Rt */
457 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
458 /* opcode */
459 value = extract_field (FLD_opcode, code, 0);
460 /* PR 21595: Check for a bogus value. */
461 if (value >= ARRAY_SIZE (data))
462 return FALSE;
463 if (expected_num != data[value].num_elements || data[value].is_reserved)
464 return FALSE;
465 info->reglist.num_regs = data[value].num_regs;
466
467 return TRUE;
468 }
469
470 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
471 lanes instructions. */
472 bfd_boolean
473 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
474 aarch64_opnd_info *info, const aarch64_insn code,
475 const aarch64_inst *inst,
476 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
477 {
478 aarch64_insn value;
479
480 /* Rt */
481 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
482 /* S */
483 value = extract_field (FLD_S, code, 0);
484
485 /* Number of registers is equal to the number of elements in
486 each structure to be loaded/stored. */
487 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
488 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
489
490 /* Except when it is LD1R. */
491 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
492 info->reglist.num_regs = 2;
493
494 return TRUE;
495 }
496
497 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
498 load/store single element instructions. */
499 bfd_boolean
500 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
501 aarch64_opnd_info *info, const aarch64_insn code,
502 const aarch64_inst *inst ATTRIBUTE_UNUSED,
503 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
504 {
505 aarch64_field field = {0, 0};
506 aarch64_insn QSsize; /* fields Q:S:size. */
507 aarch64_insn opcodeh2; /* opcode<2:1> */
508
509 /* Rt */
510 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
511
512 /* Decode the index, opcode<2:1> and size. */
513 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
514 opcodeh2 = extract_field_2 (&field, code, 0);
515 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
516 switch (opcodeh2)
517 {
518 case 0x0:
519 info->qualifier = AARCH64_OPND_QLF_S_B;
520 /* Index encoded in "Q:S:size". */
521 info->reglist.index = QSsize;
522 break;
523 case 0x1:
524 if (QSsize & 0x1)
525 /* UND. */
526 return FALSE;
527 info->qualifier = AARCH64_OPND_QLF_S_H;
528 /* Index encoded in "Q:S:size<1>". */
529 info->reglist.index = QSsize >> 1;
530 break;
531 case 0x2:
532 if ((QSsize >> 1) & 0x1)
533 /* UND. */
534 return FALSE;
535 if ((QSsize & 0x1) == 0)
536 {
537 info->qualifier = AARCH64_OPND_QLF_S_S;
538 /* Index encoded in "Q:S". */
539 info->reglist.index = QSsize >> 2;
540 }
541 else
542 {
543 if (extract_field (FLD_S, code, 0))
544 /* UND */
545 return FALSE;
546 info->qualifier = AARCH64_OPND_QLF_S_D;
547 /* Index encoded in "Q". */
548 info->reglist.index = QSsize >> 3;
549 }
550 break;
551 default:
552 return FALSE;
553 }
554
555 info->reglist.has_index = 1;
556 info->reglist.num_regs = 0;
557 /* Number of registers is equal to the number of elements in
558 each structure to be loaded/stored. */
559 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
560 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
561
562 return TRUE;
563 }
564
565 /* Decode fields immh:immb and/or Q for e.g.
566 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
567 or SSHR <V><d>, <V><n>, #<shift>. */
568
569 bfd_boolean
570 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
571 aarch64_opnd_info *info, const aarch64_insn code,
572 const aarch64_inst *inst,
573 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
574 {
575 int pos;
576 aarch64_insn Q, imm, immh;
577 enum aarch64_insn_class iclass = inst->opcode->iclass;
578
579 immh = extract_field (FLD_immh, code, 0);
580 if (immh == 0)
581 return FALSE;
582 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
583 pos = 4;
584 /* Get highest set bit in immh. */
585 while (--pos >= 0 && (immh & 0x8) == 0)
586 immh <<= 1;
587
588 assert ((iclass == asimdshf || iclass == asisdshf)
589 && (info->type == AARCH64_OPND_IMM_VLSR
590 || info->type == AARCH64_OPND_IMM_VLSL));
591
592 if (iclass == asimdshf)
593 {
594 Q = extract_field (FLD_Q, code, 0);
595 /* immh Q <T>
596 0000 x SEE AdvSIMD modified immediate
597 0001 0 8B
598 0001 1 16B
599 001x 0 4H
600 001x 1 8H
601 01xx 0 2S
602 01xx 1 4S
603 1xxx 0 RESERVED
604 1xxx 1 2D */
605 info->qualifier =
606 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
607 }
608 else
609 info->qualifier = get_sreg_qualifier_from_value (pos);
610
611 if (info->type == AARCH64_OPND_IMM_VLSR)
612 /* immh <shift>
613 0000 SEE AdvSIMD modified immediate
614 0001 (16-UInt(immh:immb))
615 001x (32-UInt(immh:immb))
616 01xx (64-UInt(immh:immb))
617 1xxx (128-UInt(immh:immb)) */
618 info->imm.value = (16 << pos) - imm;
619 else
620 /* immh:immb
621 immh <shift>
622 0000 SEE AdvSIMD modified immediate
623 0001 (UInt(immh:immb)-8)
624 001x (UInt(immh:immb)-16)
625 01xx (UInt(immh:immb)-32)
626 1xxx (UInt(immh:immb)-64) */
627 info->imm.value = imm - (8 << pos);
628
629 return TRUE;
630 }
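
/* Worked example (derived from the tables above): for an asimdshf SSHR
   with immh:immb = 0b0100001 and Q = 1, pos is 2, so the qualifier is 4S
   and the shift is 64 - 33 = 31, i.e. SSHR <Vd>.4S, <Vn>.4S, #31.  */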
631
632 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
633 bfd_boolean
634 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
635 aarch64_opnd_info *info, const aarch64_insn code,
636 const aarch64_inst *inst ATTRIBUTE_UNUSED,
637 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
638 {
639 int64_t imm;
640 aarch64_insn val;
641 val = extract_field (FLD_size, code, 0);
642 switch (val)
643 {
644 case 0: imm = 8; break;
645 case 1: imm = 16; break;
646 case 2: imm = 32; break;
647 default: return FALSE;
648 }
649 info->imm.value = imm;
650 return TRUE;
651 }
652
653 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
654 The value in the field(s) is extracted as an unsigned immediate. */
655 bfd_boolean
656 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
657 const aarch64_insn code,
658 const aarch64_inst *inst ATTRIBUTE_UNUSED,
659 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
660 {
661 int64_t imm;
662
663 imm = extract_all_fields (self, code);
664
665 if (operand_need_sign_extension (self))
666 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
667
668 if (operand_need_shift_by_two (self))
669 imm <<= 2;
670 else if (operand_need_shift_by_four (self))
671 imm <<= 4;
672
673 if (info->type == AARCH64_OPND_ADDR_ADRP)
674 imm <<= 12;
675
676 info->imm.value = imm;
677 return TRUE;
678 }
679
680 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
681 bfd_boolean
682 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
683 const aarch64_insn code,
684 const aarch64_inst *inst ATTRIBUTE_UNUSED,
685 aarch64_operand_error *errors)
686 {
687 aarch64_ext_imm (self, info, code, inst, errors);
688 info->shifter.kind = AARCH64_MOD_LSL;
689 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
690 return TRUE;
691 }
692
693 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
694 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
695 bfd_boolean
696 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
697 aarch64_opnd_info *info,
698 const aarch64_insn code,
699 const aarch64_inst *inst ATTRIBUTE_UNUSED,
700 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
701 {
702 uint64_t imm;
703 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
704 aarch64_field field = {0, 0};
705
706 assert (info->idx == 1);
707
708 if (info->type == AARCH64_OPND_SIMD_FPIMM)
709 info->imm.is_fp = 1;
710
711 /* a:b:c:d:e:f:g:h */
712 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
713 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
714 {
715 /* Either MOVI <Dd>, #<imm>
716 or MOVI <Vd>.2D, #<imm>.
717 <imm> is a 64-bit immediate
718 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
719 encoded in "a:b:c:d:e:f:g:h". */
720 int i;
721 unsigned abcdefgh = imm;
722 for (imm = 0ull, i = 0; i < 8; i++)
723 if (((abcdefgh >> i) & 0x1) != 0)
724 imm |= 0xffull << (8 * i);
725 }
726 info->imm.value = imm;
727
728 /* cmode */
729 info->qualifier = get_expected_qualifier (inst, info->idx);
730 switch (info->qualifier)
731 {
732 case AARCH64_OPND_QLF_NIL:
733 /* no shift */
734 info->shifter.kind = AARCH64_MOD_NONE;
735 return TRUE;
736 case AARCH64_OPND_QLF_LSL:
737 /* shift zeros */
738 info->shifter.kind = AARCH64_MOD_LSL;
739 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
740 {
741 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
742 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
743 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
744 default: assert (0); return FALSE;
745 }
746 /* 00: 0; 01: 8; 10:16; 11:24. */
747 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
748 break;
749 case AARCH64_OPND_QLF_MSL:
750 /* shift ones */
751 info->shifter.kind = AARCH64_MOD_MSL;
752 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
753 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
754 break;
755 default:
756 assert (0);
757 return FALSE;
758 }
759
760 return TRUE;
761 }
762
763 /* Decode an 8-bit floating-point immediate. */
764 bfd_boolean
765 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
766 const aarch64_insn code,
767 const aarch64_inst *inst ATTRIBUTE_UNUSED,
768 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
769 {
770 info->imm.value = extract_all_fields (self, code);
771 info->imm.is_fp = 1;
772 return TRUE;
773 }
774
775 /* Decode a 1-bit rotate immediate (#90 or #270). */
776 bfd_boolean
777 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
778 const aarch64_insn code,
779 const aarch64_inst *inst ATTRIBUTE_UNUSED,
780 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
781 {
782 uint64_t rot = extract_field (self->fields[0], code, 0);
783 assert (rot < 2U);
784 info->imm.value = rot * 180 + 90;
785 return TRUE;
786 }
787
788 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
789 bfd_boolean
790 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
791 const aarch64_insn code,
792 const aarch64_inst *inst ATTRIBUTE_UNUSED,
793 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
794 {
795 uint64_t rot = extract_field (self->fields[0], code, 0);
796 assert (rot < 4U);
797 info->imm.value = rot * 90;
798 return TRUE;
799 }
800
801 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
802 bfd_boolean
803 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
804 aarch64_opnd_info *info, const aarch64_insn code,
805 const aarch64_inst *inst ATTRIBUTE_UNUSED,
806 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
807 {
808 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
809 return TRUE;
810 }
811
812 /* Decode arithmetic immediate for e.g.
813 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
814 bfd_boolean
815 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
816 aarch64_opnd_info *info, const aarch64_insn code,
817 const aarch64_inst *inst ATTRIBUTE_UNUSED,
818 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
819 {
820 aarch64_insn value;
821
822 info->shifter.kind = AARCH64_MOD_LSL;
823 /* shift */
824 value = extract_field (FLD_shift, code, 0);
825 if (value >= 2)
826 return FALSE;
827 info->shifter.amount = value ? 12 : 0;
828 /* imm12 (unsigned) */
829 info->imm.value = extract_field (FLD_imm12, code, 0);
830
831 return TRUE;
832 }
833
834 /* Return true if VALUE is a valid logical immediate encoding, storing the
835 decoded value in *RESULT if so. ESIZE is the number of bytes in the
836 decoded immediate. */
837 static bfd_boolean
838 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
839 {
840 uint64_t imm, mask;
841 uint32_t N, R, S;
842 unsigned simd_size;
843
844 /* value is N:immr:imms. */
845 S = value & 0x3f;
846 R = (value >> 6) & 0x3f;
847 N = (value >> 12) & 0x1;
848
849 /* The immediate value is S+1 bits set to 1, left rotated by SIMDsize - R
850 (in other words, right rotated by R), then replicated. */
851 if (N != 0)
852 {
853 simd_size = 64;
854 mask = 0xffffffffffffffffull;
855 }
856 else
857 {
858 switch (S)
859 {
860 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
861 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
862 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
863 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
864 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
865 default: return FALSE;
866 }
867 mask = (1ull << simd_size) - 1;
868 /* Top bits are IGNORED. */
869 R &= simd_size - 1;
870 }
871
872 if (simd_size > esize * 8)
873 return FALSE;
874
875 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
876 if (S == simd_size - 1)
877 return FALSE;
878 /* S+1 consecutive bits to 1. */
879 /* NOTE: S can't be 63 due to detection above. */
880 imm = (1ull << (S + 1)) - 1;
881 /* Rotate to the left by simd_size - R. */
882 if (R != 0)
883 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
884 /* Replicate the value according to SIMD size. */
885 switch (simd_size)
886 {
887 case 2: imm = (imm << 2) | imm;
888 /* Fall through. */
889 case 4: imm = (imm << 4) | imm;
890 /* Fall through. */
891 case 8: imm = (imm << 8) | imm;
892 /* Fall through. */
893 case 16: imm = (imm << 16) | imm;
894 /* Fall through. */
895 case 32: imm = (imm << 32) | imm;
896 /* Fall through. */
897 case 64: break;
898 default: assert (0); return 0;
899 }
900
901 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
902
903 return TRUE;
904 }
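
/* Worked example (following the steps above): for a 32-bit (W) operand,
   ESIZE is 4 and the encoding N=0, immr=0, imms=0 gives S=0, R=0 and
   simd_size=32; imm becomes 0x1, is not rotated, is replicated to
   0x0000000100000001 and is finally masked down to 0x00000001, i.e. the
   logical immediate #1.  */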
905
906 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
907 bfd_boolean
908 aarch64_ext_limm (const aarch64_operand *self,
909 aarch64_opnd_info *info, const aarch64_insn code,
910 const aarch64_inst *inst,
911 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
912 {
913 uint32_t esize;
914 aarch64_insn value;
915
916 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
917 self->fields[2]);
918 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
919 return decode_limm (esize, value, &info->imm.value);
920 }
921
922 /* Decode a logical immediate for the BIC alias of AND (etc.). */
923 bfd_boolean
924 aarch64_ext_inv_limm (const aarch64_operand *self,
925 aarch64_opnd_info *info, const aarch64_insn code,
926 const aarch64_inst *inst,
927 aarch64_operand_error *errors)
928 {
929 if (!aarch64_ext_limm (self, info, code, inst, errors))
930 return FALSE;
931 info->imm.value = ~info->imm.value;
932 return TRUE;
933 }
934
935 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
936 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
937 bfd_boolean
938 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
939 aarch64_opnd_info *info,
940 const aarch64_insn code, const aarch64_inst *inst,
941 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
942 {
943 aarch64_insn value;
944
945 /* Rt */
946 info->reg.regno = extract_field (FLD_Rt, code, 0);
947
948 /* size */
949 value = extract_field (FLD_ldst_size, code, 0);
950 if (inst->opcode->iclass == ldstpair_indexed
951 || inst->opcode->iclass == ldstnapair_offs
952 || inst->opcode->iclass == ldstpair_off
953 || inst->opcode->iclass == loadlit)
954 {
955 enum aarch64_opnd_qualifier qualifier;
956 switch (value)
957 {
958 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
959 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
960 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
961 default: return FALSE;
962 }
963 info->qualifier = qualifier;
964 }
965 else
966 {
967 /* opc1:size */
968 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
969 if (value > 0x4)
970 return FALSE;
971 info->qualifier = get_sreg_qualifier_from_value (value);
972 }
973
974 return TRUE;
975 }
976
977 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
978 bfd_boolean
979 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
980 aarch64_opnd_info *info,
981 aarch64_insn code,
982 const aarch64_inst *inst ATTRIBUTE_UNUSED,
983 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
984 {
985 /* Rn */
986 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
987 return TRUE;
988 }
989
990 /* Decode the address operand for e.g.
991 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
992 bfd_boolean
993 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
994 aarch64_opnd_info *info,
995 aarch64_insn code, const aarch64_inst *inst,
996 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
997 {
998 info->qualifier = get_expected_qualifier (inst, info->idx);
999
1000 /* Rn */
1001 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1002
1003 /* simm9 */
1004 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1005 info->addr.offset.imm = sign_extend (imm, 8);
1006 if (extract_field (self->fields[2], code, 0) == 1) {
1007 info->addr.writeback = 1;
1008 info->addr.preind = 1;
1009 }
1010 return TRUE;
1011 }
1012
1013 /* Decode the address operand for e.g.
1014 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1015 bfd_boolean
1016 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
1017 aarch64_opnd_info *info,
1018 aarch64_insn code, const aarch64_inst *inst,
1019 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1020 {
1021 aarch64_insn S, value;
1022
1023 /* Rn */
1024 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1025 /* Rm */
1026 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1027 /* option */
1028 value = extract_field (FLD_option, code, 0);
1029 info->shifter.kind =
1030 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1031 /* Fix-up the shifter kind; although the table-driven approach is
1032 efficient, it is slightly inflexible, thus needing this fix-up. */
1033 if (info->shifter.kind == AARCH64_MOD_UXTX)
1034 info->shifter.kind = AARCH64_MOD_LSL;
1035 /* S */
1036 S = extract_field (FLD_S, code, 0);
1037 if (S == 0)
1038 {
1039 info->shifter.amount = 0;
1040 info->shifter.amount_present = 0;
1041 }
1042 else
1043 {
1044 int size;
1045 /* Need information in other operand(s) to help achieve the decoding
1046 from 'S' field. */
1047 info->qualifier = get_expected_qualifier (inst, info->idx);
1048 /* Get the size of the data element that is accessed, which may be
1049 different from the source register size, e.g. in strb/ldrb. */
1050 size = aarch64_get_qualifier_esize (info->qualifier);
1051 info->shifter.amount = get_logsz (size);
1052 info->shifter.amount_present = 1;
1053 }
1054
1055 return TRUE;
1056 }
1057
1058 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
1059 bfd_boolean
1060 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1061 aarch64_insn code, const aarch64_inst *inst,
1062 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1063 {
1064 aarch64_insn imm;
1065 info->qualifier = get_expected_qualifier (inst, info->idx);
1066
1067 /* Rn */
1068 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1069 /* simm (imm9 or imm7) */
1070 imm = extract_field (self->fields[0], code, 0);
1071 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1072 if (self->fields[0] == FLD_imm7
1073 || info->qualifier == AARCH64_OPND_QLF_imm_tag)
1074 /* scaled immediate in ld/st pair instructions. */
1075 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1076 /* qualifier */
1077 if (inst->opcode->iclass == ldst_unscaled
1078 || inst->opcode->iclass == ldstnapair_offs
1079 || inst->opcode->iclass == ldstpair_off
1080 || inst->opcode->iclass == ldst_unpriv)
1081 info->addr.writeback = 0;
1082 else
1083 {
1084 /* pre/post- index */
1085 info->addr.writeback = 1;
1086 if (extract_field (self->fields[1], code, 0) == 1)
1087 info->addr.preind = 1;
1088 else
1089 info->addr.postind = 1;
1090 }
1091
1092 return TRUE;
1093 }
1094
1095 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1096 bfd_boolean
1097 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1098 aarch64_insn code,
1099 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1100 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1101 {
1102 int shift;
1103 info->qualifier = get_expected_qualifier (inst, info->idx);
1104 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1105 /* Rn */
1106 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1107 /* uimm12 */
1108 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1109 return TRUE;
1110 }
1111
1112 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1113 bfd_boolean
1114 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1115 aarch64_insn code,
1116 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1117 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1118 {
1119 aarch64_insn imm;
1120
1121 info->qualifier = get_expected_qualifier (inst, info->idx);
1122 /* Rn */
1123 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1124 /* simm10 */
1125 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1126 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1127 if (extract_field (self->fields[3], code, 0) == 1) {
1128 info->addr.writeback = 1;
1129 info->addr.preind = 1;
1130 }
1131 return TRUE;
1132 }
1133
1134 /* Decode the address operand for e.g.
1135 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1136 bfd_boolean
1137 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1138 aarch64_opnd_info *info,
1139 aarch64_insn code, const aarch64_inst *inst,
1140 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1141 {
1142 /* The opcode dependent area stores the number of elements in
1143 each structure to be loaded/stored. */
1144 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1145
1146 /* Rn */
1147 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1148 /* Rm | #<amount> */
1149 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1150 if (info->addr.offset.regno == 31)
1151 {
1152 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1153 /* Special handling of loading a single structure to all lanes. */
1154 info->addr.offset.imm = (is_ld1r ? 1
1155 : inst->operands[0].reglist.num_regs)
1156 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1157 else
1158 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1159 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1160 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1161 }
1162 else
1163 info->addr.offset.is_reg = 1;
1164 info->addr.writeback = 1;
1165
1166 return TRUE;
1167 }
1168
1169 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1170 bfd_boolean
1171 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1172 aarch64_opnd_info *info,
1173 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1174 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1175 {
1176 aarch64_insn value;
1177 /* cond */
1178 value = extract_field (FLD_cond, code, 0);
1179 info->cond = get_cond_from_value (value);
1180 return TRUE;
1181 }
1182
1183 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1184 bfd_boolean
1185 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1186 aarch64_opnd_info *info,
1187 aarch64_insn code,
1188 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1189 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1190 {
1191 /* op0:op1:CRn:CRm:op2 */
1192 info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1193 FLD_CRm, FLD_op2);
1194 info->sysreg.flags = 0;
1195
1196 /* If this is a system instruction, work out which restrictions should apply
1197 to the register value; these will then be enforced. */
1198 if (inst->opcode->iclass == ic_system)
1199 {
1200 /* Check whether it is read-only, then whether it is write-only;
1201 if it is both or unspecified we don't care. */
1202 if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1203 info->sysreg.flags = F_REG_READ;
1204 else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1205 == F_SYS_WRITE)
1206 info->sysreg.flags = F_REG_WRITE;
1207 }
1208
1209 return TRUE;
1210 }
1211
1212 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1213 bfd_boolean
1214 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1215 aarch64_opnd_info *info, aarch64_insn code,
1216 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1217 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1218 {
1219 int i;
1220 /* op1:op2 */
1221 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1222 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1223 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1224 return TRUE;
1225 /* Reserved value in <pstatefield>. */
1226 return FALSE;
1227 }
1228
1229 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1230 bfd_boolean
1231 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1232 aarch64_opnd_info *info,
1233 aarch64_insn code,
1234 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1235 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1236 {
1237 int i;
1238 aarch64_insn value;
1239 const aarch64_sys_ins_reg *sysins_ops;
1240 /* op0:op1:CRn:CRm:op2 */
1241 value = extract_fields (code, 0, 5,
1242 FLD_op0, FLD_op1, FLD_CRn,
1243 FLD_CRm, FLD_op2);
1244
1245 switch (info->type)
1246 {
1247 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1248 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1249 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1250 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1251 case AARCH64_OPND_SYSREG_SR:
1252 sysins_ops = aarch64_sys_regs_sr;
1253 /* Let's remove op2 for rctx. Refer to comments in the definition of
1254 aarch64_sys_regs_sr[]. */
1255 value = value & ~(0x7);
1256 break;
1257 default: assert (0); return FALSE;
1258 }
1259
1260 for (i = 0; sysins_ops[i].name != NULL; ++i)
1261 if (sysins_ops[i].value == value)
1262 {
1263 info->sysins_op = sysins_ops + i;
1264 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1265 info->sysins_op->name,
1266 (unsigned)info->sysins_op->value,
1267 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1268 return TRUE;
1269 }
1270
1271 return FALSE;
1272 }
1273
1274 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1275
1276 bfd_boolean
1277 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1278 aarch64_opnd_info *info,
1279 aarch64_insn code,
1280 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1281 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1282 {
1283 /* CRm */
1284 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1285 return TRUE;
1286 }
1287
1288 /* Decode the prefetch operation option operand for e.g.
1289 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1290
1291 bfd_boolean
1292 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1293 aarch64_opnd_info *info,
1294 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1295 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1296 {
1297 /* prfop in Rt */
1298 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1299 return TRUE;
1300 }
1301
1302 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1303 to the matching name/value pair in aarch64_hint_options. */
1304
1305 bfd_boolean
1306 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1307 aarch64_opnd_info *info,
1308 aarch64_insn code,
1309 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1310 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1311 {
1312 /* CRm:op2. */
1313 unsigned hint_number;
1314 int i;
1315
1316 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1317
1318 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1319 {
1320 if (hint_number == HINT_VAL (aarch64_hint_options[i].value))
1321 {
1322 info->hint_option = &(aarch64_hint_options[i]);
1323 return TRUE;
1324 }
1325 }
1326
1327 return FALSE;
1328 }
1329
1330 /* Decode the extended register operand for e.g.
1331 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1332 bfd_boolean
1333 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1334 aarch64_opnd_info *info,
1335 aarch64_insn code,
1336 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1337 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1338 {
1339 aarch64_insn value;
1340
1341 /* Rm */
1342 info->reg.regno = extract_field (FLD_Rm, code, 0);
1343 /* option */
1344 value = extract_field (FLD_option, code, 0);
1345 info->shifter.kind =
1346 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1347 /* imm3 */
1348 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1349
1350 /* This makes the constraint checking happy. */
1351 info->shifter.operator_present = 1;
1352
1353 /* Assume inst->operands[0].qualifier has been resolved. */
1354 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1355 info->qualifier = AARCH64_OPND_QLF_W;
1356 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1357 && (info->shifter.kind == AARCH64_MOD_UXTX
1358 || info->shifter.kind == AARCH64_MOD_SXTX))
1359 info->qualifier = AARCH64_OPND_QLF_X;
1360
1361 return TRUE;
1362 }
1363
1364 /* Decode the shifted register operand for e.g.
1365 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1366 bfd_boolean
1367 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1368 aarch64_opnd_info *info,
1369 aarch64_insn code,
1370 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1371 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1372 {
1373 aarch64_insn value;
1374
1375 /* Rm */
1376 info->reg.regno = extract_field (FLD_Rm, code, 0);
1377 /* shift */
1378 value = extract_field (FLD_shift, code, 0);
1379 info->shifter.kind =
1380 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1381 if (info->shifter.kind == AARCH64_MOD_ROR
1382 && inst->opcode->iclass != log_shift)
1383 /* ROR is not available for the shifted register operand in arithmetic
1384 instructions. */
1385 return FALSE;
1386 /* imm6 */
1387 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1388
1389 /* This makes the constraint checking happy. */
1390 info->shifter.operator_present = 1;
1391
1392 return TRUE;
1393 }
1394
1395 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1396 where <offset> is given by the OFFSET parameter and where <factor> is
1397 1 plus SELF's operand-dependent value. fields[0] specifies the field
1398 that holds <base>. */
1399 static bfd_boolean
1400 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1401 aarch64_opnd_info *info, aarch64_insn code,
1402 int64_t offset)
1403 {
1404 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1405 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1406 info->addr.offset.is_reg = FALSE;
1407 info->addr.writeback = FALSE;
1408 info->addr.preind = TRUE;
1409 if (offset != 0)
1410 info->shifter.kind = AARCH64_MOD_MUL_VL;
1411 info->shifter.amount = 1;
1412 info->shifter.operator_present = (info->addr.offset.imm != 0);
1413 info->shifter.amount_present = FALSE;
1414 return TRUE;
1415 }
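
/* For example, with an operand-dependent value of 1 (factor 2) and an
   OFFSET of 3, the decoded address is [<base>, #6, MUL VL].  */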
1416
1417 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1418 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1419 SELF's operand-dependent value. fields[0] specifies the field that
1420 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1421 bfd_boolean
1422 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1423 aarch64_opnd_info *info, aarch64_insn code,
1424 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1425 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1426 {
1427 int offset;
1428
1429 offset = extract_field (FLD_SVE_imm4, code, 0);
1430 offset = ((offset + 8) & 15) - 8;
1431 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1432 }
1433
1434 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1435 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1436 SELF's operand-dependent value. fields[0] specifies the field that
1437 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1438 bfd_boolean
1439 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1440 aarch64_opnd_info *info, aarch64_insn code,
1441 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1442 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1443 {
1444 int offset;
1445
1446 offset = extract_field (FLD_SVE_imm6, code, 0);
1447 offset = (((offset + 32) & 63) - 32);
1448 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1449 }
1450
1451 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1452 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1453 SELF's operand-dependent value. fields[0] specifies the field that
1454 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1455 and imm3 fields, with imm3 being the less-significant part. */
1456 bfd_boolean
1457 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1458 aarch64_opnd_info *info,
1459 aarch64_insn code,
1460 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1461 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1462 {
1463 int offset;
1464
1465 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1466 offset = (((offset + 256) & 511) - 256);
1467 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1468 }
1469
1470 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1471 is given by the OFFSET parameter and where <shift> is SELF's operand-
1472 dependent value. fields[0] specifies the base register field <base>. */
1473 static bfd_boolean
1474 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1475 aarch64_opnd_info *info, aarch64_insn code,
1476 int64_t offset)
1477 {
1478 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1479 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1480 info->addr.offset.is_reg = FALSE;
1481 info->addr.writeback = FALSE;
1482 info->addr.preind = TRUE;
1483 info->shifter.operator_present = FALSE;
1484 info->shifter.amount_present = FALSE;
1485 return TRUE;
1486 }
1487
1488 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1489 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1490 value. fields[0] specifies the base register field. */
1491 bfd_boolean
1492 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1493 aarch64_opnd_info *info, aarch64_insn code,
1494 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1495 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1496 {
1497 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1498 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1499 }
1500
1501 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1502 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1503 value. fields[0] specifies the base register field. */
1504 bfd_boolean
1505 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1506 aarch64_opnd_info *info, aarch64_insn code,
1507 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1508 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1509 {
1510 int offset = extract_field (FLD_SVE_imm6, code, 0);
1511 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1512 }
1513
1514 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1515 is SELF's operand-dependent value. fields[0] specifies the base
1516 register field and fields[1] specifies the offset register field. */
1517 bfd_boolean
1518 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1519 aarch64_opnd_info *info, aarch64_insn code,
1520 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1521 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1522 {
1523 int index_regno;
1524
1525 index_regno = extract_field (self->fields[1], code, 0);
1526 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1527 return FALSE;
1528
1529 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1530 info->addr.offset.regno = index_regno;
1531 info->addr.offset.is_reg = TRUE;
1532 info->addr.writeback = FALSE;
1533 info->addr.preind = TRUE;
1534 info->shifter.kind = AARCH64_MOD_LSL;
1535 info->shifter.amount = get_operand_specific_data (self);
1536 info->shifter.operator_present = (info->shifter.amount != 0);
1537 info->shifter.amount_present = (info->shifter.amount != 0);
1538 return TRUE;
1539 }
1540
1541 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1542 <shift> is SELF's operand-dependent value. fields[0] specifies the
1543 base register field, fields[1] specifies the offset register field and
1544 fields[2] is a single-bit field that selects SXTW over UXTW. */
1545 bfd_boolean
1546 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1547 aarch64_opnd_info *info, aarch64_insn code,
1548 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1549 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1550 {
1551 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1552 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1553 info->addr.offset.is_reg = TRUE;
1554 info->addr.writeback = FALSE;
1555 info->addr.preind = TRUE;
1556 if (extract_field (self->fields[2], code, 0))
1557 info->shifter.kind = AARCH64_MOD_SXTW;
1558 else
1559 info->shifter.kind = AARCH64_MOD_UXTW;
1560 info->shifter.amount = get_operand_specific_data (self);
1561 info->shifter.operator_present = TRUE;
1562 info->shifter.amount_present = (info->shifter.amount != 0);
1563 return TRUE;
1564 }
1565
1566 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1567 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1568 fields[0] specifies the base register field. */
1569 bfd_boolean
1570 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1571 aarch64_opnd_info *info, aarch64_insn code,
1572 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1573 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1574 {
1575 int offset = extract_field (FLD_imm5, code, 0);
1576 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1577 }
1578
1579 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1580 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1581 number. fields[0] specifies the base register field and fields[1]
1582 specifies the offset register field. */
1583 static bfd_boolean
1584 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1585 aarch64_insn code, enum aarch64_modifier_kind kind)
1586 {
1587 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1588 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1589 info->addr.offset.is_reg = TRUE;
1590 info->addr.writeback = FALSE;
1591 info->addr.preind = TRUE;
1592 info->shifter.kind = kind;
1593 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1594 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1595 || info->shifter.amount != 0);
1596 info->shifter.amount_present = (info->shifter.amount != 0);
1597 return TRUE;
1598 }
1599
1600 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1601 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1602 field and fields[1] specifies the offset register field. */
1603 bfd_boolean
1604 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1605 aarch64_opnd_info *info, aarch64_insn code,
1606 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1607 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1608 {
1609 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1610 }
1611
1612 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1613 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1614 field and fields[1] specifies the offset register field. */
1615 bfd_boolean
1616 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1617 aarch64_opnd_info *info, aarch64_insn code,
1618 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1619 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1620 {
1621 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1622 }
1623
1624 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1625 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1626 field and fields[1] specifies the offset register field. */
1627 bfd_boolean
1628 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1629 aarch64_opnd_info *info, aarch64_insn code,
1630 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1631 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1632 {
1633 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1634 }
1635
1636 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1637 has the raw field value and that the low 8 bits decode to VALUE. */
1638 static bfd_boolean
1639 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1640 {
1641 info->shifter.kind = AARCH64_MOD_LSL;
1642 info->shifter.amount = 0;
1643 if (info->imm.value & 0x100)
1644 {
1645 if (value == 0)
1646 /* Decode 0x100 as #0, LSL #8. */
1647 info->shifter.amount = 8;
1648 else
1649 value *= 256;
1650 }
1651 info->shifter.operator_present = (info->shifter.amount != 0);
1652 info->shifter.amount_present = (info->shifter.amount != 0);
1653 info->imm.value = value;
1654 return TRUE;
1655 }
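
/* For example, a raw 9-bit field value of 0x100 decodes to #0, LSL #8,
   while 0x101 decodes to the plain immediate #256 (the low byte 1 scaled
   by 256, with no shifter flagged as present).  */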
1656
1657 /* Decode an SVE ADD/SUB immediate. */
1658 bfd_boolean
1659 aarch64_ext_sve_aimm (const aarch64_operand *self,
1660 aarch64_opnd_info *info, const aarch64_insn code,
1661 const aarch64_inst *inst,
1662 aarch64_operand_error *errors)
1663 {
1664 return (aarch64_ext_imm (self, info, code, inst, errors)
1665 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1666 }
1667
1668 /* Decode an SVE CPY/DUP immediate. */
1669 bfd_boolean
1670 aarch64_ext_sve_asimm (const aarch64_operand *self,
1671 aarch64_opnd_info *info, const aarch64_insn code,
1672 const aarch64_inst *inst,
1673 aarch64_operand_error *errors)
1674 {
1675 return (aarch64_ext_imm (self, info, code, inst, errors)
1676 && decode_sve_aimm (info, (int8_t) info->imm.value));
1677 }
1678
1679 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1680 The fields array specifies which field to use. */
1681 bfd_boolean
1682 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1683 aarch64_opnd_info *info, aarch64_insn code,
1684 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1685 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1686 {
1687 if (extract_field (self->fields[0], code, 0))
1688 info->imm.value = 0x3f800000;
1689 else
1690 info->imm.value = 0x3f000000;
1691 info->imm.is_fp = TRUE;
1692 return TRUE;
1693 }
1694
1695 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1696 The fields array specifies which field to use. */
1697 bfd_boolean
1698 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1699 aarch64_opnd_info *info, aarch64_insn code,
1700 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1701 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1702 {
1703 if (extract_field (self->fields[0], code, 0))
1704 info->imm.value = 0x40000000;
1705 else
1706 info->imm.value = 0x3f000000;
1707 info->imm.is_fp = TRUE;
1708 return TRUE;
1709 }
1710
1711 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1712 The fields array specifies which field to use. */
1713 bfd_boolean
1714 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1715 aarch64_opnd_info *info, aarch64_insn code,
1716 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1717 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1718 {
1719 if (extract_field (self->fields[0], code, 0))
1720 info->imm.value = 0x3f800000;
1721 else
1722 info->imm.value = 0x0;
1723 info->imm.is_fp = TRUE;
1724 return TRUE;
1725 }
1726
1727 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1728 array specifies which field to use for Zn. MM is encoded in the
1729 concatenation of imm5 and SVE_tszh, with imm5 being the less
1730 significant part. */
1731 bfd_boolean
1732 aarch64_ext_sve_index (const aarch64_operand *self,
1733 aarch64_opnd_info *info, aarch64_insn code,
1734 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1735 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1736 {
1737 int val;
1738
1739 info->reglane.regno = extract_field (self->fields[0], code, 0);
1740 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1741 if ((val & 31) == 0)
1742 return 0;
1743 while ((val & 1) == 0)
1744 val /= 2;
1745 info->reglane.index = val / 2;
1746 return TRUE;
1747 }
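/* For illustration (example value chosen for exposition): with
   SVE_tszh:imm5 = 0b0010100, the lowest set bit is bit 2, which under the
   usual tsz convention selects the S element size, and the bits above it
   (0b00101 >> 1 = 2) give the lane index, so the operand decodes as
   Z<n>.S[2].  */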
1748
1749 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1750 bfd_boolean
1751 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1752 aarch64_opnd_info *info, const aarch64_insn code,
1753 const aarch64_inst *inst,
1754 aarch64_operand_error *errors)
1755 {
1756 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1757 return (aarch64_ext_limm (self, info, code, inst, errors)
1758 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1759 }
1760
1761 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1762 and where MM occupies the most-significant part. The operand-dependent
1763 value specifies the number of bits in Zn. */
1764 bfd_boolean
1765 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1766 aarch64_opnd_info *info, aarch64_insn code,
1767 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1768 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1769 {
1770 unsigned int reg_bits = get_operand_specific_data (self);
1771 unsigned int val = extract_all_fields (self, code);
1772 info->reglane.regno = val & ((1 << reg_bits) - 1);
1773 info->reglane.index = val >> reg_bits;
1774 return TRUE;
1775 }
1776
1777 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1778 to use for Zn. The opcode-dependent value specifies the number
1779 of registers in the list. */
1780 bfd_boolean
1781 aarch64_ext_sve_reglist (const aarch64_operand *self,
1782 aarch64_opnd_info *info, aarch64_insn code,
1783 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1784 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1785 {
1786 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1787 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1788 return TRUE;
1789 }
1790
1791 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1792 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1793 field. */
1794 bfd_boolean
1795 aarch64_ext_sve_scale (const aarch64_operand *self,
1796 aarch64_opnd_info *info, aarch64_insn code,
1797 const aarch64_inst *inst, aarch64_operand_error *errors)
1798 {
1799 int val;
1800
1801 if (!aarch64_ext_imm (self, info, code, inst, errors))
1802 return FALSE;
1803 val = extract_field (FLD_SVE_imm4, code, 0);
1804 info->shifter.kind = AARCH64_MOD_MUL;
1805 info->shifter.amount = val + 1;
1806 info->shifter.operator_present = (val != 0);
1807 info->shifter.amount_present = (val != 0);
1808 return TRUE;
1809 }
1810
1811 /* Return the top set bit in VALUE, which is expected to be relatively
1812 small. */
1813 static uint64_t
1814 get_top_bit (uint64_t value)
1815 {
1816 while ((value & -value) != value)
1817 value -= value & -value;
1818 return value;
1819 }
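/* For example: value = 13 (0b1101).  value & -value isolates the lowest set
   bit, so the loop strips 0b0001 and then 0b0100, leaving 0b1000; the
   function therefore returns 8, the highest set bit of the input.  */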
1820
1821 /* Decode an SVE shift-left immediate. */
1822 bfd_boolean
1823 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1824 aarch64_opnd_info *info, const aarch64_insn code,
1825 const aarch64_inst *inst, aarch64_operand_error *errors)
1826 {
1827 if (!aarch64_ext_imm (self, info, code, inst, errors)
1828 || info->imm.value == 0)
1829 return FALSE;
1830
1831 info->imm.value -= get_top_bit (info->imm.value);
1832 return TRUE;
1833 }
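/* For illustration (example value chosen for exposition): a raw immediate of
   13 (0b1101) has top set bit 8, so the element size is 8 bits and the
   decoded left-shift amount is 13 - 8 = 5.  */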
1834
1835 /* Decode an SVE shift-right immediate. */
1836 bfd_boolean
1837 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1838 aarch64_opnd_info *info, const aarch64_insn code,
1839 const aarch64_inst *inst, aarch64_operand_error *errors)
1840 {
1841 if (!aarch64_ext_imm (self, info, code, inst, errors)
1842 || info->imm.value == 0)
1843 return FALSE;
1844
1845 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1846 return TRUE;
1847 }
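/* For illustration (example value chosen for exposition): a raw immediate of
   13 (0b1101) has top set bit 8, so the element size is 8 bits and the
   decoded right-shift amount is 2 * 8 - 13 = 3.  */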
1848 \f
1849 /* Bitfields that are commonly used to encode certain operands' information
1850 may be partially used as part of the base opcode in some instructions.
1851 For example, the bit 1 of the field 'size' in
1852 FCVTXN <Vb><d>, <Va><n>
1853 is actually part of the base opcode, while only size<0> is available
1854 for encoding the register type. Another example is the AdvSIMD
1855 instruction ORR (register), in which the field 'size' is also used for
1856 the base opcode, leaving only the field 'Q' available to encode the
1857 vector register arrangement specifier '8B' or '16B'.
1858
1859 This function tries to deduce the qualifier from the value of partially
1860 constrained field(s). Given the VALUE of such a field or fields, the
1861 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1862 operand encoding), the function returns the matching qualifier or
1863 AARCH64_OPND_QLF_NIL if nothing matches.
1864
1865 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1866 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1867 may end with AARCH64_OPND_QLF_NIL. */
1868
1869 static enum aarch64_opnd_qualifier
1870 get_qualifier_from_partial_encoding (aarch64_insn value,
1871 const enum aarch64_opnd_qualifier* \
1872 candidates,
1873 aarch64_insn mask)
1874 {
1875 int i;
1876 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1877 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1878 {
1879 aarch64_insn standard_value;
1880 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1881 break;
1882 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1883 if ((standard_value & mask) == (value & mask))
1884 return candidates[i];
1885 }
1886 return AARCH64_OPND_QLF_NIL;
1887 }
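/* For example, in the AdvSIMD ORR (register) case mentioned below, 'size' is
   part of the base opcode, so MASK covers only the Q bit; with the candidate
   list {8B, 16B} and the usual size:Q layout, a VALUE whose Q bit is 0
   matches 8B and one whose Q bit is 1 matches 16B, because the comparison is
   made only under MASK.  */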
1888
1889 /* Given a list of qualifier sequences, return all possible valid qualifiers
1890 for operand IDX in QUALIFIERS.
1891 Assume QUALIFIERS is an array whose length is large enough. */
1892
1893 static void
1894 get_operand_possible_qualifiers (int idx,
1895 const aarch64_opnd_qualifier_seq_t *list,
1896 enum aarch64_opnd_qualifier *qualifiers)
1897 {
1898 int i;
1899 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1900 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1901 break;
1902 }
1903
 1904 /* Decode the size:Q fields for e.g. SHADD.
 1905    We tag one operand with the qualifier according to the code;
 1906    whether the qualifier is valid for this opcode is left to the
 1907    semantic checking.  */
1908
1909 static int
1910 decode_sizeq (aarch64_inst *inst)
1911 {
1912 int idx;
1913 enum aarch64_opnd_qualifier qualifier;
1914 aarch64_insn code;
1915 aarch64_insn value, mask;
1916 enum aarch64_field_kind fld_sz;
1917 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1918
1919 if (inst->opcode->iclass == asisdlse
1920 || inst->opcode->iclass == asisdlsep
1921 || inst->opcode->iclass == asisdlso
1922 || inst->opcode->iclass == asisdlsop)
1923 fld_sz = FLD_vldst_size;
1924 else
1925 fld_sz = FLD_size;
1926
1927 code = inst->value;
1928 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
 1929   /* Determine which bits of the fields Q and size are actually
 1930      available for operand encoding.  Opcodes like FMAXNM and FMLA have
 1931      size[1] unavailable.  */
1932 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1933
 1934   /* The index of the operand to tag with a qualifier and the qualifier
 1935      itself are deduced from the value of the size and Q fields and the
 1936      lists of possible valid qualifiers.  */
1937 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1938 DEBUG_TRACE ("key idx: %d", idx);
1939
 1940   /* For most of the related instructions, size:Q is fully available for
 1941      operand encoding.  */
1942 if (mask == 0x7)
1943 {
1944 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1945 return 1;
1946 }
1947
1948 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1949 candidates);
1950 #ifdef DEBUG_AARCH64
1951 if (debug_dump)
1952 {
1953 int i;
 1954       for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM
 1955 	   && candidates[i] != AARCH64_OPND_QLF_NIL; ++i)
1956 DEBUG_TRACE ("qualifier %d: %s", i,
1957 aarch64_get_qualifier_name(candidates[i]));
1958 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1959 }
1960 #endif /* DEBUG_AARCH64 */
1961
1962 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1963
1964 if (qualifier == AARCH64_OPND_QLF_NIL)
1965 return 0;
1966
1967 inst->operands[idx].qualifier = qualifier;
1968 return 1;
1969 }
1970
1971 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1972 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1973
1974 static int
1975 decode_asimd_fcvt (aarch64_inst *inst)
1976 {
1977 aarch64_field field = {0, 0};
1978 aarch64_insn value;
1979 enum aarch64_opnd_qualifier qualifier;
1980
1981 gen_sub_field (FLD_size, 0, 1, &field);
1982 value = extract_field_2 (&field, inst->value, 0);
1983 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1984 : AARCH64_OPND_QLF_V_2D;
1985 switch (inst->opcode->op)
1986 {
1987 case OP_FCVTN:
1988 case OP_FCVTN2:
1989 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1990 inst->operands[1].qualifier = qualifier;
1991 break;
1992 case OP_FCVTL:
1993 case OP_FCVTL2:
1994 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1995 inst->operands[0].qualifier = qualifier;
1996 break;
1997 default:
1998 assert (0);
1999 return 0;
2000 }
2001
2002 return 1;
2003 }
2004
2005 /* Decode size[0], i.e. bit 22, for
2006 e.g. FCVTXN <Vb><d>, <Va><n>. */
2007
2008 static int
2009 decode_asisd_fcvtxn (aarch64_inst *inst)
2010 {
2011 aarch64_field field = {0, 0};
2012 gen_sub_field (FLD_size, 0, 1, &field);
2013 if (!extract_field_2 (&field, inst->value, 0))
2014 return 0;
2015 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2016 return 1;
2017 }
2018
2019 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
2020 static int
2021 decode_fcvt (aarch64_inst *inst)
2022 {
2023 enum aarch64_opnd_qualifier qualifier;
2024 aarch64_insn value;
2025 const aarch64_field field = {15, 2};
2026
2027 /* opc dstsize */
2028 value = extract_field_2 (&field, inst->value, 0);
2029 switch (value)
2030 {
2031 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2032 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2033 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2034 default: return 0;
2035 }
2036 inst->operands[0].qualifier = qualifier;
2037
2038 return 1;
2039 }
2040
2041 /* Do miscellaneous decodings that are not common enough to be driven by
2042 flags. */
2043
2044 static int
2045 do_misc_decoding (aarch64_inst *inst)
2046 {
2047 unsigned int value;
2048 switch (inst->opcode->op)
2049 {
2050 case OP_FCVT:
2051 return decode_fcvt (inst);
2052
2053 case OP_FCVTN:
2054 case OP_FCVTN2:
2055 case OP_FCVTL:
2056 case OP_FCVTL2:
2057 return decode_asimd_fcvt (inst);
2058
2059 case OP_FCVTXN_S:
2060 return decode_asisd_fcvtxn (inst);
2061
2062 case OP_MOV_P_P:
2063 case OP_MOVS_P_P:
2064 value = extract_field (FLD_SVE_Pn, inst->value, 0);
2065 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
2066 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2067
2068 case OP_MOV_Z_P_Z:
2069 return (extract_field (FLD_SVE_Zd, inst->value, 0)
2070 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2071
2072 case OP_MOV_Z_V:
2073 /* Index must be zero. */
2074 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2075 return value > 0 && value <= 16 && value == (value & -value);
2076
2077 case OP_MOV_Z_Z:
2078 return (extract_field (FLD_SVE_Zn, inst->value, 0)
2079 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2080
2081 case OP_MOV_Z_Zi:
2082 /* Index must be nonzero. */
2083 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2084 return value > 0 && value != (value & -value);
2085
2086 case OP_MOVM_P_P_P:
2087 return (extract_field (FLD_SVE_Pd, inst->value, 0)
2088 == extract_field (FLD_SVE_Pm, inst->value, 0));
2089
2090 case OP_MOVZS_P_P_P:
2091 case OP_MOVZ_P_P_P:
2092 return (extract_field (FLD_SVE_Pn, inst->value, 0)
2093 == extract_field (FLD_SVE_Pm, inst->value, 0));
2094
2095 case OP_NOTS_P_P_P_Z:
2096 case OP_NOT_P_P_P_Z:
2097 return (extract_field (FLD_SVE_Pm, inst->value, 0)
2098 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2099
2100 default:
2101 return 0;
2102 }
2103 }
2104
 2105 /* Opcodes that have fields shared by multiple operands are usually flagged.
 2106    In this function, we detect such flags, decode the related field(s) and
 2107    store the information in one of the related operands.  The 'one' operand
 2108    is not just any operand but one of the operands that can accommodate all
 2109    the information that has been decoded.  */
2110
2111 static int
2112 do_special_decoding (aarch64_inst *inst)
2113 {
2114 int idx;
2115 aarch64_insn value;
2116 /* Condition for truly conditional executed instructions, e.g. b.cond. */
2117 if (inst->opcode->flags & F_COND)
2118 {
2119 value = extract_field (FLD_cond2, inst->value, 0);
2120 inst->cond = get_cond_from_value (value);
2121 }
2122 /* 'sf' field. */
2123 if (inst->opcode->flags & F_SF)
2124 {
2125 idx = select_operand_for_sf_field_coding (inst->opcode);
2126 value = extract_field (FLD_sf, inst->value, 0);
2127 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2128 if ((inst->opcode->flags & F_N)
2129 && extract_field (FLD_N, inst->value, 0) != value)
2130 return 0;
2131 }
2132 /* 'sf' field. */
2133 if (inst->opcode->flags & F_LSE_SZ)
2134 {
2135 idx = select_operand_for_sf_field_coding (inst->opcode);
2136 value = extract_field (FLD_lse_sz, inst->value, 0);
2137 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2138 }
2139 /* size:Q fields. */
2140 if (inst->opcode->flags & F_SIZEQ)
2141 return decode_sizeq (inst);
2142
2143 if (inst->opcode->flags & F_FPTYPE)
2144 {
2145 idx = select_operand_for_fptype_field_coding (inst->opcode);
2146 value = extract_field (FLD_type, inst->value, 0);
2147 switch (value)
2148 {
2149 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2150 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2151 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2152 default: return 0;
2153 }
2154 }
2155
2156 if (inst->opcode->flags & F_SSIZE)
2157 {
 2158       /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have size[1] as part
 2159 	 of the base opcode.  */
2160 aarch64_insn mask;
2161 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2162 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2163 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2164 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
 2165       /* For most of the related instructions, the 'size' field is fully
 2166 	 available for operand encoding.  */
2167 if (mask == 0x3)
2168 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2169 else
2170 {
2171 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2172 candidates);
2173 inst->operands[idx].qualifier
2174 = get_qualifier_from_partial_encoding (value, candidates, mask);
2175 }
2176 }
2177
2178 if (inst->opcode->flags & F_T)
2179 {
2180 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2181 int num = 0;
2182 unsigned val, Q;
2183 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2184 == AARCH64_OPND_CLASS_SIMD_REG);
2185 /* imm5<3:0> q <t>
2186 0000 x reserved
2187 xxx1 0 8b
2188 xxx1 1 16b
2189 xx10 0 4h
2190 xx10 1 8h
2191 x100 0 2s
2192 x100 1 4s
2193 1000 0 reserved
2194 1000 1 2d */
2195 val = extract_field (FLD_imm5, inst->value, 0);
2196 while ((val & 0x1) == 0 && ++num <= 3)
2197 val >>= 1;
2198 if (num > 3)
2199 return 0;
2200 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2201 inst->operands[0].qualifier =
2202 get_vreg_qualifier_from_value ((num << 1) | Q);
2203 }
2204
2205 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2206 {
2207 /* Use Rt to encode in the case of e.g.
2208 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2209 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2210 if (idx == -1)
2211 {
 2212 	  /* Otherwise use the result operand, which has to be an integer
 2213 	     register.  */
2214 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2215 == AARCH64_OPND_CLASS_INT_REG);
2216 idx = 0;
2217 }
2218 assert (idx == 0 || idx == 1);
2219 value = extract_field (FLD_Q, inst->value, 0);
2220 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2221 }
2222
2223 if (inst->opcode->flags & F_LDS_SIZE)
2224 {
2225 aarch64_field field = {0, 0};
2226 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2227 == AARCH64_OPND_CLASS_INT_REG);
2228 gen_sub_field (FLD_opc, 0, 1, &field);
2229 value = extract_field_2 (&field, inst->value, 0);
2230 inst->operands[0].qualifier
2231 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2232 }
2233
2234 /* Miscellaneous decoding; done as the last step. */
2235 if (inst->opcode->flags & F_MISC)
2236 return do_misc_decoding (inst);
2237
2238 return 1;
2239 }
2240
2241 /* Converters converting a real opcode instruction to its alias form. */
2242
2243 /* ROR <Wd>, <Ws>, #<shift>
2244 is equivalent to:
2245 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2246 static int
2247 convert_extr_to_ror (aarch64_inst *inst)
2248 {
2249 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2250 {
2251 copy_operand_info (inst, 2, 3);
2252 inst->operands[3].type = AARCH64_OPND_NIL;
2253 return 1;
2254 }
2255 return 0;
2256 }
2257
2258 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2259 is equivalent to:
2260 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2261 static int
2262 convert_shll_to_xtl (aarch64_inst *inst)
2263 {
2264 if (inst->operands[2].imm.value == 0)
2265 {
2266 inst->operands[2].type = AARCH64_OPND_NIL;
2267 return 1;
2268 }
2269 return 0;
2270 }
2271
2272 /* Convert
2273 UBFM <Xd>, <Xn>, #<shift>, #63.
2274 to
2275 LSR <Xd>, <Xn>, #<shift>. */
2276 static int
2277 convert_bfm_to_sr (aarch64_inst *inst)
2278 {
2279 int64_t imms, val;
2280
2281 imms = inst->operands[3].imm.value;
2282 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2283 if (imms == val)
2284 {
2285 inst->operands[3].type = AARCH64_OPND_NIL;
2286 return 1;
2287 }
2288
2289 return 0;
2290 }
2291
2292 /* Convert MOV to ORR. */
2293 static int
2294 convert_orr_to_mov (aarch64_inst *inst)
2295 {
2296 /* MOV <Vd>.<T>, <Vn>.<T>
2297 is equivalent to:
2298 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2299 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2300 {
2301 inst->operands[2].type = AARCH64_OPND_NIL;
2302 return 1;
2303 }
2304 return 0;
2305 }
2306
2307 /* When <imms> >= <immr>, the instruction written:
2308 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2309 is equivalent to:
2310 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2311
2312 static int
2313 convert_bfm_to_bfx (aarch64_inst *inst)
2314 {
2315 int64_t immr, imms;
2316
2317 immr = inst->operands[2].imm.value;
2318 imms = inst->operands[3].imm.value;
2319 if (imms >= immr)
2320 {
2321 int64_t lsb = immr;
2322 inst->operands[2].imm.value = lsb;
2323 inst->operands[3].imm.value = imms + 1 - lsb;
2324 /* The two opcodes have different qualifiers for
2325 the immediate operands; reset to help the checking. */
2326 reset_operand_qualifier (inst, 2);
2327 reset_operand_qualifier (inst, 3);
2328 return 1;
2329 }
2330
2331 return 0;
2332 }
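/* For example (register numbers chosen for illustration):
   UBFM X0, X1, #4, #11 has <imms> (11) >= <immr> (4), so it is converted to
   UBFX X0, X1, #4, #8, with lsb = 4 and width = 11 + 1 - 4 = 8.  */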
2333
2334 /* When <imms> < <immr>, the instruction written:
2335 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2336 is equivalent to:
2337 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2338
2339 static int
2340 convert_bfm_to_bfi (aarch64_inst *inst)
2341 {
2342 int64_t immr, imms, val;
2343
2344 immr = inst->operands[2].imm.value;
2345 imms = inst->operands[3].imm.value;
2346 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2347 if (imms < immr)
2348 {
2349 inst->operands[2].imm.value = (val - immr) & (val - 1);
2350 inst->operands[3].imm.value = imms + 1;
2351 /* The two opcodes have different qualifiers for
2352 the immediate operands; reset to help the checking. */
2353 reset_operand_qualifier (inst, 2);
2354 reset_operand_qualifier (inst, 3);
2355 return 1;
2356 }
2357
2358 return 0;
2359 }
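/* For example (register numbers chosen for illustration):
   SBFM X0, X1, #60, #7 has <imms> (7) < <immr> (60), so it is converted to
   SBFIZ X0, X1, #4, #8, with lsb = (64 - 60) & 0x3f = 4 and
   width = 7 + 1 = 8.  */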
2360
2361 /* The instruction written:
2362 BFC <Xd>, #<lsb>, #<width>
2363 is equivalent to:
2364 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2365
2366 static int
2367 convert_bfm_to_bfc (aarch64_inst *inst)
2368 {
2369 int64_t immr, imms, val;
2370
2371 /* Should have been assured by the base opcode value. */
2372 assert (inst->operands[1].reg.regno == 0x1f);
2373
2374 immr = inst->operands[2].imm.value;
2375 imms = inst->operands[3].imm.value;
2376 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2377 if (imms < immr)
2378 {
2379 /* Drop XZR from the second operand. */
2380 copy_operand_info (inst, 1, 2);
2381 copy_operand_info (inst, 2, 3);
2382 inst->operands[3].type = AARCH64_OPND_NIL;
2383
2384 /* Recalculate the immediates. */
2385 inst->operands[1].imm.value = (val - immr) & (val - 1);
2386 inst->operands[2].imm.value = imms + 1;
2387
2388 /* The two opcodes have different qualifiers for the operands; reset to
2389 help the checking. */
2390 reset_operand_qualifier (inst, 1);
2391 reset_operand_qualifier (inst, 2);
2392 reset_operand_qualifier (inst, 3);
2393
2394 return 1;
2395 }
2396
2397 return 0;
2398 }
2399
2400 /* The instruction written:
2401 LSL <Xd>, <Xn>, #<shift>
2402 is equivalent to:
2403 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2404
2405 static int
2406 convert_ubfm_to_lsl (aarch64_inst *inst)
2407 {
2408 int64_t immr = inst->operands[2].imm.value;
2409 int64_t imms = inst->operands[3].imm.value;
2410 int64_t val
2411 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2412
2413 if ((immr == 0 && imms == val) || immr == imms + 1)
2414 {
2415 inst->operands[3].type = AARCH64_OPND_NIL;
2416 inst->operands[2].imm.value = val - imms;
2417 return 1;
2418 }
2419
2420 return 0;
2421 }
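/* For example (register numbers chosen for illustration):
   UBFM X0, X1, #61, #60 satisfies immr == imms + 1, so it is converted to
   LSL X0, X1, #3, the shift amount being recovered as 63 - 60 = 3.  */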
2422
2423 /* CINC <Wd>, <Wn>, <cond>
2424 is equivalent to:
2425 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2426 where <cond> is not AL or NV. */
2427
2428 static int
2429 convert_from_csel (aarch64_inst *inst)
2430 {
2431 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2432 && (inst->operands[3].cond->value & 0xe) != 0xe)
2433 {
2434 copy_operand_info (inst, 2, 3);
2435 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2436 inst->operands[3].type = AARCH64_OPND_NIL;
2437 return 1;
2438 }
2439 return 0;
2440 }
2441
2442 /* CSET <Wd>, <cond>
2443 is equivalent to:
2444 CSINC <Wd>, WZR, WZR, invert(<cond>)
2445 where <cond> is not AL or NV. */
2446
2447 static int
2448 convert_csinc_to_cset (aarch64_inst *inst)
2449 {
2450 if (inst->operands[1].reg.regno == 0x1f
2451 && inst->operands[2].reg.regno == 0x1f
2452 && (inst->operands[3].cond->value & 0xe) != 0xe)
2453 {
2454 copy_operand_info (inst, 1, 3);
2455 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2456 inst->operands[3].type = AARCH64_OPND_NIL;
2457 inst->operands[2].type = AARCH64_OPND_NIL;
2458 return 1;
2459 }
2460 return 0;
2461 }
2462
2463 /* MOV <Wd>, #<imm>
2464 is equivalent to:
2465 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2466
2467 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2468 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2469 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2470 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2471 machine-instruction mnemonic must be used. */
2472
2473 static int
2474 convert_movewide_to_mov (aarch64_inst *inst)
2475 {
2476 uint64_t value = inst->operands[1].imm.value;
2477 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2478 if (value == 0 && inst->operands[1].shifter.amount != 0)
2479 return 0;
2480 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2481 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2482 value <<= inst->operands[1].shifter.amount;
 2483   /* As an alias converter, bear in mind that INST->OPCODE is the opcode
 2484      of the real instruction.  */
2485 if (inst->opcode->op == OP_MOVN)
2486 {
2487 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2488 value = ~value;
2489 /* A MOVN has an immediate that could be encoded by MOVZ. */
2490 if (aarch64_wide_constant_p (value, is32, NULL))
2491 return 0;
2492 }
2493 inst->operands[1].imm.value = value;
2494 inst->operands[1].shifter.amount = 0;
2495 return 1;
2496 }
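/* For example (register numbers chosen for illustration):
   MOVZ W0, #0x12, LSL #16 is shown as MOV W0, #0x120000, while
   MOVZ W0, #0x0, LSL #16 keeps the machine mnemonic because the immediate
   is zero with a non-zero shift amount.  */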
2497
2498 /* MOV <Wd>, #<imm>
2499 is equivalent to:
2500 ORR <Wd>, WZR, #<imm>.
2501
2502 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2503 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2504 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2505 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2506 machine-instruction mnemonic must be used. */
2507
2508 static int
2509 convert_movebitmask_to_mov (aarch64_inst *inst)
2510 {
2511 int is32;
2512 uint64_t value;
2513
2514 /* Should have been assured by the base opcode value. */
2515 assert (inst->operands[1].reg.regno == 0x1f);
2516 copy_operand_info (inst, 1, 2);
2517 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2518 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2519 value = inst->operands[1].imm.value;
2520 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2521 instruction. */
2522 if (inst->operands[0].reg.regno != 0x1f
2523 && (aarch64_wide_constant_p (value, is32, NULL)
2524 || aarch64_wide_constant_p (~value, is32, NULL)))
2525 return 0;
2526
2527 inst->operands[2].type = AARCH64_OPND_NIL;
2528 return 1;
2529 }
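/* For example (register numbers chosen for illustration):
   ORR W0, WZR, #0x55555555 is shown as MOV W0, #0x55555555, since neither
   that immediate nor its complement can be encoded by MOVZ/MOVN;
   ORR W0, WZR, #0xff000000 keeps the ORR mnemonic because the immediate
   could also be produced by MOVZ W0, #0xff00, LSL #16.  */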
2530
 2531 /* Some alias opcodes are disassembled by being converted from their real form.
 2532    N.B. INST->OPCODE is the real opcode rather than the alias.  */
2533
2534 static int
2535 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2536 {
2537 switch (alias->op)
2538 {
2539 case OP_ASR_IMM:
2540 case OP_LSR_IMM:
2541 return convert_bfm_to_sr (inst);
2542 case OP_LSL_IMM:
2543 return convert_ubfm_to_lsl (inst);
2544 case OP_CINC:
2545 case OP_CINV:
2546 case OP_CNEG:
2547 return convert_from_csel (inst);
2548 case OP_CSET:
2549 case OP_CSETM:
2550 return convert_csinc_to_cset (inst);
2551 case OP_UBFX:
2552 case OP_BFXIL:
2553 case OP_SBFX:
2554 return convert_bfm_to_bfx (inst);
2555 case OP_SBFIZ:
2556 case OP_BFI:
2557 case OP_UBFIZ:
2558 return convert_bfm_to_bfi (inst);
2559 case OP_BFC:
2560 return convert_bfm_to_bfc (inst);
2561 case OP_MOV_V:
2562 return convert_orr_to_mov (inst);
2563 case OP_MOV_IMM_WIDE:
2564 case OP_MOV_IMM_WIDEN:
2565 return convert_movewide_to_mov (inst);
2566 case OP_MOV_IMM_LOG:
2567 return convert_movebitmask_to_mov (inst);
2568 case OP_ROR_IMM:
2569 return convert_extr_to_ror (inst);
2570 case OP_SXTL:
2571 case OP_SXTL2:
2572 case OP_UXTL:
2573 case OP_UXTL2:
2574 return convert_shll_to_xtl (inst);
2575 default:
2576 return 0;
2577 }
2578 }
2579
2580 static bfd_boolean
2581 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2582 aarch64_inst *, int, aarch64_operand_error *errors);
2583
2584 /* Given the instruction information in *INST, check if the instruction has
2585 any alias form that can be used to represent *INST. If the answer is yes,
2586 update *INST to be in the form of the determined alias. */
2587
2588 /* In the opcode description table, the following flags are used in opcode
2589 entries to help establish the relations between the real and alias opcodes:
2590
2591 F_ALIAS: opcode is an alias
2592 F_HAS_ALIAS: opcode has alias(es)
2593 F_P1
2594 F_P2
 2595      F_P3: Disassembly preference priority 1-3 (the larger the number,
 2596 	   the higher the priority).  If nothing is specified, the priority
 2597 	   defaults to 0, i.e. the lowest priority.
2598
 2599    Although the relation between the machine and the alias instructions is not
2600 explicitly described, it can be easily determined from the base opcode
2601 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2602 description entries:
2603
2604 The mask of an alias opcode must be equal to or a super-set (i.e. more
2605 constrained) of that of the aliased opcode; so is the base opcode value.
2606
2607 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2608 && (opcode->mask & real->mask) == real->mask
2609 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2610 then OPCODE is an alias of, and only of, the REAL instruction
2611
2612 The alias relationship is forced flat-structured to keep related algorithm
2613 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2614
 2615    During disassembling, the decoding decision tree (in
 2616    opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
 2617    if the decoding of such a machine instruction succeeds (and -Mno-aliases is
 2618    not specified), the disassembler will check whether any alias instruction
 2619    exists for this real instruction.  If there is, the disassembler will try
 2620    to disassemble the 32-bit binary again using the alias's rule, or try to
 2621    convert the IR to the form of the alias.  In the case of multiple aliases,
 2622    the aliases are tried one by one from the highest priority (currently the
 2623    flag F_P3) to the lowest priority (no priority flag), and the first one
 2624    that succeeds is adopted.
2625
 2626    You may ask why there is a need to convert the IR from one form to
 2627    another when handling certain aliases.  On one hand it avoids adding
 2628    more operand code to handle unusual encoding/decoding; on the other
 2629    hand, during disassembling, the conversion is an effective approach to
 2630    check the conditions of an alias (as an alias may be adopted only if
 2631    certain conditions are met).
2632
2633 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2634 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2635 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
2636
2637 static void
2638 determine_disassembling_preference (struct aarch64_inst *inst,
2639 aarch64_operand_error *errors)
2640 {
2641 const aarch64_opcode *opcode;
2642 const aarch64_opcode *alias;
2643
2644 opcode = inst->opcode;
2645
2646 /* This opcode does not have an alias, so use itself. */
2647 if (!opcode_has_alias (opcode))
2648 return;
2649
2650 alias = aarch64_find_alias_opcode (opcode);
2651 assert (alias);
2652
2653 #ifdef DEBUG_AARCH64
2654 if (debug_dump)
2655 {
2656 const aarch64_opcode *tmp = alias;
 2657       printf ("#### LIST ordered: ");
2658 while (tmp)
2659 {
2660 printf ("%s, ", tmp->name);
2661 tmp = aarch64_find_next_alias_opcode (tmp);
2662 }
2663 printf ("\n");
2664 }
2665 #endif /* DEBUG_AARCH64 */
2666
2667 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2668 {
2669 DEBUG_TRACE ("try %s", alias->name);
2670 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2671
2672 /* An alias can be a pseudo opcode which will never be used in the
2673 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2674 aliasing AND. */
2675 if (pseudo_opcode_p (alias))
2676 {
2677 DEBUG_TRACE ("skip pseudo %s", alias->name);
2678 continue;
2679 }
2680
2681 if ((inst->value & alias->mask) != alias->opcode)
2682 {
2683 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2684 continue;
2685 }
2686 /* No need to do any complicated transformation on operands, if the alias
2687 opcode does not have any operand. */
2688 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2689 {
2690 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2691 aarch64_replace_opcode (inst, alias);
2692 return;
2693 }
2694 if (alias->flags & F_CONV)
2695 {
2696 aarch64_inst copy;
2697 memcpy (&copy, inst, sizeof (aarch64_inst));
2698 /* ALIAS is the preference as long as the instruction can be
2699 successfully converted to the form of ALIAS. */
2700 if (convert_to_alias (&copy, alias) == 1)
2701 {
2702 aarch64_replace_opcode (&copy, alias);
2703 assert (aarch64_match_operands_constraint (&copy, NULL));
2704 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2705 memcpy (inst, &copy, sizeof (aarch64_inst));
2706 return;
2707 }
2708 }
2709 else
2710 {
2711 /* Directly decode the alias opcode. */
2712 aarch64_inst temp;
2713 memset (&temp, '\0', sizeof (aarch64_inst));
2714 if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
2715 {
2716 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2717 memcpy (inst, &temp, sizeof (aarch64_inst));
2718 return;
2719 }
2720 }
2721 }
2722 }
2723
2724 /* Some instructions (including all SVE ones) use the instruction class
2725 to describe how a qualifiers_list index is represented in the instruction
2726 encoding. If INST is such an instruction, decode the appropriate fields
2727 and fill in the operand qualifiers accordingly. Return true if no
2728 problems are found. */
2729
2730 static bfd_boolean
2731 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2732 {
2733 int i, variant;
2734
2735 variant = 0;
2736 switch (inst->opcode->iclass)
2737 {
2738 case sve_cpy:
2739 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2740 break;
2741
2742 case sve_index:
2743 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2744 if ((i & 31) == 0)
2745 return FALSE;
2746 while ((i & 1) == 0)
2747 {
2748 i >>= 1;
2749 variant += 1;
2750 }
2751 break;
2752
2753 case sve_limm:
2754 /* Pick the smallest applicable element size. */
2755 if ((inst->value & 0x20600) == 0x600)
2756 variant = 0;
2757 else if ((inst->value & 0x20400) == 0x400)
2758 variant = 1;
2759 else if ((inst->value & 0x20000) == 0)
2760 variant = 2;
2761 else
2762 variant = 3;
2763 break;
2764
2765 case sve_misc:
2766 /* sve_misc instructions have only a single variant. */
2767 break;
2768
2769 case sve_movprfx:
2770 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2771 break;
2772
2773 case sve_pred_zm:
2774 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2775 break;
2776
2777 case sve_shift_pred:
2778 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2779 sve_shift:
2780 if (i == 0)
2781 return FALSE;
2782 while (i != 1)
2783 {
2784 i >>= 1;
2785 variant += 1;
2786 }
2787 break;
2788
2789 case sve_shift_unpred:
2790 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2791 goto sve_shift;
2792
2793 case sve_size_bhs:
2794 variant = extract_field (FLD_size, inst->value, 0);
2795 if (variant >= 3)
2796 return FALSE;
2797 break;
2798
2799 case sve_size_bhsd:
2800 variant = extract_field (FLD_size, inst->value, 0);
2801 break;
2802
2803 case sve_size_hsd:
2804 i = extract_field (FLD_size, inst->value, 0);
2805 if (i < 1)
2806 return FALSE;
2807 variant = i - 1;
2808 break;
2809
2810 case sve_size_bh:
2811 case sve_size_sd:
2812 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2813 break;
2814
2815 case sve_size_sd2:
2816 variant = extract_field (FLD_SVE_sz2, inst->value, 0);
2817 break;
2818
2819 case sve_size_hsd2:
2820 i = extract_field (FLD_SVE_size, inst->value, 0);
2821 if (i < 1)
2822 return FALSE;
2823 variant = i - 1;
2824 break;
2825
2826 case sve_size_13:
2827 /* Ignore low bit of this field since that is set in the opcode for
2828 instructions of this iclass. */
2829 i = (extract_field (FLD_size, inst->value, 0) & 2);
2830 variant = (i >> 1);
2831 break;
2832
2833 case sve_shift_tsz_bhsd:
2834 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2835 if (i == 0)
2836 return FALSE;
2837 while (i != 1)
2838 {
2839 i >>= 1;
2840 variant += 1;
2841 }
2842 break;
2843
2844 case sve_size_tsz_bhs:
2845 i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
2846 if (i == 0)
2847 return FALSE;
2848 while (i != 1)
2849 {
2850 if (i & 1)
2851 return FALSE;
2852 i >>= 1;
2853 variant += 1;
2854 }
2855 break;
2856
2857 case sve_shift_tsz_hsd:
2858 i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
2859 if (i == 0)
2860 return FALSE;
2861 while (i != 1)
2862 {
2863 i >>= 1;
2864 variant += 1;
2865 }
2866 break;
2867
2868 default:
2869 /* No mapping between instruction class and qualifiers. */
2870 return TRUE;
2871 }
2872
2873 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2874 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2875 return TRUE;
2876 }
2877 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
 2878    fails, which means that CODE is not an instruction of OPCODE; otherwise
2879 return 1.
2880
2881 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2882 determined and used to disassemble CODE; this is done just before the
2883 return. */
2884
2885 static bfd_boolean
2886 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2887 aarch64_inst *inst, int noaliases_p,
2888 aarch64_operand_error *errors)
2889 {
2890 int i;
2891
2892 DEBUG_TRACE ("enter with %s", opcode->name);
2893
2894 assert (opcode && inst);
2895
2896 /* Clear inst. */
2897 memset (inst, '\0', sizeof (aarch64_inst));
2898
2899 /* Check the base opcode. */
2900 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2901 {
2902 DEBUG_TRACE ("base opcode match FAIL");
2903 goto decode_fail;
2904 }
2905
2906 inst->opcode = opcode;
2907 inst->value = code;
2908
2909 /* Assign operand codes and indexes. */
2910 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2911 {
2912 if (opcode->operands[i] == AARCH64_OPND_NIL)
2913 break;
2914 inst->operands[i].type = opcode->operands[i];
2915 inst->operands[i].idx = i;
2916 }
2917
2918 /* Call the opcode decoder indicated by flags. */
2919 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2920 {
2921 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2922 goto decode_fail;
2923 }
2924
2925 /* Possibly use the instruction class to determine the correct
2926 qualifier. */
2927 if (!aarch64_decode_variant_using_iclass (inst))
2928 {
2929 DEBUG_TRACE ("iclass-based decoder FAIL");
2930 goto decode_fail;
2931 }
2932
2933 /* Call operand decoders. */
2934 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2935 {
2936 const aarch64_operand *opnd;
2937 enum aarch64_opnd type;
2938
2939 type = opcode->operands[i];
2940 if (type == AARCH64_OPND_NIL)
2941 break;
2942 opnd = &aarch64_operands[type];
2943 if (operand_has_extractor (opnd)
2944 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
2945 errors)))
2946 {
2947 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2948 goto decode_fail;
2949 }
2950 }
2951
2952 /* If the opcode has a verifier, then check it now. */
2953 if (opcode->verifier
2954 && opcode->verifier (inst, code, 0, FALSE, errors, NULL) != ERR_OK)
2955 {
2956 DEBUG_TRACE ("operand verifier FAIL");
2957 goto decode_fail;
2958 }
2959
2960 /* Match the qualifiers. */
2961 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2962 {
2963 /* Arriving here, the CODE has been determined as a valid instruction
2964 of OPCODE and *INST has been filled with information of this OPCODE
2965 instruction. Before the return, check if the instruction has any
2966 alias and should be disassembled in the form of its alias instead.
2967 If the answer is yes, *INST will be updated. */
2968 if (!noaliases_p)
2969 determine_disassembling_preference (inst, errors);
2970 DEBUG_TRACE ("SUCCESS");
2971 return TRUE;
2972 }
2973 else
2974 {
2975 DEBUG_TRACE ("constraint matching FAIL");
2976 }
2977
2978 decode_fail:
2979 return FALSE;
2980 }
2981 \f
 2982 /* This does some user-friendly fix-up to *INST.  It currently focuses on
 2983    adjusting qualifiers to help the printed instruction be recognized and
 2984    understood more easily.  */
2985
2986 static void
2987 user_friendly_fixup (aarch64_inst *inst)
2988 {
2989 switch (inst->opcode->iclass)
2990 {
2991 case testbranch:
2992 /* TBNZ Xn|Wn, #uimm6, label
2993 Test and Branch Not Zero: conditionally jumps to label if bit number
2994 uimm6 in register Xn is not zero. The bit number implies the width of
2995 the register, which may be written and should be disassembled as Wn if
2996 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2997 */
2998 if (inst->operands[1].imm.value < 32)
2999 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
3000 break;
3001 default: break;
3002 }
3003 }
3004
 3005 /* Decode INSN and fill *INST with the instruction information.  An alias
 3006    opcode may be filled in *INST if NOALIASES_P is FALSE.  Return ERR_OK
 3007    on success.  */
3008
3009 enum err_type
3010 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
3011 bfd_boolean noaliases_p,
3012 aarch64_operand_error *errors)
3013 {
3014 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
3015
3016 #ifdef DEBUG_AARCH64
3017 if (debug_dump)
3018 {
3019 const aarch64_opcode *tmp = opcode;
3020 printf ("\n");
3021 DEBUG_TRACE ("opcode lookup:");
3022 while (tmp != NULL)
3023 {
3024 aarch64_verbose (" %s", tmp->name);
3025 tmp = aarch64_find_next_opcode (tmp);
3026 }
3027 }
3028 #endif /* DEBUG_AARCH64 */
3029
3030 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
3031 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
3032 opcode field and value, apart from the difference that one of them has an
3033 extra field as part of the opcode, but such a field is used for operand
3034 encoding in other opcode(s) ('immh' in the case of the example). */
3035 while (opcode != NULL)
3036 {
 3037       /* But only one opcode can be successfully decoded, as the decoding
 3038 	 routine will check the constraints carefully.  */
3039 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
3040 return ERR_OK;
3041 opcode = aarch64_find_next_opcode (opcode);
3042 }
3043
3044 return ERR_UND;
3045 }
3046
3047 /* Print operands. */
3048
3049 static void
3050 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
3051 const aarch64_opnd_info *opnds, struct disassemble_info *info,
3052 bfd_boolean *has_notes)
3053 {
3054 char *notes = NULL;
3055 int i, pcrel_p, num_printed;
3056 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3057 {
3058 char str[128];
 3059       /* We primarily rely on the operand info in the opcode; however we
 3060 	 also look into inst->operands to support the disassembling of the
 3061 	 optional operand.
 3062 	 The two operand codes should be the same in all cases, apart from
 3063 	 when the operand can be optional.  */
3064 if (opcode->operands[i] == AARCH64_OPND_NIL
3065 || opnds[i].type == AARCH64_OPND_NIL)
3066 break;
3067
3068 /* Generate the operand string in STR. */
3069 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
3070 &info->target, &notes);
3071
3072 /* Print the delimiter (taking account of omitted operand(s)). */
3073 if (str[0] != '\0')
3074 (*info->fprintf_func) (info->stream, "%s",
3075 num_printed++ == 0 ? "\t" : ", ");
3076
3077 /* Print the operand. */
3078 if (pcrel_p)
3079 (*info->print_address_func) (info->target, info);
3080 else
3081 (*info->fprintf_func) (info->stream, "%s", str);
3082 }
3083
3084 if (notes && !no_notes)
3085 {
3086 *has_notes = TRUE;
3087 (*info->fprintf_func) (info->stream, " // note: %s", notes);
3088 }
3089 }
3090
3091 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
3092
3093 static void
3094 remove_dot_suffix (char *name, const aarch64_inst *inst)
3095 {
3096 char *ptr;
3097 size_t len;
3098
3099 ptr = strchr (inst->opcode->name, '.');
3100 assert (ptr && inst->cond);
3101 len = ptr - inst->opcode->name;
3102 assert (len < 8);
3103 strncpy (name, inst->opcode->name, len);
3104 name[len] = '\0';
3105 }
3106
3107 /* Print the instruction mnemonic name. */
3108
3109 static void
3110 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3111 {
3112 if (inst->opcode->flags & F_COND)
3113 {
3114 /* For instructions that are truly conditionally executed, e.g. b.cond,
3115 prepare the full mnemonic name with the corresponding condition
3116 suffix. */
3117 char name[8];
3118
3119 remove_dot_suffix (name, inst);
3120 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
3121 }
3122 else
3123 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
3124 }
3125
3126 /* Decide whether we need to print a comment after the operands of
3127 instruction INST. */
3128
3129 static void
3130 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
3131 {
3132 if (inst->opcode->flags & F_COND)
3133 {
3134 char name[8];
3135 unsigned int i, num_conds;
3136
3137 remove_dot_suffix (name, inst);
3138 num_conds = ARRAY_SIZE (inst->cond->names);
3139 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
3140 (*info->fprintf_func) (info->stream, "%s %s.%s",
3141 i == 1 ? " //" : ",",
3142 name, inst->cond->names[i]);
3143 }
3144 }
3145
3146 /* Build notes from verifiers into a string for printing. */
3147
3148 static void
3149 print_verifier_notes (aarch64_operand_error *detail,
3150 struct disassemble_info *info)
3151 {
3152 if (no_notes)
3153 return;
3154
3155 /* The output of the verifier cannot be a fatal error, otherwise the assembly
3156 would not have succeeded. We can safely ignore these. */
3157 assert (detail->non_fatal);
3158 assert (detail->error);
3159
3160 /* If there are multiple verifier messages, concat them up to 1k. */
3161 (*info->fprintf_func) (info->stream, " // note: %s", detail->error);
3162 if (detail->index >= 0)
3163 (*info->fprintf_func) (info->stream, " at operand %d", detail->index + 1);
3164 }
3165
3166 /* Print the instruction according to *INST. */
3167
3168 static void
3169 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
3170 const aarch64_insn code,
3171 struct disassemble_info *info,
3172 aarch64_operand_error *mismatch_details)
3173 {
3174 bfd_boolean has_notes = FALSE;
3175
3176 print_mnemonic_name (inst, info);
3177 print_operands (pc, inst->opcode, inst->operands, info, &has_notes);
3178 print_comment (inst, info);
3179
 3180   /* We've already printed a note; there is not enough space to print more,
 3181      so exit.  Notes usually shouldn't overlap, so it shouldn't happen that we
 3182      have a note from a register and the instruction at the same time.  */
3183 if (has_notes)
3184 return;
3185
 3186   /* Always run the constraint verifiers; this is needed because constraints
 3187      need to maintain a global state regardless of whether the instruction has
 3188      the flag set or not.  */
3189 enum err_type result = verify_constraints (inst, code, pc, FALSE,
3190 mismatch_details, &insn_sequence);
3191 switch (result)
3192 {
3193 case ERR_UND:
3194 case ERR_UNP:
3195 case ERR_NYI:
3196 assert (0);
3197 case ERR_VFI:
3198 print_verifier_notes (mismatch_details, info);
3199 break;
3200 default:
3201 break;
3202 }
3203 }
3204
3205 /* Entry-point of the instruction disassembler and printer. */
3206
3207 static void
3208 print_insn_aarch64_word (bfd_vma pc,
3209 uint32_t word,
3210 struct disassemble_info *info,
3211 aarch64_operand_error *errors)
3212 {
3213 static const char *err_msg[ERR_NR_ENTRIES+1] =
3214 {
3215 [ERR_OK] = "_",
3216 [ERR_UND] = "undefined",
3217 [ERR_UNP] = "unpredictable",
3218 [ERR_NYI] = "NYI"
3219 };
3220
3221 enum err_type ret;
3222 aarch64_inst inst;
3223
3224 info->insn_info_valid = 1;
3225 info->branch_delay_insns = 0;
3226 info->data_size = 0;
3227 info->target = 0;
3228 info->target2 = 0;
3229
3230 if (info->flags & INSN_HAS_RELOC)
3231 /* If the instruction has a reloc associated with it, then
3232 the offset field in the instruction will actually be the
3233 addend for the reloc. (If we are using REL type relocs).
3234 In such cases, we can ignore the pc when computing
3235 addresses, since the addend is not currently pc-relative. */
3236 pc = 0;
3237
3238 ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
3239
3240 if (((word >> 21) & 0x3ff) == 1)
3241 {
3242 /* RESERVED for ALES. */
3243 assert (ret != ERR_OK);
3244 ret = ERR_NYI;
3245 }
3246
3247 switch (ret)
3248 {
3249 case ERR_UND:
3250 case ERR_UNP:
3251 case ERR_NYI:
3252 /* Handle undefined instructions. */
3253 info->insn_type = dis_noninsn;
3254 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
3255 word, err_msg[ret]);
3256 break;
3257 case ERR_OK:
3258 user_friendly_fixup (&inst);
3259 print_aarch64_insn (pc, &inst, word, info, errors);
3260 break;
3261 default:
3262 abort ();
3263 }
3264 }
3265
 3266 /* Prevent mapping symbols ($x, $d, etc.) from
 3267    being displayed in symbol relative addresses.  */
3268
3269 bfd_boolean
3270 aarch64_symbol_is_valid (asymbol * sym,
3271 struct disassemble_info * info ATTRIBUTE_UNUSED)
3272 {
3273 const char * name;
3274
3275 if (sym == NULL)
3276 return FALSE;
3277
3278 name = bfd_asymbol_name (sym);
3279
3280 return name
3281 && (name[0] != '$'
3282 || (name[1] != 'x' && name[1] != 'd')
3283 || (name[2] != '\0' && name[2] != '.'));
3284 }
3285
3286 /* Print data bytes on INFO->STREAM. */
3287
3288 static void
3289 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3290 uint32_t word,
3291 struct disassemble_info *info,
3292 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
3293 {
3294 switch (info->bytes_per_chunk)
3295 {
3296 case 1:
3297 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3298 break;
3299 case 2:
3300 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3301 break;
3302 case 4:
3303 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3304 break;
3305 default:
3306 abort ();
3307 }
3308 }
3309
3310 /* Try to infer the code or data type from a symbol.
3311 Returns nonzero if *MAP_TYPE was set. */
3312
3313 static int
3314 get_sym_code_type (struct disassemble_info *info, int n,
3315 enum map_type *map_type)
3316 {
3317 elf_symbol_type *es;
3318 unsigned int type;
3319 const char *name;
3320
3321 /* If the symbol is in a different section, ignore it. */
3322 if (info->section != NULL && info->section != info->symtab[n]->section)
3323 return FALSE;
3324
3325 es = *(elf_symbol_type **)(info->symtab + n);
3326 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3327
3328 /* If the symbol has function type then use that. */
3329 if (type == STT_FUNC)
3330 {
3331 *map_type = MAP_INSN;
3332 return TRUE;
3333 }
3334
3335 /* Check for mapping symbols. */
3336 name = bfd_asymbol_name(info->symtab[n]);
3337 if (name[0] == '$'
3338 && (name[1] == 'x' || name[1] == 'd')
3339 && (name[2] == '\0' || name[2] == '.'))
3340 {
3341 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3342 return TRUE;
3343 }
3344
3345 return FALSE;
3346 }
3347
3348 /* Entry-point of the AArch64 disassembler. */
3349
3350 int
3351 print_insn_aarch64 (bfd_vma pc,
3352 struct disassemble_info *info)
3353 {
3354 bfd_byte buffer[INSNLEN];
3355 int status;
3356 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *,
3357 aarch64_operand_error *);
3358 bfd_boolean found = FALSE;
3359 unsigned int size = 4;
3360 unsigned long data;
3361 aarch64_operand_error errors;
3362
3363 if (info->disassembler_options)
3364 {
3365 set_default_aarch64_dis_options (info);
3366
3367 parse_aarch64_dis_options (info->disassembler_options);
3368
3369 /* To avoid repeated parsing of these options, we remove them here. */
3370 info->disassembler_options = NULL;
3371 }
3372
 3373   /* AArch64 instructions are always little-endian.  */
3374 info->endian_code = BFD_ENDIAN_LITTLE;
3375
3376 /* Default to DATA. A text section is required by the ABI to contain an
3377 INSN mapping symbol at the start. A data section has no such
3378 requirement, hence if no mapping symbol is found the section must
3379 contain only data. This however isn't very useful if the user has
3380 fully stripped the binaries. If this is the case use the section
3381 attributes to determine the default. If we have no section default to
3382 INSN as well, as we may be disassembling some raw bytes on a baremetal
3383 HEX file or similar. */
3384 enum map_type type = MAP_DATA;
3385 if ((info->section && info->section->flags & SEC_CODE) || !info->section)
3386 type = MAP_INSN;
3387
3388 /* First check the full symtab for a mapping symbol, even if there
3389 are no usable non-mapping symbols for this address. */
3390 if (info->symtab_size != 0
3391 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3392 {
3393 int last_sym = -1;
3394 bfd_vma addr, section_vma = 0;
3395 bfd_boolean can_use_search_opt_p;
3396 int n;
3397
3398 if (pc <= last_mapping_addr)
3399 last_mapping_sym = -1;
3400
3401 /* Start scanning at the start of the function, or wherever
3402 we finished last time. */
3403 n = info->symtab_pos + 1;
3404
3405 /* If the last stop offset is different from the current one it means we
3406 are disassembling a different glob of bytes. As such the optimization
3407 would not be safe and we should start over. */
3408 can_use_search_opt_p = last_mapping_sym >= 0
3409 && info->stop_offset == last_stop_offset;
3410
3411 if (n >= last_mapping_sym && can_use_search_opt_p)
3412 n = last_mapping_sym;
3413
3414 /* Look down while we haven't passed the location being disassembled.
3415 The reason for this is that there's no defined order between a symbol
 3416 	 and a mapping symbol that may be at the same address.  We may have to
3417 look at least one position ahead. */
3418 for (; n < info->symtab_size; n++)
3419 {
3420 addr = bfd_asymbol_value (info->symtab[n]);
3421 if (addr > pc)
3422 break;
3423 if (get_sym_code_type (info, n, &type))
3424 {
3425 last_sym = n;
3426 found = TRUE;
3427 }
3428 }
3429
3430 if (!found)
3431 {
3432 n = info->symtab_pos;
3433 if (n >= last_mapping_sym && can_use_search_opt_p)
3434 n = last_mapping_sym;
3435
3436 /* No mapping symbol found at this address. Look backwards
 3437 	     for a preceding one, but don't go past the section start,
 3438 	     otherwise a data section with no mapping symbol can pick up
 3439 	     a text mapping symbol of a preceding section.  The documentation
3440 says section can be NULL, in which case we will seek up all the
3441 way to the top. */
3442 if (info->section)
3443 section_vma = info->section->vma;
3444
3445 for (; n >= 0; n--)
3446 {
3447 addr = bfd_asymbol_value (info->symtab[n]);
3448 if (addr < section_vma)
3449 break;
3450
3451 if (get_sym_code_type (info, n, &type))
3452 {
3453 last_sym = n;
3454 found = TRUE;
3455 break;
3456 }
3457 }
3458 }
3459
3460 last_mapping_sym = last_sym;
3461 last_type = type;
3462 last_stop_offset = info->stop_offset;
3463
3464 /* Look a little bit ahead to see if we should print out
3465 less than four bytes of data. If there's a symbol,
3466 mapping or otherwise, after two bytes then don't
3467 print more. */
3468 if (last_type == MAP_DATA)
3469 {
3470 size = 4 - (pc & 3);
3471 for (n = last_sym + 1; n < info->symtab_size; n++)
3472 {
3473 addr = bfd_asymbol_value (info->symtab[n]);
3474 if (addr > pc)
3475 {
3476 if (addr - pc < size)
3477 size = addr - pc;
3478 break;
3479 }
3480 }
3481 /* If the next symbol is after three bytes, we need to
3482 print only part of the data, so that we can use either
3483 .byte or .short. */
3484 if (size == 3)
3485 size = (pc & 1) ? 1 : 2;
3486 }
3487 }
3488 else
3489 last_type = type;
3490
3491 /* PR 10263: Disassemble data if requested to do so by the user. */
3492 if (last_type == MAP_DATA && ((info->flags & DISASSEMBLE_DATA) == 0))
3493 {
3494 /* size was set above. */
3495 info->bytes_per_chunk = size;
3496 info->display_endian = info->endian;
3497 printer = print_insn_data;
3498 }
3499 else
3500 {
3501 info->bytes_per_chunk = size = INSNLEN;
3502 info->display_endian = info->endian_code;
3503 printer = print_insn_aarch64_word;
3504 }
3505
3506 status = (*info->read_memory_func) (pc, buffer, size, info);
3507 if (status != 0)
3508 {
3509 (*info->memory_error_func) (status, pc, info);
3510 return -1;
3511 }
3512
3513 data = bfd_get_bits (buffer, size * 8,
3514 info->display_endian == BFD_ENDIAN_BIG);
3515
3516 (*printer) (pc, data, info, &errors);
3517
3518 return size;
3519 }
3520 \f
3521 void
3522 print_aarch64_disassembler_options (FILE *stream)
3523 {
3524 fprintf (stream, _("\n\
3525 The following AARCH64 specific disassembler options are supported for use\n\
3526 with the -M switch (multiple options should be separated by commas):\n"));
3527
3528 fprintf (stream, _("\n\
3529 no-aliases Don't print instruction aliases.\n"));
3530
3531 fprintf (stream, _("\n\
3532 aliases Do print instruction aliases.\n"));
3533
3534 fprintf (stream, _("\n\
3535 no-notes Don't print instruction notes.\n"));
3536
3537 fprintf (stream, _("\n\
3538 notes Do print instruction notes.\n"));
3539
3540 #ifdef DEBUG_AARCH64
3541 fprintf (stream, _("\n\
3542 debug_dump Temp switch for debug trace.\n"));
3543 #endif /* DEBUG_AARCH64 */
3544
3545 fprintf (stream, _("\n"));
3546 }