AArch64: Refactor verifiers to make more general.
1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define INSNLEN 4
30
31 /* Cached mapping symbol state. */
32 enum map_type
33 {
34 MAP_INSN,
35 MAP_DATA
36 };
37
38 static enum map_type last_type;
39 static int last_mapping_sym = -1;
40 static bfd_vma last_mapping_addr = 0;
41
42 /* Other options */
43 static int no_aliases = 0; /* If set disassemble as most general inst. */
44 static int no_notes = 1; /* If set do not print disassemble notes in the
45 output as comments. */
46
47 /* Currently active instruction sequence. */
48 static aarch64_instr_sequence insn_sequence ATTRIBUTE_UNUSED;
49
50 static void
51 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
52 {
53 }
54
55 static void
56 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
57 {
58 /* Try to match options that are simple flags */
59 if (CONST_STRNEQ (option, "no-aliases"))
60 {
61 no_aliases = 1;
62 return;
63 }
64
65 if (CONST_STRNEQ (option, "aliases"))
66 {
67 no_aliases = 0;
68 return;
69 }
70
71 if (CONST_STRNEQ (option, "no-notes"))
72 {
73 no_notes = 1;
74 return;
75 }
76
77 if (CONST_STRNEQ (option, "notes"))
78 {
79 no_notes = 0;
80 return;
81 }
82
83 #ifdef DEBUG_AARCH64
84 if (CONST_STRNEQ (option, "debug_dump"))
85 {
86 debug_dump = 1;
87 return;
88 }
89 #endif /* DEBUG_AARCH64 */
90
91 /* Invalid option. */
92 opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
93 }
94
95 static void
96 parse_aarch64_dis_options (const char *options)
97 {
98 const char *option_end;
99
100 if (options == NULL)
101 return;
102
103 while (*options != '\0')
104 {
105 /* Skip empty options. */
106 if (*options == ',')
107 {
108 options++;
109 continue;
110 }
111
112 /* We know that *options is neither NUL nor a comma. */
113 option_end = options + 1;
114 while (*option_end != ',' && *option_end != '\0')
115 option_end++;
116
117 parse_aarch64_dis_option (options, option_end - options);
118
119 /* Go on to the next one. If option_end points to a comma, it
120 will be skipped above. */
121 options = option_end;
122 }
123 }
124 \f
125 /* Functions doing the instruction disassembling. */
126
127 /* The unnamed arguments consist of the number of fields, followed by the
128 fields themselves; the VALUE assembled from those fields is extracted from
129 CODE and returned. MASK can be zero or the base mask of the opcode.
130 
131 N.B. the fields are required to be in such an order that the most significant
132 field for VALUE comes first, e.g. the <index> in
133 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
134 is encoded in H:L:M; in that case the fields should be passed in the order
135 H, L, M. */
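/* For example, an index encoded in the H:L fields can be fetched with
   extract_fields (code, 0, 2, FLD_H, FLD_L); the H bit ends up as the more
   significant bit of the returned value.  */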
136
137 aarch64_insn
138 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
139 {
140 uint32_t num;
141 const aarch64_field *field;
142 enum aarch64_field_kind kind;
143 va_list va;
144
145 va_start (va, mask);
146 num = va_arg (va, uint32_t);
147 assert (num <= 5);
148 aarch64_insn value = 0x0;
149 while (num--)
150 {
151 kind = va_arg (va, enum aarch64_field_kind);
152 field = &fields[kind];
153 value <<= field->width;
154 value |= extract_field (kind, code, mask);
155 }
156 return value;
157 }
158
159 /* Extract the value of all fields in SELF->fields from instruction CODE.
160 The least significant bit comes from the final field. */
161
162 static aarch64_insn
163 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
164 {
165 aarch64_insn value;
166 unsigned int i;
167 enum aarch64_field_kind kind;
168
169 value = 0;
170 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
171 {
172 kind = self->fields[i];
173 value <<= fields[kind].width;
174 value |= extract_field (kind, code, 0);
175 }
176 return value;
177 }
178
179 /* Sign-extend bit I of VALUE. */
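/* For example, sign_extend (0x1ff, 8) yields -1, while sign_extend (0xff, 8)
   yields 255.  */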
180 static inline int32_t
181 sign_extend (aarch64_insn value, unsigned i)
182 {
183 uint32_t ret = value;
184
185 assert (i < 32);
186 if ((value >> i) & 0x1)
187 {
188 uint32_t val = (uint32_t)(-1) << i;
189 ret = ret | val;
190 }
191 return (int32_t) ret;
192 }
193
194 /* N.B. the following inline helper functions create a dependency on the
195 order of operand qualifier enumerators. */
196
197 /* Given VALUE, return qualifier for a general purpose register. */
198 static inline enum aarch64_opnd_qualifier
199 get_greg_qualifier_from_value (aarch64_insn value)
200 {
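/* Value 0 selects the W qualifier and value 1 selects X.  */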
201 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
202 assert (value <= 0x1
203 && aarch64_get_qualifier_standard_value (qualifier) == value);
204 return qualifier;
205 }
206
207 /* Given VALUE, return qualifier for a vector register. This does not support
208 decoding instructions that accept the 2H vector type. */
209
210 static inline enum aarch64_opnd_qualifier
211 get_vreg_qualifier_from_value (aarch64_insn value)
212 {
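/* Value 0 selects 8B and value 1 selects 16B; higher values continue from
   4H onwards once the unused 2H entry has been skipped.  */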
213 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
214
215 /* Instructions using vector type 2H should not call this function. Skip over
216 the 2H qualifier. */
217 if (qualifier >= AARCH64_OPND_QLF_V_2H)
218 qualifier += 1;
219
220 assert (value <= 0x8
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
222 return qualifier;
223 }
224
225 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
226 static inline enum aarch64_opnd_qualifier
227 get_sreg_qualifier_from_value (aarch64_insn value)
228 {
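/* Values 0 to 4 select the B, H, S, D and Q qualifiers respectively.  */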
229 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
230
231 assert (value <= 0x4
232 && aarch64_get_qualifier_standard_value (qualifier) == value);
233 return qualifier;
234 }
235
236 /* Given the instruction in *INST, which is probably half way through being
237 decoded, return the qualifier that the caller should expect for operand I
238 if it can be established; otherwise return
239 AARCH64_OPND_QLF_NIL. */
240
241 static aarch64_opnd_qualifier_t
242 get_expected_qualifier (const aarch64_inst *inst, int i)
243 {
244 aarch64_opnd_qualifier_seq_t qualifiers;
245 /* Should not be called if the qualifier is known. */
246 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
247 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
248 i, qualifiers))
249 return qualifiers[i];
250 else
251 return AARCH64_OPND_QLF_NIL;
252 }
253
254 /* Operand extractors. */
255
256 bfd_boolean
257 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
258 const aarch64_insn code,
259 const aarch64_inst *inst ATTRIBUTE_UNUSED,
260 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
261 {
262 info->reg.regno = extract_field (self->fields[0], code, 0);
263 return TRUE;
264 }
265
266 bfd_boolean
267 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
268 const aarch64_insn code ATTRIBUTE_UNUSED,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED,
270 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
271 {
272 assert (info->idx == 1
273 || info->idx == 3);
274 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
275 return TRUE;
276 }
277
278 /* e.g. IC <ic_op>{, <Xt>}. */
279 bfd_boolean
280 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
281 const aarch64_insn code,
282 const aarch64_inst *inst ATTRIBUTE_UNUSED,
283 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
284 {
285 info->reg.regno = extract_field (self->fields[0], code, 0);
286 assert (info->idx == 1
287 && (aarch64_get_operand_class (inst->operands[0].type)
288 == AARCH64_OPND_CLASS_SYSTEM));
289 /* This will make the constraint checking happy and more importantly will
290 help the disassembler determine whether this operand is optional or
291 not. */
292 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
293
294 return TRUE;
295 }
296
297 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
298 bfd_boolean
299 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
300 const aarch64_insn code,
301 const aarch64_inst *inst ATTRIBUTE_UNUSED,
302 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
303 {
304 /* regno */
305 info->reglane.regno = extract_field (self->fields[0], code,
306 inst->opcode->mask);
307
308 /* Index and/or type. */
309 if (inst->opcode->iclass == asisdone
310 || inst->opcode->iclass == asimdins)
311 {
312 if (info->type == AARCH64_OPND_En
313 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
314 {
315 unsigned shift;
316 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
317 assert (info->idx == 1); /* Vn */
318 aarch64_insn value = extract_field (FLD_imm4, code, 0);
319 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
320 info->qualifier = get_expected_qualifier (inst, info->idx);
321 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
322 info->reglane.index = value >> shift;
323 }
324 else
325 {
326 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
327 imm5<3:0> <V>
328 0000 RESERVED
329 xxx1 B
330 xx10 H
331 x100 S
332 1000 D */
333 int pos = -1;
334 aarch64_insn value = extract_field (FLD_imm5, code, 0);
335 while (++pos <= 3 && (value & 0x1) == 0)
336 value >>= 1;
337 if (pos > 3)
338 return FALSE;
339 info->qualifier = get_sreg_qualifier_from_value (pos);
340 info->reglane.index = (unsigned) (value >> 1);
341 }
342 }
343 else if (inst->opcode->iclass == dotproduct)
344 {
345 /* Need information in other operand(s) to help decoding. */
346 info->qualifier = get_expected_qualifier (inst, info->idx);
347 switch (info->qualifier)
348 {
349 case AARCH64_OPND_QLF_S_4B:
350 /* H:L */
351 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
352 info->reglane.regno &= 0x1f;
353 break;
354 default:
355 return FALSE;
356 }
357 }
358 else if (inst->opcode->iclass == cryptosm3)
359 {
360 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
361 info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
362 }
363 else
364 {
365 /* Index only for e.g.
366 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
367
368 /* Need information in other operand(s) to help decoding. */
369 info->qualifier = get_expected_qualifier (inst, info->idx);
370 switch (info->qualifier)
371 {
372 case AARCH64_OPND_QLF_S_H:
373 if (info->type == AARCH64_OPND_Em16)
374 {
375 /* h:l:m */
376 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
377 FLD_M);
378 info->reglane.regno &= 0xf;
379 }
380 else
381 {
382 /* h:l */
383 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
384 }
385 break;
386 case AARCH64_OPND_QLF_S_S:
387 /* h:l */
388 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
389 break;
390 case AARCH64_OPND_QLF_S_D:
391 /* H */
392 info->reglane.index = extract_field (FLD_H, code, 0);
393 break;
394 default:
395 return FALSE;
396 }
397
398 if (inst->opcode->op == OP_FCMLA_ELEM
399 && info->qualifier != AARCH64_OPND_QLF_S_H)
400 {
401 /* Complex operand takes two elements. */
402 if (info->reglane.index & 1)
403 return FALSE;
404 info->reglane.index /= 2;
405 }
406 }
407
408 return TRUE;
409 }
410
411 bfd_boolean
412 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
413 const aarch64_insn code,
414 const aarch64_inst *inst ATTRIBUTE_UNUSED,
415 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
416 {
417 /* R */
418 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
419 /* len */
420 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
421 return TRUE;
422 }
423
424 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
425 bfd_boolean
426 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
427 aarch64_opnd_info *info, const aarch64_insn code,
428 const aarch64_inst *inst,
429 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
430 {
431 aarch64_insn value;
432 /* Number of elements in each structure to be loaded/stored. */
433 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
434
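/* This table is indexed by the value of the "opcode" field; each entry gives
   the register count and element count for that encoding, with reserved
   encodings flagged.  */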
435 struct
436 {
437 unsigned is_reserved;
438 unsigned num_regs;
439 unsigned num_elements;
440 } data [] =
441 { {0, 4, 4},
442 {1, 4, 4},
443 {0, 4, 1},
444 {0, 4, 2},
445 {0, 3, 3},
446 {1, 3, 3},
447 {0, 3, 1},
448 {0, 1, 1},
449 {0, 2, 2},
450 {1, 2, 2},
451 {0, 2, 1},
452 };
453
454 /* Rt */
455 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
456 /* opcode */
457 value = extract_field (FLD_opcode, code, 0);
458 /* PR 21595: Check for a bogus value. */
459 if (value >= ARRAY_SIZE (data))
460 return FALSE;
461 if (expected_num != data[value].num_elements || data[value].is_reserved)
462 return FALSE;
463 info->reglist.num_regs = data[value].num_regs;
464
465 return TRUE;
466 }
467
468 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
469 lanes instructions. */
470 bfd_boolean
471 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
472 aarch64_opnd_info *info, const aarch64_insn code,
473 const aarch64_inst *inst,
474 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
475 {
476 aarch64_insn value;
477
478 /* Rt */
479 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
480 /* S */
481 value = extract_field (FLD_S, code, 0);
482
483 /* Number of registers is equal to the number of elements in
484 each structure to be loaded/stored. */
485 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
486 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
487
488 /* Except when it is LD1R. */
489 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
490 info->reglist.num_regs = 2;
491
492 return TRUE;
493 }
494
495 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
496 load/store single element instructions. */
497 bfd_boolean
498 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
499 aarch64_opnd_info *info, const aarch64_insn code,
500 const aarch64_inst *inst ATTRIBUTE_UNUSED,
501 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
502 {
503 aarch64_field field = {0, 0};
504 aarch64_insn QSsize; /* fields Q:S:size. */
505 aarch64_insn opcodeh2; /* opcode<2:1> */
506
507 /* Rt */
508 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
509
510 /* Decode the index, opcode<2:1> and size. */
511 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
512 opcodeh2 = extract_field_2 (&field, code, 0);
513 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
514 switch (opcodeh2)
515 {
516 case 0x0:
517 info->qualifier = AARCH64_OPND_QLF_S_B;
518 /* Index encoded in "Q:S:size". */
519 info->reglist.index = QSsize;
520 break;
521 case 0x1:
522 if (QSsize & 0x1)
523 /* UND. */
524 return FALSE;
525 info->qualifier = AARCH64_OPND_QLF_S_H;
526 /* Index encoded in "Q:S:size<1>". */
527 info->reglist.index = QSsize >> 1;
528 break;
529 case 0x2:
530 if ((QSsize >> 1) & 0x1)
531 /* UND. */
532 return FALSE;
533 if ((QSsize & 0x1) == 0)
534 {
535 info->qualifier = AARCH64_OPND_QLF_S_S;
536 /* Index encoded in "Q:S". */
537 info->reglist.index = QSsize >> 2;
538 }
539 else
540 {
541 if (extract_field (FLD_S, code, 0))
542 /* UND */
543 return FALSE;
544 info->qualifier = AARCH64_OPND_QLF_S_D;
545 /* Index encoded in "Q". */
546 info->reglist.index = QSsize >> 3;
547 }
548 break;
549 default:
550 return FALSE;
551 }
552
553 info->reglist.has_index = 1;
554 info->reglist.num_regs = 0;
555 /* Number of registers is equal to the number of elements in
556 each structure to be loaded/stored. */
557 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
558 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
559
560 return TRUE;
561 }
562
563 /* Decode fields immh:immb and/or Q for e.g.
564 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
565 or SSHR <V><d>, <V><n>, #<shift>. */
566
567 bfd_boolean
568 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
569 aarch64_opnd_info *info, const aarch64_insn code,
570 const aarch64_inst *inst,
571 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
572 {
573 int pos;
574 aarch64_insn Q, imm, immh;
575 enum aarch64_insn_class iclass = inst->opcode->iclass;
576
577 immh = extract_field (FLD_immh, code, 0);
578 if (immh == 0)
579 return FALSE;
580 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
581 pos = 4;
582 /* Get highest set bit in immh. */
583 while (--pos >= 0 && (immh & 0x8) == 0)
584 immh <<= 1;
585
586 assert ((iclass == asimdshf || iclass == asisdshf)
587 && (info->type == AARCH64_OPND_IMM_VLSR
588 || info->type == AARCH64_OPND_IMM_VLSL));
589
590 if (iclass == asimdshf)
591 {
592 Q = extract_field (FLD_Q, code, 0);
593 /* immh Q <T>
594 0000 x SEE AdvSIMD modified immediate
595 0001 0 8B
596 0001 1 16B
597 001x 0 4H
598 001x 1 8H
599 01xx 0 2S
600 01xx 1 4S
601 1xxx 0 RESERVED
602 1xxx 1 2D */
603 info->qualifier =
604 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
605 }
606 else
607 info->qualifier = get_sreg_qualifier_from_value (pos);
608
609 if (info->type == AARCH64_OPND_IMM_VLSR)
610 /* immh <shift>
611 0000 SEE AdvSIMD modified immediate
612 0001 (16-UInt(immh:immb))
613 001x (32-UInt(immh:immb))
614 01xx (64-UInt(immh:immb))
615 1xxx (128-UInt(immh:immb)) */
616 info->imm.value = (16 << pos) - imm;
617 else
618 /* immh:immb
619 immh <shift>
620 0000 SEE AdvSIMD modified immediate
621 0001 (UInt(immh:immb)-8)
622 001x (UInt(immh:immb)-16)
623 01xx (UInt(immh:immb)-32)
624 1xxx (UInt(immh:immb)-64) */
625 info->imm.value = imm - (8 << pos);
626
627 return TRUE;
628 }
629
630 /* Decode the size-dependent shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
631 bfd_boolean
632 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
633 aarch64_opnd_info *info, const aarch64_insn code,
634 const aarch64_inst *inst ATTRIBUTE_UNUSED,
635 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
636 {
637 int64_t imm;
638 aarch64_insn val;
639 val = extract_field (FLD_size, code, 0);
640 switch (val)
641 {
642 case 0: imm = 8; break;
643 case 1: imm = 16; break;
644 case 2: imm = 32; break;
645 default: return FALSE;
646 }
647 info->imm.value = imm;
648 return TRUE;
649 }
650
651 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
652 The value in the field(s) will be extracted as an unsigned immediate value. */
653 bfd_boolean
654 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
655 const aarch64_insn code,
656 const aarch64_inst *inst ATTRIBUTE_UNUSED,
657 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
658 {
659 int64_t imm;
660
661 imm = extract_all_fields (self, code);
662
663 if (operand_need_sign_extension (self))
664 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
665
666 if (operand_need_shift_by_two (self))
667 imm <<= 2;
668
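/* ADRP computes a 4KiB-page-aligned address, so its immediate is in units
   of 4096 bytes.  */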
669 if (info->type == AARCH64_OPND_ADDR_ADRP)
670 imm <<= 12;
671
672 info->imm.value = imm;
673 return TRUE;
674 }
675
676 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
677 bfd_boolean
678 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
679 const aarch64_insn code,
680 const aarch64_inst *inst ATTRIBUTE_UNUSED,
681 aarch64_operand_error *errors)
682 {
683 aarch64_ext_imm (self, info, code, inst, errors);
684 info->shifter.kind = AARCH64_MOD_LSL;
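/* hw is a 2-bit field, so the LSL amount is 0, 16, 32 or 48.  */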
685 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
686 return TRUE;
687 }
688
689 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
690 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
691 bfd_boolean
692 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
693 aarch64_opnd_info *info,
694 const aarch64_insn code,
695 const aarch64_inst *inst ATTRIBUTE_UNUSED,
696 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
697 {
698 uint64_t imm;
699 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
700 aarch64_field field = {0, 0};
701
702 assert (info->idx == 1);
703
704 if (info->type == AARCH64_OPND_SIMD_FPIMM)
705 info->imm.is_fp = 1;
706
707 /* a:b:c:d:e:f:g:h */
708 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
709 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
710 {
711 /* Either MOVI <Dd>, #<imm>
712 or MOVI <Vd>.2D, #<imm>.
713 <imm> is a 64-bit immediate
714 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
715 encoded in "a:b:c:d:e:f:g:h". */
716 int i;
717 unsigned abcdefgh = imm;
718 for (imm = 0ull, i = 0; i < 8; i++)
719 if (((abcdefgh >> i) & 0x1) != 0)
720 imm |= 0xffull << (8 * i);
721 }
722 info->imm.value = imm;
723
724 /* cmode */
725 info->qualifier = get_expected_qualifier (inst, info->idx);
726 switch (info->qualifier)
727 {
728 case AARCH64_OPND_QLF_NIL:
729 /* no shift */
730 info->shifter.kind = AARCH64_MOD_NONE;
731 return TRUE;
732 case AARCH64_OPND_QLF_LSL:
733 /* shift zeros */
734 info->shifter.kind = AARCH64_MOD_LSL;
735 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
736 {
737 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
738 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
739 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
740 default: assert (0); return FALSE;
741 }
742 /* 00: 0; 01: 8; 10: 16; 11: 24. */
743 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
744 break;
745 case AARCH64_OPND_QLF_MSL:
746 /* shift ones */
747 info->shifter.kind = AARCH64_MOD_MSL;
748 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
749 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
750 break;
751 default:
752 assert (0);
753 return FALSE;
754 }
755
756 return TRUE;
757 }
758
759 /* Decode an 8-bit floating-point immediate. */
760 bfd_boolean
761 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
762 const aarch64_insn code,
763 const aarch64_inst *inst ATTRIBUTE_UNUSED,
764 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
765 {
766 info->imm.value = extract_all_fields (self, code);
767 info->imm.is_fp = 1;
768 return TRUE;
769 }
770
771 /* Decode a 1-bit rotate immediate (#90 or #270). */
772 bfd_boolean
773 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
774 const aarch64_insn code,
775 const aarch64_inst *inst ATTRIBUTE_UNUSED,
776 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
777 {
778 uint64_t rot = extract_field (self->fields[0], code, 0);
779 assert (rot < 2U);
780 info->imm.value = rot * 180 + 90;
781 return TRUE;
782 }
783
784 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
785 bfd_boolean
786 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
787 const aarch64_insn code,
788 const aarch64_inst *inst ATTRIBUTE_UNUSED,
789 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
790 {
791 uint64_t rot = extract_field (self->fields[0], code, 0);
792 assert (rot < 4U);
793 info->imm.value = rot * 90;
794 return TRUE;
795 }
796
797 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
798 bfd_boolean
799 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
800 aarch64_opnd_info *info, const aarch64_insn code,
801 const aarch64_inst *inst ATTRIBUTE_UNUSED,
802 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
803 {
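/* The scale field encodes 64 - <fbits>, so <fbits> is recovered by
   subtracting the field value from 64.  */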
804 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
805 return TRUE;
806 }
807
808 /* Decode arithmetic immediate for e.g.
809 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
810 bfd_boolean
811 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
812 aarch64_opnd_info *info, const aarch64_insn code,
813 const aarch64_inst *inst ATTRIBUTE_UNUSED,
814 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
815 {
816 aarch64_insn value;
817
818 info->shifter.kind = AARCH64_MOD_LSL;
819 /* shift */
820 value = extract_field (FLD_shift, code, 0);
821 if (value >= 2)
822 return FALSE;
823 info->shifter.amount = value ? 12 : 0;
824 /* imm12 (unsigned) */
825 info->imm.value = extract_field (FLD_imm12, code, 0);
826
827 return TRUE;
828 }
829
830 /* Return true if VALUE is a valid logical immediate encoding, storing the
831 decoded value in *RESULT if so. ESIZE is the number of bytes in the
832 decoded immediate. */
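/* For example, with ESIZE 4 the encoding N = 0, immr = 0, imms = 0b000111
   decodes to 0xff: a run of eight ones, not rotated, in a 32-bit element.  */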
833 static bfd_boolean
834 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
835 {
836 uint64_t imm, mask;
837 uint32_t N, R, S;
838 unsigned simd_size;
839
840 /* value is N:immr:imms. */
841 S = value & 0x3f;
842 R = (value >> 6) & 0x3f;
843 N = (value >> 12) & 0x1;
844
845 /* The immediate value is S+1 bits set to 1, left-rotated by SIMDsize - R
846 (in other words, right-rotated by R), then replicated. */
847 if (N != 0)
848 {
849 simd_size = 64;
850 mask = 0xffffffffffffffffull;
851 }
852 else
853 {
854 switch (S)
855 {
856 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
857 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
858 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
859 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
860 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
861 default: return FALSE;
862 }
863 mask = (1ull << simd_size) - 1;
864 /* Top bits are IGNORED. */
865 R &= simd_size - 1;
866 }
867
868 if (simd_size > esize * 8)
869 return FALSE;
870
871 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
872 if (S == simd_size - 1)
873 return FALSE;
874 /* S+1 consecutive bits to 1. */
875 /* NOTE: S can't be 63 due to detection above. */
876 imm = (1ull << (S + 1)) - 1;
877 /* Rotate to the left by simd_size - R. */
878 if (R != 0)
879 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
880 /* Replicate the value according to SIMD size. */
881 switch (simd_size)
882 {
883 case 2: imm = (imm << 2) | imm;
884 /* Fall through. */
885 case 4: imm = (imm << 4) | imm;
886 /* Fall through. */
887 case 8: imm = (imm << 8) | imm;
888 /* Fall through. */
889 case 16: imm = (imm << 16) | imm;
890 /* Fall through. */
891 case 32: imm = (imm << 32) | imm;
892 /* Fall through. */
893 case 64: break;
894 default: assert (0); return FALSE;
895 }
896
897 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
898
899 return TRUE;
900 }
901
902 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
903 bfd_boolean
904 aarch64_ext_limm (const aarch64_operand *self,
905 aarch64_opnd_info *info, const aarch64_insn code,
906 const aarch64_inst *inst,
907 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
908 {
909 uint32_t esize;
910 aarch64_insn value;
911
912 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
913 self->fields[2]);
914 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
915 return decode_limm (esize, value, &info->imm.value);
916 }
917
918 /* Decode a logical immediate for the BIC alias of AND (etc.). */
919 bfd_boolean
920 aarch64_ext_inv_limm (const aarch64_operand *self,
921 aarch64_opnd_info *info, const aarch64_insn code,
922 const aarch64_inst *inst,
923 aarch64_operand_error *errors)
924 {
925 if (!aarch64_ext_limm (self, info, code, inst, errors))
926 return FALSE;
927 info->imm.value = ~info->imm.value;
928 return TRUE;
929 }
930
931 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
932 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
933 bfd_boolean
934 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
935 aarch64_opnd_info *info,
936 const aarch64_insn code, const aarch64_inst *inst,
937 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
938 {
939 aarch64_insn value;
940
941 /* Rt */
942 info->reg.regno = extract_field (FLD_Rt, code, 0);
943
944 /* size */
945 value = extract_field (FLD_ldst_size, code, 0);
946 if (inst->opcode->iclass == ldstpair_indexed
947 || inst->opcode->iclass == ldstnapair_offs
948 || inst->opcode->iclass == ldstpair_off
949 || inst->opcode->iclass == loadlit)
950 {
951 enum aarch64_opnd_qualifier qualifier;
952 switch (value)
953 {
954 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
955 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
956 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
957 default: return FALSE;
958 }
959 info->qualifier = qualifier;
960 }
961 else
962 {
963 /* opc1:size */
964 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
965 if (value > 0x4)
966 return FALSE;
967 info->qualifier = get_sreg_qualifier_from_value (value);
968 }
969
970 return TRUE;
971 }
972
973 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
974 bfd_boolean
975 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
976 aarch64_opnd_info *info,
977 aarch64_insn code,
978 const aarch64_inst *inst ATTRIBUTE_UNUSED,
979 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
980 {
981 /* Rn */
982 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
983 return TRUE;
984 }
985
986 /* Decode the address operand for e.g.
987 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
988 bfd_boolean
989 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
990 aarch64_opnd_info *info,
991 aarch64_insn code, const aarch64_inst *inst,
992 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
993 {
994 info->qualifier = get_expected_qualifier (inst, info->idx);
995
996 /* Rn */
997 info->addr.base_regno = extract_field (self->fields[0], code, 0);
998
999 /* simm9 */
1000 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1001 info->addr.offset.imm = sign_extend (imm, 8);
1002 if (extract_field (self->fields[2], code, 0) == 1) {
1003 info->addr.writeback = 1;
1004 info->addr.preind = 1;
1005 }
1006 return TRUE;
1007 }
1008
1009 /* Decode the address operand for e.g.
1010 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1011 bfd_boolean
1012 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
1013 aarch64_opnd_info *info,
1014 aarch64_insn code, const aarch64_inst *inst,
1015 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1016 {
1017 aarch64_insn S, value;
1018
1019 /* Rn */
1020 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1021 /* Rm */
1022 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1023 /* option */
1024 value = extract_field (FLD_option, code, 0);
1025 info->shifter.kind =
1026 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1027 /* Fix-up the shifter kind; although the table-driven approach is
1028 efficient, it is slightly inflexible, thus needing this fix-up. */
1029 if (info->shifter.kind == AARCH64_MOD_UXTX)
1030 info->shifter.kind = AARCH64_MOD_LSL;
1031 /* S */
1032 S = extract_field (FLD_S, code, 0);
1033 if (S == 0)
1034 {
1035 info->shifter.amount = 0;
1036 info->shifter.amount_present = 0;
1037 }
1038 else
1039 {
1040 int size;
1041 /* Need information in other operand(s) to help achieve the decoding
1042 from 'S' field. */
1043 info->qualifier = get_expected_qualifier (inst, info->idx);
1044 /* Get the size of the data element that is accessed, which may be
1045 different from the source register size, e.g. in strb/ldrb. */
1046 size = aarch64_get_qualifier_esize (info->qualifier);
1047 info->shifter.amount = get_logsz (size);
1048 info->shifter.amount_present = 1;
1049 }
1050
1051 return TRUE;
1052 }
1053
1054 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
1055 bfd_boolean
1056 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1057 aarch64_insn code, const aarch64_inst *inst,
1058 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1059 {
1060 aarch64_insn imm;
1061 info->qualifier = get_expected_qualifier (inst, info->idx);
1062
1063 /* Rn */
1064 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1065 /* simm (imm9 or imm7) */
1066 imm = extract_field (self->fields[0], code, 0);
1067 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1068 if (self->fields[0] == FLD_imm7)
1069 /* scaled immediate in ld/st pair instructions. */
1070 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1071 /* qualifier */
1072 if (inst->opcode->iclass == ldst_unscaled
1073 || inst->opcode->iclass == ldstnapair_offs
1074 || inst->opcode->iclass == ldstpair_off
1075 || inst->opcode->iclass == ldst_unpriv)
1076 info->addr.writeback = 0;
1077 else
1078 {
1079 /* pre/post- index */
1080 info->addr.writeback = 1;
1081 if (extract_field (self->fields[1], code, 0) == 1)
1082 info->addr.preind = 1;
1083 else
1084 info->addr.postind = 1;
1085 }
1086
1087 return TRUE;
1088 }
1089
1090 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1091 bfd_boolean
1092 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1093 aarch64_insn code,
1094 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1095 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1096 {
1097 int shift;
1098 info->qualifier = get_expected_qualifier (inst, info->idx);
1099 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1100 /* Rn */
1101 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1102 /* uimm12 */
1103 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1104 return TRUE;
1105 }
1106
1107 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1108 bfd_boolean
1109 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1110 aarch64_insn code,
1111 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1112 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1113 {
1114 aarch64_insn imm;
1115
1116 info->qualifier = get_expected_qualifier (inst, info->idx);
1117 /* Rn */
1118 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1119 /* simm10 */
1120 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
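/* The signed 10-bit offset is scaled by 8, giving byte offsets in the
   range [-4096, 4088].  */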
1121 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1122 if (extract_field (self->fields[3], code, 0) == 1) {
1123 info->addr.writeback = 1;
1124 info->addr.preind = 1;
1125 }
1126 return TRUE;
1127 }
1128
1129 /* Decode the address operand for e.g.
1130 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1131 bfd_boolean
1132 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1133 aarch64_opnd_info *info,
1134 aarch64_insn code, const aarch64_inst *inst,
1135 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1136 {
1137 /* The opcode dependent area stores the number of elements in
1138 each structure to be loaded/stored. */
1139 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1140
1141 /* Rn */
1142 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1143 /* Rm | #<amount> */
1144 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1145 if (info->addr.offset.regno == 31)
1146 {
1147 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1148 /* Special handling of loading single structure to all lanes. */
1149 info->addr.offset.imm = (is_ld1r ? 1
1150 : inst->operands[0].reglist.num_regs)
1151 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1152 else
1153 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1154 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1155 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1156 }
1157 else
1158 info->addr.offset.is_reg = 1;
1159 info->addr.writeback = 1;
1160
1161 return TRUE;
1162 }
1163
1164 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1165 bfd_boolean
1166 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1167 aarch64_opnd_info *info,
1168 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1169 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1170 {
1171 aarch64_insn value;
1172 /* cond */
1173 value = extract_field (FLD_cond, code, 0);
1174 info->cond = get_cond_from_value (value);
1175 return TRUE;
1176 }
1177
1178 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1179 bfd_boolean
1180 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1181 aarch64_opnd_info *info,
1182 aarch64_insn code,
1183 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1184 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1185 {
1186 /* op0:op1:CRn:CRm:op2 */
1187 info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1188 FLD_CRm, FLD_op2);
1189 info->sysreg.flags = 0;
1190
1191 /* For a system instruction, work out which restrictions apply to the
1192 register value; these will be enforced during decoding. */
1193 if (inst->opcode->iclass == ic_system)
1194 {
1195 /* Check whether it is read-only, else check whether it is write-only.
1196 If it is both or unspecified, we don't care. */
1197 if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1198 info->sysreg.flags = F_REG_READ;
1199 else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1200 == F_SYS_WRITE)
1201 info->sysreg.flags = F_REG_WRITE;
1202 }
1203
1204 return TRUE;
1205 }
1206
1207 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1208 bfd_boolean
1209 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1210 aarch64_opnd_info *info, aarch64_insn code,
1211 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1212 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1213 {
1214 int i;
1215 /* op1:op2 */
1216 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1217 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1218 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1219 return TRUE;
1220 /* Reserved value in <pstatefield>. */
1221 return FALSE;
1222 }
1223
1224 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1225 bfd_boolean
1226 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1227 aarch64_opnd_info *info,
1228 aarch64_insn code,
1229 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1230 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1231 {
1232 int i;
1233 aarch64_insn value;
1234 const aarch64_sys_ins_reg *sysins_ops;
1235 /* op0:op1:CRn:CRm:op2 */
1236 value = extract_fields (code, 0, 5,
1237 FLD_op0, FLD_op1, FLD_CRn,
1238 FLD_CRm, FLD_op2);
1239
1240 switch (info->type)
1241 {
1242 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1243 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1244 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1245 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1246 default: assert (0); return FALSE;
1247 }
1248
1249 for (i = 0; sysins_ops[i].name != NULL; ++i)
1250 if (sysins_ops[i].value == value)
1251 {
1252 info->sysins_op = sysins_ops + i;
1253 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1254 info->sysins_op->name,
1255 (unsigned)info->sysins_op->value,
1256 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1257 return TRUE;
1258 }
1259
1260 return FALSE;
1261 }
1262
1263 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1264
1265 bfd_boolean
1266 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1267 aarch64_opnd_info *info,
1268 aarch64_insn code,
1269 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1270 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1271 {
1272 /* CRm */
1273 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1274 return TRUE;
1275 }
1276
1277 /* Decode the prefetch operation option operand for e.g.
1278 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1279
1280 bfd_boolean
1281 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1282 aarch64_opnd_info *info,
1283 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1284 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1285 {
1286 /* prfop in Rt */
1287 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1288 return TRUE;
1289 }
1290
1291 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1292 to the matching name/value pair in aarch64_hint_options. */
1293
1294 bfd_boolean
1295 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1296 aarch64_opnd_info *info,
1297 aarch64_insn code,
1298 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1299 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1300 {
1301 /* CRm:op2. */
1302 unsigned hint_number;
1303 int i;
1304
1305 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1306
1307 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1308 {
1309 if (hint_number == aarch64_hint_options[i].value)
1310 {
1311 info->hint_option = &(aarch64_hint_options[i]);
1312 return TRUE;
1313 }
1314 }
1315
1316 return FALSE;
1317 }
1318
1319 /* Decode the extended register operand for e.g.
1320 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1321 bfd_boolean
1322 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1323 aarch64_opnd_info *info,
1324 aarch64_insn code,
1325 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1326 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1327 {
1328 aarch64_insn value;
1329
1330 /* Rm */
1331 info->reg.regno = extract_field (FLD_Rm, code, 0);
1332 /* option */
1333 value = extract_field (FLD_option, code, 0);
1334 info->shifter.kind =
1335 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1336 /* imm3 */
1337 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1338
1339 /* This makes the constraint checking happy. */
1340 info->shifter.operator_present = 1;
1341
1342 /* Assume inst->operands[0].qualifier has been resolved. */
1343 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1344 info->qualifier = AARCH64_OPND_QLF_W;
1345 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1346 && (info->shifter.kind == AARCH64_MOD_UXTX
1347 || info->shifter.kind == AARCH64_MOD_SXTX))
1348 info->qualifier = AARCH64_OPND_QLF_X;
1349
1350 return TRUE;
1351 }
1352
1353 /* Decode the shifted register operand for e.g.
1354 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1355 bfd_boolean
1356 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1357 aarch64_opnd_info *info,
1358 aarch64_insn code,
1359 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1360 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1361 {
1362 aarch64_insn value;
1363
1364 /* Rm */
1365 info->reg.regno = extract_field (FLD_Rm, code, 0);
1366 /* shift */
1367 value = extract_field (FLD_shift, code, 0);
1368 info->shifter.kind =
1369 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1370 if (info->shifter.kind == AARCH64_MOD_ROR
1371 && inst->opcode->iclass != log_shift)
1372 /* ROR is not available for the shifted register operand in arithmetic
1373 instructions. */
1374 return FALSE;
1375 /* imm6 */
1376 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1377
1378 /* This makes the constraint checking happy. */
1379 info->shifter.operator_present = 1;
1380
1381 return TRUE;
1382 }
1383
1384 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1385 where <offset> is given by the OFFSET parameter and where <factor> is
1386 1 plus SELF's operand-dependent value. fields[0] specifies the field
1387 that holds <base>. */
1388 static bfd_boolean
1389 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1390 aarch64_opnd_info *info, aarch64_insn code,
1391 int64_t offset)
1392 {
1393 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1394 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1395 info->addr.offset.is_reg = FALSE;
1396 info->addr.writeback = FALSE;
1397 info->addr.preind = TRUE;
1398 if (offset != 0)
1399 info->shifter.kind = AARCH64_MOD_MUL_VL;
1400 info->shifter.amount = 1;
1401 info->shifter.operator_present = (info->addr.offset.imm != 0);
1402 info->shifter.amount_present = FALSE;
1403 return TRUE;
1404 }
1405
1406 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1407 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1408 SELF's operand-dependent value. fields[0] specifies the field that
1409 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1410 bfd_boolean
1411 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1412 aarch64_opnd_info *info, aarch64_insn code,
1413 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1414 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1415 {
1416 int offset;
1417
1418 offset = extract_field (FLD_SVE_imm4, code, 0);
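/* Map the raw 4-bit value onto the signed range [-8, 7].  */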
1419 offset = ((offset + 8) & 15) - 8;
1420 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1421 }
1422
1423 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1424 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1425 SELF's operand-dependent value. fields[0] specifies the field that
1426 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1427 bfd_boolean
1428 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1429 aarch64_opnd_info *info, aarch64_insn code,
1430 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1431 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1432 {
1433 int offset;
1434
1435 offset = extract_field (FLD_SVE_imm6, code, 0);
1436 offset = (((offset + 32) & 63) - 32);
1437 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1438 }
1439
1440 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1441 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1442 SELF's operand-dependent value. fields[0] specifies the field that
1443 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1444 and imm3 fields, with imm3 being the less-significant part. */
1445 bfd_boolean
1446 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1447 aarch64_opnd_info *info,
1448 aarch64_insn code,
1449 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1450 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1451 {
1452 int offset;
1453
1454 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1455 offset = (((offset + 256) & 511) - 256);
1456 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1457 }
1458
1459 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1460 is given by the OFFSET parameter and where <shift> is SELF's operand-
1461 dependent value. fields[0] specifies the base register field <base>. */
1462 static bfd_boolean
1463 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1464 aarch64_opnd_info *info, aarch64_insn code,
1465 int64_t offset)
1466 {
1467 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1468 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1469 info->addr.offset.is_reg = FALSE;
1470 info->addr.writeback = FALSE;
1471 info->addr.preind = TRUE;
1472 info->shifter.operator_present = FALSE;
1473 info->shifter.amount_present = FALSE;
1474 return TRUE;
1475 }
1476
1477 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1478 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1479 value. fields[0] specifies the base register field. */
1480 bfd_boolean
1481 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1482 aarch64_opnd_info *info, aarch64_insn code,
1483 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1484 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1485 {
1486 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1487 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1488 }
1489
1490 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1491 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1492 value. fields[0] specifies the base register field. */
1493 bfd_boolean
1494 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1495 aarch64_opnd_info *info, aarch64_insn code,
1496 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1497 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1498 {
1499 int offset = extract_field (FLD_SVE_imm6, code, 0);
1500 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1501 }
1502
1503 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1504 is SELF's operand-dependent value. fields[0] specifies the base
1505 register field and fields[1] specifies the offset register field. */
1506 bfd_boolean
1507 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1508 aarch64_opnd_info *info, aarch64_insn code,
1509 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1510 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1511 {
1512 int index_regno;
1513
1514 index_regno = extract_field (self->fields[1], code, 0);
1515 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1516 return FALSE;
1517
1518 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1519 info->addr.offset.regno = index_regno;
1520 info->addr.offset.is_reg = TRUE;
1521 info->addr.writeback = FALSE;
1522 info->addr.preind = TRUE;
1523 info->shifter.kind = AARCH64_MOD_LSL;
1524 info->shifter.amount = get_operand_specific_data (self);
1525 info->shifter.operator_present = (info->shifter.amount != 0);
1526 info->shifter.amount_present = (info->shifter.amount != 0);
1527 return TRUE;
1528 }
1529
1530 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1531 <shift> is SELF's operand-dependent value. fields[0] specifies the
1532 base register field, fields[1] specifies the offset register field and
1533 fields[2] is a single-bit field that selects SXTW over UXTW. */
1534 bfd_boolean
1535 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1536 aarch64_opnd_info *info, aarch64_insn code,
1537 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1538 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1539 {
1540 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1541 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1542 info->addr.offset.is_reg = TRUE;
1543 info->addr.writeback = FALSE;
1544 info->addr.preind = TRUE;
1545 if (extract_field (self->fields[2], code, 0))
1546 info->shifter.kind = AARCH64_MOD_SXTW;
1547 else
1548 info->shifter.kind = AARCH64_MOD_UXTW;
1549 info->shifter.amount = get_operand_specific_data (self);
1550 info->shifter.operator_present = TRUE;
1551 info->shifter.amount_present = (info->shifter.amount != 0);
1552 return TRUE;
1553 }
1554
1555 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1556 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1557 fields[0] specifies the base register field. */
1558 bfd_boolean
1559 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1560 aarch64_opnd_info *info, aarch64_insn code,
1561 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1562 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1563 {
1564 int offset = extract_field (FLD_imm5, code, 0);
1565 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1566 }
1567
1568 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1569 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1570 number. fields[0] specifies the base register field and fields[1]
1571 specifies the offset register field. */
1572 static bfd_boolean
1573 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1574 aarch64_insn code, enum aarch64_modifier_kind kind)
1575 {
1576 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1577 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1578 info->addr.offset.is_reg = TRUE;
1579 info->addr.writeback = FALSE;
1580 info->addr.preind = TRUE;
1581 info->shifter.kind = kind;
1582 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1583 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1584 || info->shifter.amount != 0);
1585 info->shifter.amount_present = (info->shifter.amount != 0);
1586 return TRUE;
1587 }
1588
1589 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1590 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1591 field and fields[1] specifies the offset register field. */
1592 bfd_boolean
1593 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1594 aarch64_opnd_info *info, aarch64_insn code,
1595 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1596 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1597 {
1598 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1599 }
1600
1601 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1602 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1603 field and fields[1] specifies the offset register field. */
1604 bfd_boolean
1605 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1606 aarch64_opnd_info *info, aarch64_insn code,
1607 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1608 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1609 {
1610 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1611 }
1612
1613 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1614 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1615 field and fields[1] specifies the offset register field. */
1616 bfd_boolean
1617 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1618 aarch64_opnd_info *info, aarch64_insn code,
1619 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1620 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1621 {
1622 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1623 }
1624
1625 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1626 has the raw field value and that the low 8 bits decode to VALUE. */
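/* For example, a raw field value of 0x100 decodes to #0, LSL #8, whereas
   0x101 decodes to a plain #256 with the shift folded into the immediate.  */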
1627 static bfd_boolean
1628 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1629 {
1630 info->shifter.kind = AARCH64_MOD_LSL;
1631 info->shifter.amount = 0;
1632 if (info->imm.value & 0x100)
1633 {
1634 if (value == 0)
1635 /* Decode 0x100 as #0, LSL #8. */
1636 info->shifter.amount = 8;
1637 else
1638 value *= 256;
1639 }
1640 info->shifter.operator_present = (info->shifter.amount != 0);
1641 info->shifter.amount_present = (info->shifter.amount != 0);
1642 info->imm.value = value;
1643 return TRUE;
1644 }
1645
1646 /* Decode an SVE ADD/SUB immediate. */
1647 bfd_boolean
1648 aarch64_ext_sve_aimm (const aarch64_operand *self,
1649 aarch64_opnd_info *info, const aarch64_insn code,
1650 const aarch64_inst *inst,
1651 aarch64_operand_error *errors)
1652 {
1653 return (aarch64_ext_imm (self, info, code, inst, errors)
1654 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1655 }
1656
1657 /* Decode an SVE CPY/DUP immediate. */
1658 bfd_boolean
1659 aarch64_ext_sve_asimm (const aarch64_operand *self,
1660 aarch64_opnd_info *info, const aarch64_insn code,
1661 const aarch64_inst *inst,
1662 aarch64_operand_error *errors)
1663 {
1664 return (aarch64_ext_imm (self, info, code, inst, errors)
1665 && decode_sve_aimm (info, (int8_t) info->imm.value));
1666 }
1667
1668 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1669 The fields array specifies which field to use. */
1670 bfd_boolean
1671 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1672 aarch64_opnd_info *info, aarch64_insn code,
1673 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1674 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1675 {
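/* 0x3f000000 and 0x3f800000 are the single-precision bit patterns for
   0.5 and 1.0.  */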
1676 if (extract_field (self->fields[0], code, 0))
1677 info->imm.value = 0x3f800000;
1678 else
1679 info->imm.value = 0x3f000000;
1680 info->imm.is_fp = TRUE;
1681 return TRUE;
1682 }
1683
1684 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1685 The fields array specifies which field to use. */
1686 bfd_boolean
1687 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1688 aarch64_opnd_info *info, aarch64_insn code,
1689 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1690 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1691 {
1692 if (extract_field (self->fields[0], code, 0))
1693 info->imm.value = 0x40000000;
1694 else
1695 info->imm.value = 0x3f000000;
1696 info->imm.is_fp = TRUE;
1697 return TRUE;
1698 }
1699
1700 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1701 The fields array specifies which field to use. */
1702 bfd_boolean
1703 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1704 aarch64_opnd_info *info, aarch64_insn code,
1705 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1706 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1707 {
1708 if (extract_field (self->fields[0], code, 0))
1709 info->imm.value = 0x3f800000;
1710 else
1711 info->imm.value = 0x0;
1712 info->imm.is_fp = TRUE;
1713 return TRUE;
1714 }
1715
1716 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1717 array specifies which field to use for Zn. MM is encoded in the
1718 concatenation of imm5 and SVE_tszh, with imm5 being the less
1719 significant part. */
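/* For example, a tszh:imm5 value of 0b0101100 decodes as follows: the two
   trailing zero bits are shifted out, leaving 0b01011, and the final
   division by two drops the marker bit, giving index 5.  The position of
   the lowest set bit (here bit 2) is what selects the element size
   elsewhere (see the sve_index handling in
   aarch64_decode_variant_using_iclass below).  */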
1720 bfd_boolean
1721 aarch64_ext_sve_index (const aarch64_operand *self,
1722 aarch64_opnd_info *info, aarch64_insn code,
1723 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1724 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1725 {
1726 int val;
1727
1728 info->reglane.regno = extract_field (self->fields[0], code, 0);
1729 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1730 if ((val & 31) == 0)
1731 return FALSE;
1732 while ((val & 1) == 0)
1733 val /= 2;
1734 info->reglane.index = val / 2;
1735 return TRUE;
1736 }
1737
1738 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1739 bfd_boolean
1740 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1741 aarch64_opnd_info *info, const aarch64_insn code,
1742 const aarch64_inst *inst,
1743 aarch64_operand_error *errors)
1744 {
1745 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1746 return (aarch64_ext_limm (self, info, code, inst, errors)
1747 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1748 }
1749
1750 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1751 and where MM occupies the most-significant part. The operand-dependent
1752 value specifies the number of bits in Zn. */
1753 bfd_boolean
1754 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1755 aarch64_opnd_info *info, aarch64_insn code,
1756 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1757 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1758 {
1759 unsigned int reg_bits = get_operand_specific_data (self);
1760 unsigned int val = extract_all_fields (self, code);
1761 info->reglane.regno = val & ((1 << reg_bits) - 1);
1762 info->reglane.index = val >> reg_bits;
1763 return TRUE;
1764 }
1765
1766 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1767 to use for Zn. The opcode-dependent value specifies the number
1768 of registers in the list. */
1769 bfd_boolean
1770 aarch64_ext_sve_reglist (const aarch64_operand *self,
1771 aarch64_opnd_info *info, aarch64_insn code,
1772 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1773 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1774 {
1775 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1776 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1777 return TRUE;
1778 }
1779
1780 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1781 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1782 field. */
1783 bfd_boolean
1784 aarch64_ext_sve_scale (const aarch64_operand *self,
1785 aarch64_opnd_info *info, aarch64_insn code,
1786 const aarch64_inst *inst, aarch64_operand_error *errors)
1787 {
1788 int val;
1789
1790 if (!aarch64_ext_imm (self, info, code, inst, errors))
1791 return FALSE;
1792 val = extract_field (FLD_SVE_imm4, code, 0);
1793 info->shifter.kind = AARCH64_MOD_MUL;
1794 info->shifter.amount = val + 1;
1795 info->shifter.operator_present = (val != 0);
1796 info->shifter.amount_present = (val != 0);
1797 return TRUE;
1798 }
1799
1800 /* Return the top set bit in VALUE, which is expected to be relatively
1801 small. */
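/* Each iteration clears the lowest set bit (value & -value), so the loop
   terminates with only the highest set bit left, e.g. 0b1011 -> 0b1000.  */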
1802 static uint64_t
1803 get_top_bit (uint64_t value)
1804 {
1805 while ((value & -value) != value)
1806 value -= value & -value;
1807 return value;
1808 }
1809
1810 /* Decode an SVE shift-left immediate. */
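/* The raw tsz:imm field effectively encodes the element size in bits plus
   the shift amount, so subtracting the top set bit (the element size)
   leaves the shift, e.g. a raw value of 37 (0b100101) decodes to a left
   shift of #5 on .S elements.  */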
1811 bfd_boolean
1812 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1813 aarch64_opnd_info *info, const aarch64_insn code,
1814 const aarch64_inst *inst, aarch64_operand_error *errors)
1815 {
1816 if (!aarch64_ext_imm (self, info, code, inst, errors)
1817 || info->imm.value == 0)
1818 return FALSE;
1819
1820 info->imm.value -= get_top_bit (info->imm.value);
1821 return TRUE;
1822 }
1823
1824 /* Decode an SVE shift-right immediate. */
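/* Here the raw field encodes (2 * element size) - shift, so the same raw
   value of 37 decodes to a right shift of #27 on .S elements.  */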
1825 bfd_boolean
1826 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1827 aarch64_opnd_info *info, const aarch64_insn code,
1828 const aarch64_inst *inst, aarch64_operand_error *errors)
1829 {
1830 if (!aarch64_ext_imm (self, info, code, inst, errors)
1831 || info->imm.value == 0)
1832 return FALSE;
1833
1834 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1835 return TRUE;
1836 }
1837 \f
1838 /* Bitfields that are commonly used to encode certain operands' information
1839 may be partially used as part of the base opcode in some instructions.
1840 For example, bit 1 of the field 'size' in
1841 FCVTXN <Vb><d>, <Va><n>
1842 is actually part of the base opcode, while only size<0> is available
1843 for encoding the register type. Another example is the AdvSIMD
1844 instruction ORR (register), in which the field 'size' is also used for
1845 the base opcode, leaving only the field 'Q' available to encode the
1846 vector register arrangement specifier '8B' or '16B'.
1847
1848 This function tries to deduce the qualifier from the value of partially
1849 constrained field(s). Given the VALUE of such a field or fields, the
1850 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1851 operand encoding), the function returns the matching qualifier or
1852 AARCH64_OPND_QLF_NIL if nothing matches.
1853
1854 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1855 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1856 may end with AARCH64_OPND_QLF_NIL. */
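/* For the ORR (register) example above, only the Q bit of size:Q is free,
   so MASK isolates that bit and whichever of the 8B/16B candidates has a
   standard value that agrees with VALUE in that bit is returned.  */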
1857
1858 static enum aarch64_opnd_qualifier
1859 get_qualifier_from_partial_encoding (aarch64_insn value,
1860 const enum aarch64_opnd_qualifier* \
1861 candidates,
1862 aarch64_insn mask)
1863 {
1864 int i;
1865 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1866 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1867 {
1868 aarch64_insn standard_value;
1869 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1870 break;
1871 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1872 if ((standard_value & mask) == (value & mask))
1873 return candidates[i];
1874 }
1875 return AARCH64_OPND_QLF_NIL;
1876 }
1877
1878 /* Given a list of qualifier sequences, return all possible valid qualifiers
1879 for operand IDX in QUALIFIERS.
1880 Assume QUALIFIERS is an array whose length is large enough. */
1881
1882 static void
1883 get_operand_possible_qualifiers (int idx,
1884 const aarch64_opnd_qualifier_seq_t *list,
1885 enum aarch64_opnd_qualifier *qualifiers)
1886 {
1887 int i;
1888 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1889 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1890 break;
1891 }
1892
1893 /* Decode the size Q field for e.g. SHADD.
1894 We tag one operand with the qualifier according to the code;
1895 whether the qualifier is valid for this opcode is left to the
1896 semantic checking. */
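/* For example, for SHADD Vd.4S, Vn.4S, Vm.4S the full size:Q value is
   0b101, which maps straight to the 4S arrangement through
   get_vreg_qualifier_from_value in the mask == 0x7 fast path below.  */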
1897
1898 static int
1899 decode_sizeq (aarch64_inst *inst)
1900 {
1901 int idx;
1902 enum aarch64_opnd_qualifier qualifier;
1903 aarch64_insn code;
1904 aarch64_insn value, mask;
1905 enum aarch64_field_kind fld_sz;
1906 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1907
1908 if (inst->opcode->iclass == asisdlse
1909 || inst->opcode->iclass == asisdlsep
1910 || inst->opcode->iclass == asisdlso
1911 || inst->opcode->iclass == asisdlsop)
1912 fld_sz = FLD_vldst_size;
1913 else
1914 fld_sz = FLD_size;
1915
1916 code = inst->value;
1917 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1918 /* Work out which bits of fields Q and size are actually
1919 available for operand encoding. Opcodes like FMAXNM and FMLA have
1920 size[1] unavailable. */
1921 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1922
1923 /* The index of the operand that we are going to tag with a qualifier, and
1924 the qualifier itself, are deduced from the value of the size and Q fields and the
1925 possible valid qualifier lists. */
1926 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1927 DEBUG_TRACE ("key idx: %d", idx);
1928
1929 /* For most related instructions, size:Q is fully available for operand
1930 encoding. */
1931 if (mask == 0x7)
1932 {
1933 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1934 return 1;
1935 }
1936
1937 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1938 candidates);
1939 #ifdef DEBUG_AARCH64
1940 if (debug_dump)
1941 {
1942 int i;
1943 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM
1944 && candidates[i] != AARCH64_OPND_QLF_NIL; ++i)
1945 DEBUG_TRACE ("qualifier %d: %s", i,
1946 aarch64_get_qualifier_name(candidates[i]));
1947 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1948 }
1949 #endif /* DEBUG_AARCH64 */
1950
1951 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1952
1953 if (qualifier == AARCH64_OPND_QLF_NIL)
1954 return 0;
1955
1956 inst->operands[idx].qualifier = qualifier;
1957 return 1;
1958 }
1959
1960 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1961 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1962
1963 static int
1964 decode_asimd_fcvt (aarch64_inst *inst)
1965 {
1966 aarch64_field field = {0, 0};
1967 aarch64_insn value;
1968 enum aarch64_opnd_qualifier qualifier;
1969
1970 gen_sub_field (FLD_size, 0, 1, &field);
1971 value = extract_field_2 (&field, inst->value, 0);
1972 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1973 : AARCH64_OPND_QLF_V_2D;
1974 switch (inst->opcode->op)
1975 {
1976 case OP_FCVTN:
1977 case OP_FCVTN2:
1978 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1979 inst->operands[1].qualifier = qualifier;
1980 break;
1981 case OP_FCVTL:
1982 case OP_FCVTL2:
1983 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1984 inst->operands[0].qualifier = qualifier;
1985 break;
1986 default:
1987 assert (0);
1988 return 0;
1989 }
1990
1991 return 1;
1992 }
1993
1994 /* Decode size[0], i.e. bit 22, for
1995 e.g. FCVTXN <Vb><d>, <Va><n>. */
1996
1997 static int
1998 decode_asisd_fcvtxn (aarch64_inst *inst)
1999 {
2000 aarch64_field field = {0, 0};
2001 gen_sub_field (FLD_size, 0, 1, &field);
2002 if (!extract_field_2 (&field, inst->value, 0))
2003 return 0;
2004 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2005 return 1;
2006 }
2007
2008 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
2009 static int
2010 decode_fcvt (aarch64_inst *inst)
2011 {
2012 enum aarch64_opnd_qualifier qualifier;
2013 aarch64_insn value;
2014 const aarch64_field field = {15, 2};
2015
2016 /* opc dstsize */
2017 value = extract_field_2 (&field, inst->value, 0);
2018 switch (value)
2019 {
2020 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2021 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2022 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2023 default: return 0;
2024 }
2025 inst->operands[0].qualifier = qualifier;
2026
2027 return 1;
2028 }
2029
2030 /* Do miscellaneous decodings that are not common enough to be driven by
2031 flags. */
2032
2033 static int
2034 do_misc_decoding (aarch64_inst *inst)
2035 {
2036 unsigned int value;
2037 switch (inst->opcode->op)
2038 {
2039 case OP_FCVT:
2040 return decode_fcvt (inst);
2041
2042 case OP_FCVTN:
2043 case OP_FCVTN2:
2044 case OP_FCVTL:
2045 case OP_FCVTL2:
2046 return decode_asimd_fcvt (inst);
2047
2048 case OP_FCVTXN_S:
2049 return decode_asisd_fcvtxn (inst);
2050
2051 case OP_MOV_P_P:
2052 case OP_MOVS_P_P:
2053 value = extract_field (FLD_SVE_Pn, inst->value, 0);
2054 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
2055 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2056
2057 case OP_MOV_Z_P_Z:
2058 return (extract_field (FLD_SVE_Zd, inst->value, 0)
2059 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2060
2061 case OP_MOV_Z_V:
2062 /* Index must be zero. */
2063 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2064 return value > 0 && value <= 16 && value == (value & -value);
2065
2066 case OP_MOV_Z_Z:
2067 return (extract_field (FLD_SVE_Zn, inst->value, 0)
2068 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2069
2070 case OP_MOV_Z_Zi:
2071 /* Index must be nonzero. */
2072 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2073 return value > 0 && value != (value & -value);
2074
2075 case OP_MOVM_P_P_P:
2076 return (extract_field (FLD_SVE_Pd, inst->value, 0)
2077 == extract_field (FLD_SVE_Pm, inst->value, 0));
2078
2079 case OP_MOVZS_P_P_P:
2080 case OP_MOVZ_P_P_P:
2081 return (extract_field (FLD_SVE_Pn, inst->value, 0)
2082 == extract_field (FLD_SVE_Pm, inst->value, 0));
2083
2084 case OP_NOTS_P_P_P_Z:
2085 case OP_NOT_P_P_P_Z:
2086 return (extract_field (FLD_SVE_Pm, inst->value, 0)
2087 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2088
2089 default:
2090 return 0;
2091 }
2092 }
2093
2094 /* Opcodes that have fields shared by multiple operands are usually flagged
2095 with flags. In this function, we detect such flags, decode the related
2096 field(s) and store the information in one of the related operands. The
2097 'one' operand is not any particular operand but one of the operands that can
2098 accommodate all the information that has been decoded. */
2099
2100 static int
2101 do_special_decoding (aarch64_inst *inst)
2102 {
2103 int idx;
2104 aarch64_insn value;
2105 /* Condition for truly conditionally executed instructions, e.g. b.cond. */
2106 if (inst->opcode->flags & F_COND)
2107 {
2108 value = extract_field (FLD_cond2, inst->value, 0);
2109 inst->cond = get_cond_from_value (value);
2110 }
2111 /* 'sf' field. */
2112 if (inst->opcode->flags & F_SF)
2113 {
2114 idx = select_operand_for_sf_field_coding (inst->opcode);
2115 value = extract_field (FLD_sf, inst->value, 0);
2116 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2117 if ((inst->opcode->flags & F_N)
2118 && extract_field (FLD_N, inst->value, 0) != value)
2119 return 0;
2120 }
2121 /* 'sf' field. */
2122 if (inst->opcode->flags & F_LSE_SZ)
2123 {
2124 idx = select_operand_for_sf_field_coding (inst->opcode);
2125 value = extract_field (FLD_lse_sz, inst->value, 0);
2126 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2127 }
2128 /* size:Q fields. */
2129 if (inst->opcode->flags & F_SIZEQ)
2130 return decode_sizeq (inst);
2131
2132 if (inst->opcode->flags & F_FPTYPE)
2133 {
2134 idx = select_operand_for_fptype_field_coding (inst->opcode);
2135 value = extract_field (FLD_type, inst->value, 0);
2136 switch (value)
2137 {
2138 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2139 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2140 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2141 default: return 0;
2142 }
2143 }
2144
2145 if (inst->opcode->flags & F_SSIZE)
2146 {
2147 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have size[1] as part
2148 of the base opcode. */
2149 aarch64_insn mask;
2150 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2151 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2152 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2153 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2154 /* For most related instructions, the 'size' field is fully available for
2155 operand encoding. */
2156 if (mask == 0x3)
2157 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2158 else
2159 {
2160 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2161 candidates);
2162 inst->operands[idx].qualifier
2163 = get_qualifier_from_partial_encoding (value, candidates, mask);
2164 }
2165 }
2166
2167 if (inst->opcode->flags & F_T)
2168 {
2169 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2170 int num = 0;
2171 unsigned val, Q;
2172 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2173 == AARCH64_OPND_CLASS_SIMD_REG);
2174 /* imm5<3:0> q <t>
2175 0000 x reserved
2176 xxx1 0 8b
2177 xxx1 1 16b
2178 xx10 0 4h
2179 xx10 1 8h
2180 x100 0 2s
2181 x100 1 4s
2182 1000 0 reserved
2183 1000 1 2d */
2184 val = extract_field (FLD_imm5, inst->value, 0);
2185 while ((val & 0x1) == 0 && ++num <= 3)
2186 val >>= 1;
2187 if (num > 3)
2188 return 0;
2189 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2190 inst->operands[0].qualifier =
2191 get_vreg_qualifier_from_value ((num << 1) | Q);
2192 }
2193
2194 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2195 {
2196 /* Use Rt to encode in the case of e.g.
2197 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2198 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2199 if (idx == -1)
2200 {
2201 /* Otherwise use the result operand, which has to be an integer
2202 register. */
2203 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2204 == AARCH64_OPND_CLASS_INT_REG);
2205 idx = 0;
2206 }
2207 assert (idx == 0 || idx == 1);
2208 value = extract_field (FLD_Q, inst->value, 0);
2209 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2210 }
2211
2212 if (inst->opcode->flags & F_LDS_SIZE)
2213 {
2214 aarch64_field field = {0, 0};
2215 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2216 == AARCH64_OPND_CLASS_INT_REG);
2217 gen_sub_field (FLD_opc, 0, 1, &field);
2218 value = extract_field_2 (&field, inst->value, 0);
2219 inst->operands[0].qualifier
2220 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2221 }
2222
2223 /* Miscellaneous decoding; done as the last step. */
2224 if (inst->opcode->flags & F_MISC)
2225 return do_misc_decoding (inst);
2226
2227 return 1;
2228 }
2229
2230 /* Converters that change a real opcode instruction into its alias form. */
2231
2232 /* ROR <Wd>, <Ws>, #<shift>
2233 is equivalent to:
2234 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2235 static int
2236 convert_extr_to_ror (aarch64_inst *inst)
2237 {
2238 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2239 {
2240 copy_operand_info (inst, 2, 3);
2241 inst->operands[3].type = AARCH64_OPND_NIL;
2242 return 1;
2243 }
2244 return 0;
2245 }
2246
2247 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2248 is equivalent to:
2249 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2250 static int
2251 convert_shll_to_xtl (aarch64_inst *inst)
2252 {
2253 if (inst->operands[2].imm.value == 0)
2254 {
2255 inst->operands[2].type = AARCH64_OPND_NIL;
2256 return 1;
2257 }
2258 return 0;
2259 }
2260
2261 /* Convert
2262 UBFM <Xd>, <Xn>, #<shift>, #63.
2263 to
2264 LSR <Xd>, <Xn>, #<shift>. */
2265 static int
2266 convert_bfm_to_sr (aarch64_inst *inst)
2267 {
2268 int64_t imms, val;
2269
2270 imms = inst->operands[3].imm.value;
2271 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2272 if (imms == val)
2273 {
2274 inst->operands[3].type = AARCH64_OPND_NIL;
2275 return 1;
2276 }
2277
2278 return 0;
2279 }
2280
2281 /* Convert MOV to ORR. */
2282 static int
2283 convert_orr_to_mov (aarch64_inst *inst)
2284 {
2285 /* MOV <Vd>.<T>, <Vn>.<T>
2286 is equivalent to:
2287 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2288 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2289 {
2290 inst->operands[2].type = AARCH64_OPND_NIL;
2291 return 1;
2292 }
2293 return 0;
2294 }
2295
2296 /* When <imms> >= <immr>, the instruction written:
2297 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2298 is equivalent to:
2299 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
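/* For example, SBFM X0, X1, #4, #11 is shown as SBFX X0, X1, #4, #8:
   lsb = immr = 4 and width = imms + 1 - lsb = 8.  */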
2300
2301 static int
2302 convert_bfm_to_bfx (aarch64_inst *inst)
2303 {
2304 int64_t immr, imms;
2305
2306 immr = inst->operands[2].imm.value;
2307 imms = inst->operands[3].imm.value;
2308 if (imms >= immr)
2309 {
2310 int64_t lsb = immr;
2311 inst->operands[2].imm.value = lsb;
2312 inst->operands[3].imm.value = imms + 1 - lsb;
2313 /* The two opcodes have different qualifiers for
2314 the immediate operands; reset to help the checking. */
2315 reset_operand_qualifier (inst, 2);
2316 reset_operand_qualifier (inst, 3);
2317 return 1;
2318 }
2319
2320 return 0;
2321 }
2322
2323 /* When <imms> < <immr>, the instruction written:
2324 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2325 is equivalent to:
2326 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
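/* For example, SBFM X0, X1, #60, #3 is shown as SBFIZ X0, X1, #4, #4:
   lsb = (64 - immr) & 0x3f = 4 and width = imms + 1 = 4.  */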
2327
2328 static int
2329 convert_bfm_to_bfi (aarch64_inst *inst)
2330 {
2331 int64_t immr, imms, val;
2332
2333 immr = inst->operands[2].imm.value;
2334 imms = inst->operands[3].imm.value;
2335 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2336 if (imms < immr)
2337 {
2338 inst->operands[2].imm.value = (val - immr) & (val - 1);
2339 inst->operands[3].imm.value = imms + 1;
2340 /* The two opcodes have different qualifiers for
2341 the immediate operands; reset to help the checking. */
2342 reset_operand_qualifier (inst, 2);
2343 reset_operand_qualifier (inst, 3);
2344 return 1;
2345 }
2346
2347 return 0;
2348 }
2349
2350 /* The instruction written:
2351 BFC <Xd>, #<lsb>, #<width>
2352 is equivalent to:
2353 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2354
2355 static int
2356 convert_bfm_to_bfc (aarch64_inst *inst)
2357 {
2358 int64_t immr, imms, val;
2359
2360 /* Should have been assured by the base opcode value. */
2361 assert (inst->operands[1].reg.regno == 0x1f);
2362
2363 immr = inst->operands[2].imm.value;
2364 imms = inst->operands[3].imm.value;
2365 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2366 if (imms < immr)
2367 {
2368 /* Drop XZR from the second operand. */
2369 copy_operand_info (inst, 1, 2);
2370 copy_operand_info (inst, 2, 3);
2371 inst->operands[3].type = AARCH64_OPND_NIL;
2372
2373 /* Recalculate the immediates. */
2374 inst->operands[1].imm.value = (val - immr) & (val - 1);
2375 inst->operands[2].imm.value = imms + 1;
2376
2377 /* The two opcodes have different qualifiers for the operands; reset to
2378 help the checking. */
2379 reset_operand_qualifier (inst, 1);
2380 reset_operand_qualifier (inst, 2);
2381 reset_operand_qualifier (inst, 3);
2382
2383 return 1;
2384 }
2385
2386 return 0;
2387 }
2388
2389 /* The instruction written:
2390 LSL <Xd>, <Xn>, #<shift>
2391 is equivalent to:
2392 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
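/* For example, UBFM X0, X1, #60, #59 satisfies immr == imms + 1 and is
   shown as LSL X0, X1, #4, since the shift is 63 - imms = 4.  */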
2393
2394 static int
2395 convert_ubfm_to_lsl (aarch64_inst *inst)
2396 {
2397 int64_t immr = inst->operands[2].imm.value;
2398 int64_t imms = inst->operands[3].imm.value;
2399 int64_t val
2400 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2401
2402 if ((immr == 0 && imms == val) || immr == imms + 1)
2403 {
2404 inst->operands[3].type = AARCH64_OPND_NIL;
2405 inst->operands[2].imm.value = val - imms;
2406 return 1;
2407 }
2408
2409 return 0;
2410 }
2411
2412 /* CINC <Wd>, <Wn>, <cond>
2413 is equivalent to:
2414 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2415 where <cond> is not AL or NV. */
2416
2417 static int
2418 convert_from_csel (aarch64_inst *inst)
2419 {
2420 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2421 && (inst->operands[3].cond->value & 0xe) != 0xe)
2422 {
2423 copy_operand_info (inst, 2, 3);
2424 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2425 inst->operands[3].type = AARCH64_OPND_NIL;
2426 return 1;
2427 }
2428 return 0;
2429 }
2430
2431 /* CSET <Wd>, <cond>
2432 is equivalent to:
2433 CSINC <Wd>, WZR, WZR, invert(<cond>)
2434 where <cond> is not AL or NV. */
2435
2436 static int
2437 convert_csinc_to_cset (aarch64_inst *inst)
2438 {
2439 if (inst->operands[1].reg.regno == 0x1f
2440 && inst->operands[2].reg.regno == 0x1f
2441 && (inst->operands[3].cond->value & 0xe) != 0xe)
2442 {
2443 copy_operand_info (inst, 1, 3);
2444 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2445 inst->operands[3].type = AARCH64_OPND_NIL;
2446 inst->operands[2].type = AARCH64_OPND_NIL;
2447 return 1;
2448 }
2449 return 0;
2450 }
2451
2452 /* MOV <Wd>, #<imm>
2453 is equivalent to:
2454 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2455
2456 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2457 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2458 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2459 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2460 machine-instruction mnemonic must be used. */
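/* For example, MOVZ W0, #0x12, LSL #16 is shown as MOV W0, #0x120000,
   while MOVZ/MOVN with a zero immediate and a non-zero shift keep their
   machine mnemonic, per the rule above.  */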
2461
2462 static int
2463 convert_movewide_to_mov (aarch64_inst *inst)
2464 {
2465 uint64_t value = inst->operands[1].imm.value;
2466 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2467 if (value == 0 && inst->operands[1].shifter.amount != 0)
2468 return 0;
2469 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2470 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2471 value <<= inst->operands[1].shifter.amount;
2472 /* As an alias converter, it has to be clear that the INST->OPCODE
2473 is the opcode of the real instruction. */
2474 if (inst->opcode->op == OP_MOVN)
2475 {
2476 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2477 value = ~value;
2478 /* A MOVN has an immediate that could be encoded by MOVZ. */
2479 if (aarch64_wide_constant_p (value, is32, NULL))
2480 return 0;
2481 }
2482 inst->operands[1].imm.value = value;
2483 inst->operands[1].shifter.amount = 0;
2484 return 1;
2485 }
2486
2487 /* MOV <Wd>, #<imm>
2488 is equivalent to:
2489 ORR <Wd>, WZR, #<imm>.
2490
2491 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2492 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2493 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2494 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2495 machine-instruction mnemonic must be used. */
2496
2497 static int
2498 convert_movebitmask_to_mov (aarch64_inst *inst)
2499 {
2500 int is32;
2501 uint64_t value;
2502
2503 /* Should have been assured by the base opcode value. */
2504 assert (inst->operands[1].reg.regno == 0x1f);
2505 copy_operand_info (inst, 1, 2);
2506 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2507 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2508 value = inst->operands[1].imm.value;
2509 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2510 instruction. */
2511 if (inst->operands[0].reg.regno != 0x1f
2512 && (aarch64_wide_constant_p (value, is32, NULL)
2513 || aarch64_wide_constant_p (~value, is32, NULL)))
2514 return 0;
2515
2516 inst->operands[2].type = AARCH64_OPND_NIL;
2517 return 1;
2518 }
2519
2520 /* Some alias opcodes are disassembled by being converted from their real form.
2521 N.B. INST->OPCODE is the real opcode rather than the alias. */
2522
2523 static int
2524 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2525 {
2526 switch (alias->op)
2527 {
2528 case OP_ASR_IMM:
2529 case OP_LSR_IMM:
2530 return convert_bfm_to_sr (inst);
2531 case OP_LSL_IMM:
2532 return convert_ubfm_to_lsl (inst);
2533 case OP_CINC:
2534 case OP_CINV:
2535 case OP_CNEG:
2536 return convert_from_csel (inst);
2537 case OP_CSET:
2538 case OP_CSETM:
2539 return convert_csinc_to_cset (inst);
2540 case OP_UBFX:
2541 case OP_BFXIL:
2542 case OP_SBFX:
2543 return convert_bfm_to_bfx (inst);
2544 case OP_SBFIZ:
2545 case OP_BFI:
2546 case OP_UBFIZ:
2547 return convert_bfm_to_bfi (inst);
2548 case OP_BFC:
2549 return convert_bfm_to_bfc (inst);
2550 case OP_MOV_V:
2551 return convert_orr_to_mov (inst);
2552 case OP_MOV_IMM_WIDE:
2553 case OP_MOV_IMM_WIDEN:
2554 return convert_movewide_to_mov (inst);
2555 case OP_MOV_IMM_LOG:
2556 return convert_movebitmask_to_mov (inst);
2557 case OP_ROR_IMM:
2558 return convert_extr_to_ror (inst);
2559 case OP_SXTL:
2560 case OP_SXTL2:
2561 case OP_UXTL:
2562 case OP_UXTL2:
2563 return convert_shll_to_xtl (inst);
2564 default:
2565 return 0;
2566 }
2567 }
2568
2569 static bfd_boolean
2570 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2571 aarch64_inst *, int, aarch64_operand_error *errors);
2572
2573 /* Given the instruction information in *INST, check if the instruction has
2574 any alias form that can be used to represent *INST. If the answer is yes,
2575 update *INST to be in the form of the determined alias. */
2576
2577 /* In the opcode description table, the following flags are used in opcode
2578 entries to help establish the relations between the real and alias opcodes:
2579
2580 F_ALIAS: opcode is an alias
2581 F_HAS_ALIAS: opcode has alias(es)
2582 F_P1
2583 F_P2
2584 F_P3: Disassembly preference priority 1-3 (the larger, the
2585 higher). If nothing is specified, the priority is
2586 0 by default, i.e. the lowest priority.
2587
2588 Although the relation between the machine and the alias instructions is not
2589 explicitly described, it can be easily determined from the base opcode
2590 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2591 description entries:
2592
2593 The mask of an alias opcode must be equal to or a super-set (i.e. more
2594 constrained) of that of the aliased opcode; so is the base opcode value.
2595
2596 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2597 && (opcode->mask & real->mask) == real->mask
2598 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2599 then OPCODE is an alias of, and only of, the REAL instruction
2600
2601 The alias relationship is forced flat-structured to keep the related algorithm
2602 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2603
2604 During the disassembling, the decoding decision tree (in
2605 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2606 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2607 not specified), the disassembler will check whether any alias
2608 instruction exists for this real instruction. If there is, the disassembler
2609 will try to disassemble the 32-bit binary again using the alias's rule, or
2610 try to convert the IR to the form of the alias. In the case of multiple
2611 aliases, the aliases are tried one by one from the highest priority
2612 (currently the flag F_P3) to the lowest priority (no priority flag), and the
2613 first one that succeeds is adopted.
2614
2615 You may ask why there is a need for the conversion of IR from one form to
2616 another in handling certain aliases. This is because on one hand it avoids
2617 adding more operand code to handle unusual encoding/decoding; on the other
2618 hand, during the disassembling, the conversion is an effective approach to
2619 check the condition of an alias (as an alias may be adopted only if certain
2620 conditions are met).
2621
2622 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2623 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2624 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
2625
2626 static void
2627 determine_disassembling_preference (struct aarch64_inst *inst,
2628 aarch64_operand_error *errors)
2629 {
2630 const aarch64_opcode *opcode;
2631 const aarch64_opcode *alias;
2632
2633 opcode = inst->opcode;
2634
2635 /* This opcode does not have an alias, so use itself. */
2636 if (!opcode_has_alias (opcode))
2637 return;
2638
2639 alias = aarch64_find_alias_opcode (opcode);
2640 assert (alias);
2641
2642 #ifdef DEBUG_AARCH64
2643 if (debug_dump)
2644 {
2645 const aarch64_opcode *tmp = alias;
2646 printf ("#### LIST orderd: ");
2647 while (tmp)
2648 {
2649 printf ("%s, ", tmp->name);
2650 tmp = aarch64_find_next_alias_opcode (tmp);
2651 }
2652 printf ("\n");
2653 }
2654 #endif /* DEBUG_AARCH64 */
2655
2656 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2657 {
2658 DEBUG_TRACE ("try %s", alias->name);
2659 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2660
2661 /* An alias can be a pseudo opcode which will never be used in the
2662 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2663 aliasing AND. */
2664 if (pseudo_opcode_p (alias))
2665 {
2666 DEBUG_TRACE ("skip pseudo %s", alias->name);
2667 continue;
2668 }
2669
2670 if ((inst->value & alias->mask) != alias->opcode)
2671 {
2672 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2673 continue;
2674 }
2675 /* No need to do any complicated transformation on operands, if the alias
2676 opcode does not have any operand. */
2677 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2678 {
2679 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2680 aarch64_replace_opcode (inst, alias);
2681 return;
2682 }
2683 if (alias->flags & F_CONV)
2684 {
2685 aarch64_inst copy;
2686 memcpy (&copy, inst, sizeof (aarch64_inst));
2687 /* ALIAS is the preference as long as the instruction can be
2688 successfully converted to the form of ALIAS. */
2689 if (convert_to_alias (&copy, alias) == 1)
2690 {
2691 aarch64_replace_opcode (&copy, alias);
2692 assert (aarch64_match_operands_constraint (&copy, NULL));
2693 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2694 memcpy (inst, &copy, sizeof (aarch64_inst));
2695 return;
2696 }
2697 }
2698 else
2699 {
2700 /* Directly decode the alias opcode. */
2701 aarch64_inst temp;
2702 memset (&temp, '\0', sizeof (aarch64_inst));
2703 if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
2704 {
2705 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2706 memcpy (inst, &temp, sizeof (aarch64_inst));
2707 return;
2708 }
2709 }
2710 }
2711 }
2712
2713 /* Some instructions (including all SVE ones) use the instruction class
2714 to describe how a qualifiers_list index is represented in the instruction
2715 encoding. If INST is such an instruction, decode the appropriate fields
2716 and fill in the operand qualifiers accordingly. Return true if no
2717 problems are found. */
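/* For example, for the sve_size_hsd class below, a size field of 2 yields
   variant 1 and so selects the second entry in the opcode's
   qualifiers_list.  */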
2718
2719 static bfd_boolean
2720 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2721 {
2722 int i, variant;
2723
2724 variant = 0;
2725 switch (inst->opcode->iclass)
2726 {
2727 case sve_cpy:
2728 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2729 break;
2730
2731 case sve_index:
2732 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2733 if ((i & 31) == 0)
2734 return FALSE;
2735 while ((i & 1) == 0)
2736 {
2737 i >>= 1;
2738 variant += 1;
2739 }
2740 break;
2741
2742 case sve_limm:
2743 /* Pick the smallest applicable element size. */
2744 if ((inst->value & 0x20600) == 0x600)
2745 variant = 0;
2746 else if ((inst->value & 0x20400) == 0x400)
2747 variant = 1;
2748 else if ((inst->value & 0x20000) == 0)
2749 variant = 2;
2750 else
2751 variant = 3;
2752 break;
2753
2754 case sve_misc:
2755 /* sve_misc instructions have only a single variant. */
2756 break;
2757
2758 case sve_movprfx:
2759 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2760 break;
2761
2762 case sve_pred_zm:
2763 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2764 break;
2765
2766 case sve_shift_pred:
2767 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2768 sve_shift:
2769 if (i == 0)
2770 return FALSE;
2771 while (i != 1)
2772 {
2773 i >>= 1;
2774 variant += 1;
2775 }
2776 break;
2777
2778 case sve_shift_unpred:
2779 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2780 goto sve_shift;
2781
2782 case sve_size_bhs:
2783 variant = extract_field (FLD_size, inst->value, 0);
2784 if (variant >= 3)
2785 return FALSE;
2786 break;
2787
2788 case sve_size_bhsd:
2789 variant = extract_field (FLD_size, inst->value, 0);
2790 break;
2791
2792 case sve_size_hsd:
2793 i = extract_field (FLD_size, inst->value, 0);
2794 if (i < 1)
2795 return FALSE;
2796 variant = i - 1;
2797 break;
2798
2799 case sve_size_sd:
2800 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2801 break;
2802
2803 default:
2804 /* No mapping between instruction class and qualifiers. */
2805 return TRUE;
2806 }
2807
2808 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2809 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2810 return TRUE;
2811 }
2812 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2813 fails, which means that CODE is not an instruction of OPCODE; otherwise
2814 return 1.
2815
2816 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2817 determined and used to disassemble CODE; this is done just before the
2818 return. */
2819
2820 static bfd_boolean
2821 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2822 aarch64_inst *inst, int noaliases_p,
2823 aarch64_operand_error *errors)
2824 {
2825 int i;
2826
2827 DEBUG_TRACE ("enter with %s", opcode->name);
2828
2829 assert (opcode && inst);
2830
2831 /* Clear inst. */
2832 memset (inst, '\0', sizeof (aarch64_inst));
2833
2834 /* Check the base opcode. */
2835 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2836 {
2837 DEBUG_TRACE ("base opcode match FAIL");
2838 goto decode_fail;
2839 }
2840
2841 inst->opcode = opcode;
2842 inst->value = code;
2843
2844 /* Assign operand codes and indexes. */
2845 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2846 {
2847 if (opcode->operands[i] == AARCH64_OPND_NIL)
2848 break;
2849 inst->operands[i].type = opcode->operands[i];
2850 inst->operands[i].idx = i;
2851 }
2852
2853 /* Call the opcode decoder indicated by flags. */
2854 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2855 {
2856 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2857 goto decode_fail;
2858 }
2859
2860 /* Possibly use the instruction class to determine the correct
2861 qualifier. */
2862 if (!aarch64_decode_variant_using_iclass (inst))
2863 {
2864 DEBUG_TRACE ("iclass-based decoder FAIL");
2865 goto decode_fail;
2866 }
2867
2868 /* Call operand decoders. */
2869 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2870 {
2871 const aarch64_operand *opnd;
2872 enum aarch64_opnd type;
2873
2874 type = opcode->operands[i];
2875 if (type == AARCH64_OPND_NIL)
2876 break;
2877 opnd = &aarch64_operands[type];
2878 if (operand_has_extractor (opnd)
2879 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
2880 errors)))
2881 {
2882 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2883 goto decode_fail;
2884 }
2885 }
2886
2887 /* If the opcode has a verifier, then check it now. */
2888 if (opcode->verifier
2889 && opcode->verifier (inst, code, 0, FALSE, errors, NULL) != ERR_OK)
2890 {
2891 DEBUG_TRACE ("operand verifier FAIL");
2892 goto decode_fail;
2893 }
2894
2895 /* Match the qualifiers. */
2896 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2897 {
2898 /* Arriving here, the CODE has been determined as a valid instruction
2899 of OPCODE and *INST has been filled with information of this OPCODE
2900 instruction. Before the return, check if the instruction has any
2901 alias and should be disassembled in the form of its alias instead.
2902 If the answer is yes, *INST will be updated. */
2903 if (!noaliases_p)
2904 determine_disassembling_preference (inst, errors);
2905 DEBUG_TRACE ("SUCCESS");
2906 return TRUE;
2907 }
2908 else
2909 {
2910 DEBUG_TRACE ("constraint matching FAIL");
2911 }
2912
2913 decode_fail:
2914 return FALSE;
2915 }
2916 \f
2917 /* This does some user-friendly fix-up to *INST. It currently focuses on
2918 adjusting qualifiers to help the printed instruction be
2919 recognized/understood more easily. */
2920
2921 static void
2922 user_friendly_fixup (aarch64_inst *inst)
2923 {
2924 switch (inst->opcode->iclass)
2925 {
2926 case testbranch:
2927 /* TBNZ Xn|Wn, #uimm6, label
2928 Test and Branch Not Zero: conditionally jumps to label if bit number
2929 uimm6 in register Xn is not zero. The bit number implies the width of
2930 the register, which may be written and should be disassembled as Wn if
2931 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2932 */
2933 if (inst->operands[1].imm.value < 32)
2934 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2935 break;
2936 default: break;
2937 }
2938 }
2939
2940 /* Decode INSN and fill *INST with the instruction information. An alias
2941 opcode may be filled in *INST if NOALIASES_P is FALSE. Return ERR_OK on
2942 success. */
2943
2944 enum err_type
2945 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2946 bfd_boolean noaliases_p,
2947 aarch64_operand_error *errors)
2948 {
2949 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2950
2951 #ifdef DEBUG_AARCH64
2952 if (debug_dump)
2953 {
2954 const aarch64_opcode *tmp = opcode;
2955 printf ("\n");
2956 DEBUG_TRACE ("opcode lookup:");
2957 while (tmp != NULL)
2958 {
2959 aarch64_verbose (" %s", tmp->name);
2960 tmp = aarch64_find_next_opcode (tmp);
2961 }
2962 }
2963 #endif /* DEBUG_AARCH64 */
2964
2965 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2966 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2967 opcode field and value, apart from the difference that one of them has an
2968 extra field as part of the opcode, but such a field is used for operand
2969 encoding in other opcode(s) ('immh' in the case of the example). */
2970 while (opcode != NULL)
2971 {
2972 /* Only one opcode can be decoded successfully, as the
2973 decoding routine checks the constraints carefully. */
2974 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
2975 return ERR_OK;
2976 opcode = aarch64_find_next_opcode (opcode);
2977 }
2978
2979 return ERR_UND;
2980 }
2981
2982 /* Print operands. */
2983
2984 static void
2985 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2986 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2987 {
2988 int i, pcrel_p, num_printed;
2989 char *notes = NULL;
2990 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2991 {
2992 char str[128];
2993 /* We rely mainly on the opcode's operand info; however, we also look into
2994 inst->operands to support the disassembling of the optional
2995 operand.
2996 The two operand codes should be the same in all cases, apart from
2997 when the operand can be optional. */
2998 if (opcode->operands[i] == AARCH64_OPND_NIL
2999 || opnds[i].type == AARCH64_OPND_NIL)
3000 break;
3001
3002 /* Generate the operand string in STR. */
3003 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
3004 &info->target, &notes);
3005
3006 /* Print the delimiter (taking account of omitted operand(s)). */
3007 if (str[0] != '\0')
3008 (*info->fprintf_func) (info->stream, "%s",
3009 num_printed++ == 0 ? "\t" : ", ");
3010
3011 /* Print the operand. */
3012 if (pcrel_p)
3013 (*info->print_address_func) (info->target, info);
3014 else
3015 (*info->fprintf_func) (info->stream, "%s", str);
3016 }
3017
3018 if (notes && !no_notes)
3019 (*info->fprintf_func) (info->stream, "\t; note: %s", notes);
3020 }
3021
3022 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
3023
3024 static void
3025 remove_dot_suffix (char *name, const aarch64_inst *inst)
3026 {
3027 char *ptr;
3028 size_t len;
3029
3030 ptr = strchr (inst->opcode->name, '.');
3031 assert (ptr && inst->cond);
3032 len = ptr - inst->opcode->name;
3033 assert (len < 8);
3034 strncpy (name, inst->opcode->name, len);
3035 name[len] = '\0';
3036 }
3037
3038 /* Print the instruction mnemonic name. */
3039
3040 static void
3041 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3042 {
3043 if (inst->opcode->flags & F_COND)
3044 {
3045 /* For instructions that are truly conditionally executed, e.g. b.cond,
3046 prepare the full mnemonic name with the corresponding condition
3047 suffix. */
3048 char name[8];
3049
3050 remove_dot_suffix (name, inst);
3051 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
3052 }
3053 else
3054 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
3055 }
3056
3057 /* Decide whether we need to print a comment after the operands of
3058 instruction INST. */
3059
3060 static void
3061 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
3062 {
3063 if (inst->opcode->flags & F_COND)
3064 {
3065 char name[8];
3066 unsigned int i, num_conds;
3067
3068 remove_dot_suffix (name, inst);
3069 num_conds = ARRAY_SIZE (inst->cond->names);
3070 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
3071 (*info->fprintf_func) (info->stream, "%s %s.%s",
3072 i == 1 ? " //" : ",",
3073 name, inst->cond->names[i]);
3074 }
3075 }
3076
3077 /* Print the instruction according to *INST. */
3078
3079 static void
3080 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
3081 struct disassemble_info *info)
3082 {
3083 print_mnemonic_name (inst, info);
3084 print_operands (pc, inst->opcode, inst->operands, info);
3085 print_comment (inst, info);
3086 }
3087
3088 /* Entry-point of the instruction disassembler and printer. */
3089
3090 static void
3091 print_insn_aarch64_word (bfd_vma pc,
3092 uint32_t word,
3093 struct disassemble_info *info,
3094 aarch64_operand_error *errors)
3095 {
3096 static const char *err_msg[ERR_NR_ENTRIES+1] =
3097 {
3098 [ERR_OK] = "_",
3099 [ERR_UND] = "undefined",
3100 [ERR_UNP] = "unpredictable",
3101 [ERR_NYI] = "NYI"
3102 };
3103
3104 enum err_type ret;
3105 aarch64_inst inst;
3106
3107 info->insn_info_valid = 1;
3108 info->branch_delay_insns = 0;
3109 info->data_size = 0;
3110 info->target = 0;
3111 info->target2 = 0;
3112
3113 if (info->flags & INSN_HAS_RELOC)
3114 /* If the instruction has a reloc associated with it, then
3115 the offset field in the instruction will actually be the
3116 addend for the reloc. (If we are using REL type relocs).
3117 In such cases, we can ignore the pc when computing
3118 addresses, since the addend is not currently pc-relative. */
3119 pc = 0;
3120
3121 ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
3122
3123 if (((word >> 21) & 0x3ff) == 1)
3124 {
3125 /* RESERVED for ALES. */
3126 assert (ret != ERR_OK);
3127 ret = ERR_NYI;
3128 }
3129
3130 switch (ret)
3131 {
3132 case ERR_UND:
3133 case ERR_UNP:
3134 case ERR_NYI:
3135 /* Handle undefined instructions. */
3136 info->insn_type = dis_noninsn;
3137 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
3138 word, err_msg[ret]);
3139 break;
3140 case ERR_OK:
3141 user_friendly_fixup (&inst);
3142 print_aarch64_insn (pc, &inst, info);
3143 break;
3144 default:
3145 abort ();
3146 }
3147 }
3148
3149 /* Disallow mapping symbols ($x, $d etc) from
3150 being displayed in symbol relative addresses. */
3151
3152 bfd_boolean
3153 aarch64_symbol_is_valid (asymbol * sym,
3154 struct disassemble_info * info ATTRIBUTE_UNUSED)
3155 {
3156 const char * name;
3157
3158 if (sym == NULL)
3159 return FALSE;
3160
3161 name = bfd_asymbol_name (sym);
3162
3163 return name
3164 && (name[0] != '$'
3165 || (name[1] != 'x' && name[1] != 'd')
3166 || (name[2] != '\0' && name[2] != '.'));
3167 }
3168
3169 /* Print data bytes on INFO->STREAM. */
3170
3171 static void
3172 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3173 uint32_t word,
3174 struct disassemble_info *info,
3175 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
3176 {
3177 switch (info->bytes_per_chunk)
3178 {
3179 case 1:
3180 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3181 break;
3182 case 2:
3183 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3184 break;
3185 case 4:
3186 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3187 break;
3188 default:
3189 abort ();
3190 }
3191 }
3192
3193 /* Try to infer the code or data type from a symbol.
3194 Returns nonzero if *MAP_TYPE was set. */
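/* Mapping symbols follow the AArch64 ELF convention: "$x" (optionally with
   a "." suffix) marks the start of A64 code and "$d" marks the start of a
   data/literal region.  */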
3195
3196 static int
3197 get_sym_code_type (struct disassemble_info *info, int n,
3198 enum map_type *map_type)
3199 {
3200 elf_symbol_type *es;
3201 unsigned int type;
3202 const char *name;
3203
3204 /* If the symbol is in a different section, ignore it. */
3205 if (info->section != NULL && info->section != info->symtab[n]->section)
3206 return FALSE;
3207
3208 es = *(elf_symbol_type **)(info->symtab + n);
3209 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3210
3211 /* If the symbol has function type then use that. */
3212 if (type == STT_FUNC)
3213 {
3214 *map_type = MAP_INSN;
3215 return TRUE;
3216 }
3217
3218 /* Check for mapping symbols. */
3219 name = bfd_asymbol_name(info->symtab[n]);
3220 if (name[0] == '$'
3221 && (name[1] == 'x' || name[1] == 'd')
3222 && (name[2] == '\0' || name[2] == '.'))
3223 {
3224 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3225 return TRUE;
3226 }
3227
3228 return FALSE;
3229 }
3230
3231 /* Entry-point of the AArch64 disassembler. */
3232
3233 int
3234 print_insn_aarch64 (bfd_vma pc,
3235 struct disassemble_info *info)
3236 {
3237 bfd_byte buffer[INSNLEN];
3238 int status;
3239 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *,
3240 aarch64_operand_error *);
3241 bfd_boolean found = FALSE;
3242 unsigned int size = 4;
3243 unsigned long data;
3244 aarch64_operand_error errors;
3245
3246 if (info->disassembler_options)
3247 {
3248 set_default_aarch64_dis_options (info);
3249
3250 parse_aarch64_dis_options (info->disassembler_options);
3251
3252 /* To avoid repeated parsing of these options, we remove them here. */
3253 info->disassembler_options = NULL;
3254 }
3255
3256 /* AArch64 instructions are always little-endian. */
3257 info->endian_code = BFD_ENDIAN_LITTLE;
3258
3259 /* First check the full symtab for a mapping symbol, even if there
3260 are no usable non-mapping symbols for this address. */
3261 if (info->symtab_size != 0
3262 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3263 {
3264 enum map_type type = MAP_INSN;
3265 int last_sym = -1;
3266 bfd_vma addr;
3267 int n;
3268
3269 if (pc <= last_mapping_addr)
3270 last_mapping_sym = -1;
3271
3272 /* Start scanning at the start of the function, or wherever
3273 we finished last time. */
3274 n = info->symtab_pos + 1;
3275 if (n < last_mapping_sym)
3276 n = last_mapping_sym;
3277
3278 /* Scan up to the location being disassembled. */
3279 for (; n < info->symtab_size; n++)
3280 {
3281 addr = bfd_asymbol_value (info->symtab[n]);
3282 if (addr > pc)
3283 break;
3284 if (get_sym_code_type (info, n, &type))
3285 {
3286 last_sym = n;
3287 found = TRUE;
3288 }
3289 }
3290
3291 if (!found)
3292 {
3293 n = info->symtab_pos;
3294 if (n < last_mapping_sym)
3295 n = last_mapping_sym;
3296
3297 /* No mapping symbol found at this address. Look backwards
3298 for a preceding one. */
3299 for (; n >= 0; n--)
3300 {
3301 if (get_sym_code_type (info, n, &type))
3302 {
3303 last_sym = n;
3304 found = TRUE;
3305 break;
3306 }
3307 }
3308 }
3309
3310 last_mapping_sym = last_sym;
3311 last_type = type;
3312
3313 /* Look a little bit ahead to see if we should print out
3314 less than four bytes of data. If there's a symbol,
3315 mapping or otherwise, after two bytes then don't
3316 print more. */
3317 if (last_type == MAP_DATA)
3318 {
3319 size = 4 - (pc & 3);
3320 for (n = last_sym + 1; n < info->symtab_size; n++)
3321 {
3322 addr = bfd_asymbol_value (info->symtab[n]);
3323 if (addr > pc)
3324 {
3325 if (addr - pc < size)
3326 size = addr - pc;
3327 break;
3328 }
3329 }
3330 /* If the next symbol is after three bytes, we need to
3331 print only part of the data, so that we can use either
3332 .byte or .short. */
3333 if (size == 3)
3334 size = (pc & 1) ? 1 : 2;
3335 }
3336 }
3337
3338 if (last_type == MAP_DATA)
3339 {
3340 /* size was set above. */
3341 info->bytes_per_chunk = size;
3342 info->display_endian = info->endian;
3343 printer = print_insn_data;
3344 }
3345 else
3346 {
3347 info->bytes_per_chunk = size = INSNLEN;
3348 info->display_endian = info->endian_code;
3349 printer = print_insn_aarch64_word;
3350 }
3351
3352 status = (*info->read_memory_func) (pc, buffer, size, info);
3353 if (status != 0)
3354 {
3355 (*info->memory_error_func) (status, pc, info);
3356 return -1;
3357 }
3358
3359 data = bfd_get_bits (buffer, size * 8,
3360 info->display_endian == BFD_ENDIAN_BIG);
3361
3362 (*printer) (pc, data, info, &errors);
3363
3364 return size;
3365 }
3366 \f
3367 void
3368 print_aarch64_disassembler_options (FILE *stream)
3369 {
3370 fprintf (stream, _("\n\
3371 The following AARCH64 specific disassembler options are supported for use\n\
3372 with the -M switch (multiple options should be separated by commas):\n"));
3373
3374 fprintf (stream, _("\n\
3375 no-aliases Don't print instruction aliases.\n"));
3376
3377 fprintf (stream, _("\n\
3378 aliases Do print instruction aliases.\n"));
3379
3380 fprintf (stream, _("\n\
3381 no-notes Don't print instruction notes.\n"));
3382
3383 fprintf (stream, _("\n\
3384 notes Do print instruction notes.\n"));
3385
3386 #ifdef DEBUG_AARCH64
3387 fprintf (stream, _("\n\
3388 debug_dump Temp switch for debug trace.\n"));
3389 #endif /* DEBUG_AARCH64 */
3390
3391 fprintf (stream, _("\n"));
3392 }