5994b2b655138885b053bf92b94ab2f52d04cf4e
[deliverable/binutils-gdb.git] / opcodes / aarch64-dis.c
1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define ERR_OK 0
30 #define ERR_UND -1
31 #define ERR_UNP -3
32 #define ERR_NYI -5
33
34 #define INSNLEN 4
35
36 /* Cached mapping symbol state. */
37 enum map_type
38 {
39 MAP_INSN,
40 MAP_DATA
41 };
42
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
46
47 /* Other options */
48 static int no_aliases = 0; /* If set disassemble as most general inst. */
49 \f
50
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
61 {
62 no_aliases = 1;
63 return;
64 }
65
66 if (CONST_STRNEQ (option, "aliases"))
67 {
68 no_aliases = 0;
69 return;
70 }
71
72 #ifdef DEBUG_AARCH64
73 if (CONST_STRNEQ (option, "debug_dump"))
74 {
75 debug_dump = 1;
76 return;
77 }
78 #endif /* DEBUG_AARCH64 */
79
80 /* Invalid option. */
81 opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
82 }
83
84 static void
85 parse_aarch64_dis_options (const char *options)
86 {
87 const char *option_end;
88
89 if (options == NULL)
90 return;
91
92 while (*options != '\0')
93 {
94 /* Skip empty options. */
95 if (*options == ',')
96 {
97 options++;
98 continue;
99 }
100
101 /* We know that *options is neither NUL or a comma. */
102 option_end = options + 1;
103 while (*option_end != ',' && *option_end != '\0')
104 option_end++;
105
106 parse_aarch64_dis_option (options, option_end - options);
107
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
111 }
112 }
113 \f
114 /* Functions doing the instruction disassembling. */
115
116 /* The unnamed arguments consist of the number of fields and information about
117 these fields where the VALUE will be extracted from CODE and returned.
118 MASK can be zero or the base mask of the opcode.
119
120 N.B. the fields are required to be in such an order than the most signficant
121 field for VALUE comes the first, e.g. the <index> in
122 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
124 the order of H, L, M. */
125
126 aarch64_insn
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
128 {
129 uint32_t num;
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
132 va_list va;
133
134 va_start (va, mask);
135 num = va_arg (va, uint32_t);
136 assert (num <= 5);
137 aarch64_insn value = 0x0;
138 while (num--)
139 {
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
144 }
145 return value;
146 }
147
148 /* Extract the value of all fields in SELF->fields from instruction CODE.
149 The least significant bit comes from the final field. */
150
151 static aarch64_insn
152 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
153 {
154 aarch64_insn value;
155 unsigned int i;
156 enum aarch64_field_kind kind;
157
158 value = 0;
159 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
160 {
161 kind = self->fields[i];
162 value <<= fields[kind].width;
163 value |= extract_field (kind, code, 0);
164 }
165 return value;
166 }
167
168 /* Sign-extend bit I of VALUE. */
169 static inline int32_t
170 sign_extend (aarch64_insn value, unsigned i)
171 {
172 uint32_t ret = value;
173
174 assert (i < 32);
175 if ((value >> i) & 0x1)
176 {
177 uint32_t val = (uint32_t)(-1) << i;
178 ret = ret | val;
179 }
180 return (int32_t) ret;
181 }
182
183 /* N.B. the following inline helpfer functions create a dependency on the
184 order of operand qualifier enumerators. */
185
186 /* Given VALUE, return qualifier for a general purpose register. */
187 static inline enum aarch64_opnd_qualifier
188 get_greg_qualifier_from_value (aarch64_insn value)
189 {
190 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
191 assert (value <= 0x1
192 && aarch64_get_qualifier_standard_value (qualifier) == value);
193 return qualifier;
194 }
195
196 /* Given VALUE, return qualifier for a vector register. This does not support
197 decoding instructions that accept the 2H vector type. */
198
199 static inline enum aarch64_opnd_qualifier
200 get_vreg_qualifier_from_value (aarch64_insn value)
201 {
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
203
204 /* Instructions using vector type 2H should not call this function. Skip over
205 the 2H qualifier. */
206 if (qualifier >= AARCH64_OPND_QLF_V_2H)
207 qualifier += 1;
208
209 assert (value <= 0x8
210 && aarch64_get_qualifier_standard_value (qualifier) == value);
211 return qualifier;
212 }
213
214 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
215 static inline enum aarch64_opnd_qualifier
216 get_sreg_qualifier_from_value (aarch64_insn value)
217 {
218 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
219
220 assert (value <= 0x4
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
222 return qualifier;
223 }
224
225 /* Given the instruction in *INST which is probably half way through the
226 decoding and our caller wants to know the expected qualifier for operand
227 I. Return such a qualifier if we can establish it; otherwise return
228 AARCH64_OPND_QLF_NIL. */
229
230 static aarch64_opnd_qualifier_t
231 get_expected_qualifier (const aarch64_inst *inst, int i)
232 {
233 aarch64_opnd_qualifier_seq_t qualifiers;
234 /* Should not be called if the qualifier is known. */
235 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
236 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
237 i, qualifiers))
238 return qualifiers[i];
239 else
240 return AARCH64_OPND_QLF_NIL;
241 }
242
243 /* Operand extractors. */
244
245 bfd_boolean
246 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
247 const aarch64_insn code,
248 const aarch64_inst *inst ATTRIBUTE_UNUSED,
249 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
250 {
251 info->reg.regno = extract_field (self->fields[0], code, 0);
252 return TRUE;
253 }
254
255 bfd_boolean
256 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
257 const aarch64_insn code ATTRIBUTE_UNUSED,
258 const aarch64_inst *inst ATTRIBUTE_UNUSED,
259 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
260 {
261 assert (info->idx == 1
262 || info->idx ==3);
263 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
264 return TRUE;
265 }
266
267 /* e.g. IC <ic_op>{, <Xt>}. */
268 bfd_boolean
269 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
270 const aarch64_insn code,
271 const aarch64_inst *inst ATTRIBUTE_UNUSED,
272 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
273 {
274 info->reg.regno = extract_field (self->fields[0], code, 0);
275 assert (info->idx == 1
276 && (aarch64_get_operand_class (inst->operands[0].type)
277 == AARCH64_OPND_CLASS_SYSTEM));
278 /* This will make the constraint checking happy and more importantly will
279 help the disassembler determine whether this operand is optional or
280 not. */
281 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
282
283 return TRUE;
284 }
285
286 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
287 bfd_boolean
288 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
289 const aarch64_insn code,
290 const aarch64_inst *inst ATTRIBUTE_UNUSED,
291 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
292 {
293 /* regno */
294 info->reglane.regno = extract_field (self->fields[0], code,
295 inst->opcode->mask);
296
297 /* Index and/or type. */
298 if (inst->opcode->iclass == asisdone
299 || inst->opcode->iclass == asimdins)
300 {
301 if (info->type == AARCH64_OPND_En
302 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
303 {
304 unsigned shift;
305 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
306 assert (info->idx == 1); /* Vn */
307 aarch64_insn value = extract_field (FLD_imm4, code, 0);
308 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
309 info->qualifier = get_expected_qualifier (inst, info->idx);
310 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
311 info->reglane.index = value >> shift;
312 }
313 else
314 {
315 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
316 imm5<3:0> <V>
317 0000 RESERVED
318 xxx1 B
319 xx10 H
320 x100 S
321 1000 D */
322 int pos = -1;
323 aarch64_insn value = extract_field (FLD_imm5, code, 0);
324 while (++pos <= 3 && (value & 0x1) == 0)
325 value >>= 1;
326 if (pos > 3)
327 return FALSE;
328 info->qualifier = get_sreg_qualifier_from_value (pos);
329 info->reglane.index = (unsigned) (value >> 1);
330 }
331 }
332 else if (inst->opcode->iclass == dotproduct)
333 {
334 /* Need information in other operand(s) to help decoding. */
335 info->qualifier = get_expected_qualifier (inst, info->idx);
336 switch (info->qualifier)
337 {
338 case AARCH64_OPND_QLF_S_4B:
339 /* L:H */
340 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
341 info->reglane.regno &= 0x1f;
342 break;
343 default:
344 return FALSE;
345 }
346 }
347 else if (inst->opcode->iclass == cryptosm3)
348 {
349 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
350 info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
351 }
352 else
353 {
354 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
355 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
356
357 /* Need information in other operand(s) to help decoding. */
358 info->qualifier = get_expected_qualifier (inst, info->idx);
359 switch (info->qualifier)
360 {
361 case AARCH64_OPND_QLF_S_H:
362 /* h:l:m */
363 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
364 FLD_M);
365 info->reglane.regno &= 0xf;
366 break;
367 case AARCH64_OPND_QLF_S_S:
368 /* h:l */
369 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
370 break;
371 case AARCH64_OPND_QLF_S_D:
372 /* H */
373 info->reglane.index = extract_field (FLD_H, code, 0);
374 break;
375 default:
376 return FALSE;
377 }
378
379 if (inst->opcode->op == OP_FCMLA_ELEM)
380 {
381 /* Complex operand takes two elements. */
382 if (info->reglane.index & 1)
383 return FALSE;
384 info->reglane.index /= 2;
385 }
386 }
387
388 return TRUE;
389 }
390
391 bfd_boolean
392 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
393 const aarch64_insn code,
394 const aarch64_inst *inst ATTRIBUTE_UNUSED,
395 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
396 {
397 /* R */
398 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
399 /* len */
400 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
401 return TRUE;
402 }
403
404 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
405 bfd_boolean
406 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
407 aarch64_opnd_info *info, const aarch64_insn code,
408 const aarch64_inst *inst,
409 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
410 {
411 aarch64_insn value;
412 /* Number of elements in each structure to be loaded/stored. */
413 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
414
415 struct
416 {
417 unsigned is_reserved;
418 unsigned num_regs;
419 unsigned num_elements;
420 } data [] =
421 { {0, 4, 4},
422 {1, 4, 4},
423 {0, 4, 1},
424 {0, 4, 2},
425 {0, 3, 3},
426 {1, 3, 3},
427 {0, 3, 1},
428 {0, 1, 1},
429 {0, 2, 2},
430 {1, 2, 2},
431 {0, 2, 1},
432 };
433
434 /* Rt */
435 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
436 /* opcode */
437 value = extract_field (FLD_opcode, code, 0);
438 /* PR 21595: Check for a bogus value. */
439 if (value >= ARRAY_SIZE (data))
440 return FALSE;
441 if (expected_num != data[value].num_elements || data[value].is_reserved)
442 return FALSE;
443 info->reglist.num_regs = data[value].num_regs;
444
445 return TRUE;
446 }
447
448 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
449 lanes instructions. */
450 bfd_boolean
451 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
452 aarch64_opnd_info *info, const aarch64_insn code,
453 const aarch64_inst *inst,
454 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
455 {
456 aarch64_insn value;
457
458 /* Rt */
459 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
460 /* S */
461 value = extract_field (FLD_S, code, 0);
462
463 /* Number of registers is equal to the number of elements in
464 each structure to be loaded/stored. */
465 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
466 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
467
468 /* Except when it is LD1R. */
469 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
470 info->reglist.num_regs = 2;
471
472 return TRUE;
473 }
474
475 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
476 load/store single element instructions. */
477 bfd_boolean
478 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
479 aarch64_opnd_info *info, const aarch64_insn code,
480 const aarch64_inst *inst ATTRIBUTE_UNUSED,
481 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
482 {
483 aarch64_field field = {0, 0};
484 aarch64_insn QSsize; /* fields Q:S:size. */
485 aarch64_insn opcodeh2; /* opcode<2:1> */
486
487 /* Rt */
488 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
489
490 /* Decode the index, opcode<2:1> and size. */
491 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
492 opcodeh2 = extract_field_2 (&field, code, 0);
493 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
494 switch (opcodeh2)
495 {
496 case 0x0:
497 info->qualifier = AARCH64_OPND_QLF_S_B;
498 /* Index encoded in "Q:S:size". */
499 info->reglist.index = QSsize;
500 break;
501 case 0x1:
502 if (QSsize & 0x1)
503 /* UND. */
504 return FALSE;
505 info->qualifier = AARCH64_OPND_QLF_S_H;
506 /* Index encoded in "Q:S:size<1>". */
507 info->reglist.index = QSsize >> 1;
508 break;
509 case 0x2:
510 if ((QSsize >> 1) & 0x1)
511 /* UND. */
512 return FALSE;
513 if ((QSsize & 0x1) == 0)
514 {
515 info->qualifier = AARCH64_OPND_QLF_S_S;
516 /* Index encoded in "Q:S". */
517 info->reglist.index = QSsize >> 2;
518 }
519 else
520 {
521 if (extract_field (FLD_S, code, 0))
522 /* UND */
523 return FALSE;
524 info->qualifier = AARCH64_OPND_QLF_S_D;
525 /* Index encoded in "Q". */
526 info->reglist.index = QSsize >> 3;
527 }
528 break;
529 default:
530 return FALSE;
531 }
532
533 info->reglist.has_index = 1;
534 info->reglist.num_regs = 0;
535 /* Number of registers is equal to the number of elements in
536 each structure to be loaded/stored. */
537 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
538 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
539
540 return TRUE;
541 }
542
543 /* Decode fields immh:immb and/or Q for e.g.
544 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
545 or SSHR <V><d>, <V><n>, #<shift>. */
546
547 bfd_boolean
548 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
549 aarch64_opnd_info *info, const aarch64_insn code,
550 const aarch64_inst *inst,
551 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
552 {
553 int pos;
554 aarch64_insn Q, imm, immh;
555 enum aarch64_insn_class iclass = inst->opcode->iclass;
556
557 immh = extract_field (FLD_immh, code, 0);
558 if (immh == 0)
559 return FALSE;
560 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
561 pos = 4;
562 /* Get highest set bit in immh. */
563 while (--pos >= 0 && (immh & 0x8) == 0)
564 immh <<= 1;
565
566 assert ((iclass == asimdshf || iclass == asisdshf)
567 && (info->type == AARCH64_OPND_IMM_VLSR
568 || info->type == AARCH64_OPND_IMM_VLSL));
569
570 if (iclass == asimdshf)
571 {
572 Q = extract_field (FLD_Q, code, 0);
573 /* immh Q <T>
574 0000 x SEE AdvSIMD modified immediate
575 0001 0 8B
576 0001 1 16B
577 001x 0 4H
578 001x 1 8H
579 01xx 0 2S
580 01xx 1 4S
581 1xxx 0 RESERVED
582 1xxx 1 2D */
583 info->qualifier =
584 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
585 }
586 else
587 info->qualifier = get_sreg_qualifier_from_value (pos);
588
589 if (info->type == AARCH64_OPND_IMM_VLSR)
590 /* immh <shift>
591 0000 SEE AdvSIMD modified immediate
592 0001 (16-UInt(immh:immb))
593 001x (32-UInt(immh:immb))
594 01xx (64-UInt(immh:immb))
595 1xxx (128-UInt(immh:immb)) */
596 info->imm.value = (16 << pos) - imm;
597 else
598 /* immh:immb
599 immh <shift>
600 0000 SEE AdvSIMD modified immediate
601 0001 (UInt(immh:immb)-8)
602 001x (UInt(immh:immb)-16)
603 01xx (UInt(immh:immb)-32)
604 1xxx (UInt(immh:immb)-64) */
605 info->imm.value = imm - (8 << pos);
606
607 return TRUE;
608 }
609
610 /* Decode shift immediate for e.g. sshr (imm). */
611 bfd_boolean
612 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
613 aarch64_opnd_info *info, const aarch64_insn code,
614 const aarch64_inst *inst ATTRIBUTE_UNUSED,
615 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
616 {
617 int64_t imm;
618 aarch64_insn val;
619 val = extract_field (FLD_size, code, 0);
620 switch (val)
621 {
622 case 0: imm = 8; break;
623 case 1: imm = 16; break;
624 case 2: imm = 32; break;
625 default: return FALSE;
626 }
627 info->imm.value = imm;
628 return TRUE;
629 }
630
631 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
632 value in the field(s) will be extracted as unsigned immediate value. */
633 bfd_boolean
634 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
635 const aarch64_insn code,
636 const aarch64_inst *inst ATTRIBUTE_UNUSED,
637 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
638 {
639 int64_t imm;
640
641 imm = extract_all_fields (self, code);
642
643 if (operand_need_sign_extension (self))
644 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
645
646 if (operand_need_shift_by_two (self))
647 imm <<= 2;
648
649 if (info->type == AARCH64_OPND_ADDR_ADRP)
650 imm <<= 12;
651
652 info->imm.value = imm;
653 return TRUE;
654 }
655
656 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
657 bfd_boolean
658 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
659 const aarch64_insn code,
660 const aarch64_inst *inst ATTRIBUTE_UNUSED,
661 aarch64_operand_error *errors)
662 {
663 aarch64_ext_imm (self, info, code, inst, errors);
664 info->shifter.kind = AARCH64_MOD_LSL;
665 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
666 return TRUE;
667 }
668
669 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
670 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
671 bfd_boolean
672 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
673 aarch64_opnd_info *info,
674 const aarch64_insn code,
675 const aarch64_inst *inst ATTRIBUTE_UNUSED,
676 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
677 {
678 uint64_t imm;
679 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
680 aarch64_field field = {0, 0};
681
682 assert (info->idx == 1);
683
684 if (info->type == AARCH64_OPND_SIMD_FPIMM)
685 info->imm.is_fp = 1;
686
687 /* a:b:c:d:e:f:g:h */
688 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
689 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
690 {
691 /* Either MOVI <Dd>, #<imm>
692 or MOVI <Vd>.2D, #<imm>.
693 <imm> is a 64-bit immediate
694 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
695 encoded in "a:b:c:d:e:f:g:h". */
696 int i;
697 unsigned abcdefgh = imm;
698 for (imm = 0ull, i = 0; i < 8; i++)
699 if (((abcdefgh >> i) & 0x1) != 0)
700 imm |= 0xffull << (8 * i);
701 }
702 info->imm.value = imm;
703
704 /* cmode */
705 info->qualifier = get_expected_qualifier (inst, info->idx);
706 switch (info->qualifier)
707 {
708 case AARCH64_OPND_QLF_NIL:
709 /* no shift */
710 info->shifter.kind = AARCH64_MOD_NONE;
711 return 1;
712 case AARCH64_OPND_QLF_LSL:
713 /* shift zeros */
714 info->shifter.kind = AARCH64_MOD_LSL;
715 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
716 {
717 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
718 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
719 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
720 default: assert (0); return FALSE;
721 }
722 /* 00: 0; 01: 8; 10:16; 11:24. */
723 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
724 break;
725 case AARCH64_OPND_QLF_MSL:
726 /* shift ones */
727 info->shifter.kind = AARCH64_MOD_MSL;
728 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
729 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
730 break;
731 default:
732 assert (0);
733 return FALSE;
734 }
735
736 return TRUE;
737 }
738
739 /* Decode an 8-bit floating-point immediate. */
740 bfd_boolean
741 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
742 const aarch64_insn code,
743 const aarch64_inst *inst ATTRIBUTE_UNUSED,
744 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
745 {
746 info->imm.value = extract_all_fields (self, code);
747 info->imm.is_fp = 1;
748 return TRUE;
749 }
750
751 /* Decode a 1-bit rotate immediate (#90 or #270). */
752 bfd_boolean
753 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
754 const aarch64_insn code,
755 const aarch64_inst *inst ATTRIBUTE_UNUSED,
756 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
757 {
758 uint64_t rot = extract_field (self->fields[0], code, 0);
759 assert (rot < 2U);
760 info->imm.value = rot * 180 + 90;
761 return TRUE;
762 }
763
764 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
765 bfd_boolean
766 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
767 const aarch64_insn code,
768 const aarch64_inst *inst ATTRIBUTE_UNUSED,
769 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
770 {
771 uint64_t rot = extract_field (self->fields[0], code, 0);
772 assert (rot < 4U);
773 info->imm.value = rot * 90;
774 return TRUE;
775 }
776
777 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
778 bfd_boolean
779 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
780 aarch64_opnd_info *info, const aarch64_insn code,
781 const aarch64_inst *inst ATTRIBUTE_UNUSED,
782 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
783 {
784 info->imm.value = 64- extract_field (FLD_scale, code, 0);
785 return TRUE;
786 }
787
788 /* Decode arithmetic immediate for e.g.
789 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
790 bfd_boolean
791 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
792 aarch64_opnd_info *info, const aarch64_insn code,
793 const aarch64_inst *inst ATTRIBUTE_UNUSED,
794 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
795 {
796 aarch64_insn value;
797
798 info->shifter.kind = AARCH64_MOD_LSL;
799 /* shift */
800 value = extract_field (FLD_shift, code, 0);
801 if (value >= 2)
802 return FALSE;
803 info->shifter.amount = value ? 12 : 0;
804 /* imm12 (unsigned) */
805 info->imm.value = extract_field (FLD_imm12, code, 0);
806
807 return TRUE;
808 }
809
810 /* Return true if VALUE is a valid logical immediate encoding, storing the
811 decoded value in *RESULT if so. ESIZE is the number of bytes in the
812 decoded immediate. */
813 static bfd_boolean
814 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
815 {
816 uint64_t imm, mask;
817 uint32_t N, R, S;
818 unsigned simd_size;
819
820 /* value is N:immr:imms. */
821 S = value & 0x3f;
822 R = (value >> 6) & 0x3f;
823 N = (value >> 12) & 0x1;
824
825 /* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
826 (in other words, right rotated by R), then replicated. */
827 if (N != 0)
828 {
829 simd_size = 64;
830 mask = 0xffffffffffffffffull;
831 }
832 else
833 {
834 switch (S)
835 {
836 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
837 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
838 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
839 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
840 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
841 default: return FALSE;
842 }
843 mask = (1ull << simd_size) - 1;
844 /* Top bits are IGNORED. */
845 R &= simd_size - 1;
846 }
847
848 if (simd_size > esize * 8)
849 return FALSE;
850
851 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
852 if (S == simd_size - 1)
853 return FALSE;
854 /* S+1 consecutive bits to 1. */
855 /* NOTE: S can't be 63 due to detection above. */
856 imm = (1ull << (S + 1)) - 1;
857 /* Rotate to the left by simd_size - R. */
858 if (R != 0)
859 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
860 /* Replicate the value according to SIMD size. */
861 switch (simd_size)
862 {
863 case 2: imm = (imm << 2) | imm;
864 /* Fall through. */
865 case 4: imm = (imm << 4) | imm;
866 /* Fall through. */
867 case 8: imm = (imm << 8) | imm;
868 /* Fall through. */
869 case 16: imm = (imm << 16) | imm;
870 /* Fall through. */
871 case 32: imm = (imm << 32) | imm;
872 /* Fall through. */
873 case 64: break;
874 default: assert (0); return 0;
875 }
876
877 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
878
879 return TRUE;
880 }
881
882 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
883 bfd_boolean
884 aarch64_ext_limm (const aarch64_operand *self,
885 aarch64_opnd_info *info, const aarch64_insn code,
886 const aarch64_inst *inst,
887 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
888 {
889 uint32_t esize;
890 aarch64_insn value;
891
892 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
893 self->fields[2]);
894 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
895 return decode_limm (esize, value, &info->imm.value);
896 }
897
898 /* Decode a logical immediate for the BIC alias of AND (etc.). */
899 bfd_boolean
900 aarch64_ext_inv_limm (const aarch64_operand *self,
901 aarch64_opnd_info *info, const aarch64_insn code,
902 const aarch64_inst *inst,
903 aarch64_operand_error *errors)
904 {
905 if (!aarch64_ext_limm (self, info, code, inst, errors))
906 return FALSE;
907 info->imm.value = ~info->imm.value;
908 return TRUE;
909 }
910
911 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
912 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
913 bfd_boolean
914 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
915 aarch64_opnd_info *info,
916 const aarch64_insn code, const aarch64_inst *inst,
917 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
918 {
919 aarch64_insn value;
920
921 /* Rt */
922 info->reg.regno = extract_field (FLD_Rt, code, 0);
923
924 /* size */
925 value = extract_field (FLD_ldst_size, code, 0);
926 if (inst->opcode->iclass == ldstpair_indexed
927 || inst->opcode->iclass == ldstnapair_offs
928 || inst->opcode->iclass == ldstpair_off
929 || inst->opcode->iclass == loadlit)
930 {
931 enum aarch64_opnd_qualifier qualifier;
932 switch (value)
933 {
934 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
935 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
936 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
937 default: return FALSE;
938 }
939 info->qualifier = qualifier;
940 }
941 else
942 {
943 /* opc1:size */
944 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
945 if (value > 0x4)
946 return FALSE;
947 info->qualifier = get_sreg_qualifier_from_value (value);
948 }
949
950 return TRUE;
951 }
952
953 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
954 bfd_boolean
955 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
956 aarch64_opnd_info *info,
957 aarch64_insn code,
958 const aarch64_inst *inst ATTRIBUTE_UNUSED,
959 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
960 {
961 /* Rn */
962 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
963 return TRUE;
964 }
965
966 /* Decode the address operand for e.g.
967 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
968 bfd_boolean
969 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
970 aarch64_opnd_info *info,
971 aarch64_insn code, const aarch64_inst *inst,
972 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
973 {
974 info->qualifier = get_expected_qualifier (inst, info->idx);
975
976 /* Rn */
977 info->addr.base_regno = extract_field (self->fields[0], code, 0);
978
979 /* simm9 */
980 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
981 info->addr.offset.imm = sign_extend (imm, 8);
982 if (extract_field (self->fields[2], code, 0) == 1) {
983 info->addr.writeback = 1;
984 info->addr.preind = 1;
985 }
986 return TRUE;
987 }
988
989 /* Decode the address operand for e.g.
990 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
991 bfd_boolean
992 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
993 aarch64_opnd_info *info,
994 aarch64_insn code, const aarch64_inst *inst,
995 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
996 {
997 aarch64_insn S, value;
998
999 /* Rn */
1000 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1001 /* Rm */
1002 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1003 /* option */
1004 value = extract_field (FLD_option, code, 0);
1005 info->shifter.kind =
1006 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1007 /* Fix-up the shifter kind; although the table-driven approach is
1008 efficient, it is slightly inflexible, thus needing this fix-up. */
1009 if (info->shifter.kind == AARCH64_MOD_UXTX)
1010 info->shifter.kind = AARCH64_MOD_LSL;
1011 /* S */
1012 S = extract_field (FLD_S, code, 0);
1013 if (S == 0)
1014 {
1015 info->shifter.amount = 0;
1016 info->shifter.amount_present = 0;
1017 }
1018 else
1019 {
1020 int size;
1021 /* Need information in other operand(s) to help achieve the decoding
1022 from 'S' field. */
1023 info->qualifier = get_expected_qualifier (inst, info->idx);
1024 /* Get the size of the data element that is accessed, which may be
1025 different from that of the source register size, e.g. in strb/ldrb. */
1026 size = aarch64_get_qualifier_esize (info->qualifier);
1027 info->shifter.amount = get_logsz (size);
1028 info->shifter.amount_present = 1;
1029 }
1030
1031 return TRUE;
1032 }
1033
1034 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
1035 bfd_boolean
1036 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1037 aarch64_insn code, const aarch64_inst *inst,
1038 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1039 {
1040 aarch64_insn imm;
1041 info->qualifier = get_expected_qualifier (inst, info->idx);
1042
1043 /* Rn */
1044 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1045 /* simm (imm9 or imm7) */
1046 imm = extract_field (self->fields[0], code, 0);
1047 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1048 if (self->fields[0] == FLD_imm7)
1049 /* scaled immediate in ld/st pair instructions. */
1050 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1051 /* qualifier */
1052 if (inst->opcode->iclass == ldst_unscaled
1053 || inst->opcode->iclass == ldstnapair_offs
1054 || inst->opcode->iclass == ldstpair_off
1055 || inst->opcode->iclass == ldst_unpriv)
1056 info->addr.writeback = 0;
1057 else
1058 {
1059 /* pre/post- index */
1060 info->addr.writeback = 1;
1061 if (extract_field (self->fields[1], code, 0) == 1)
1062 info->addr.preind = 1;
1063 else
1064 info->addr.postind = 1;
1065 }
1066
1067 return TRUE;
1068 }
1069
1070 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1071 bfd_boolean
1072 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1073 aarch64_insn code,
1074 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1075 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1076 {
1077 int shift;
1078 info->qualifier = get_expected_qualifier (inst, info->idx);
1079 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1080 /* Rn */
1081 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1082 /* uimm12 */
1083 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1084 return TRUE;
1085 }
1086
1087 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1088 bfd_boolean
1089 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1090 aarch64_insn code,
1091 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1092 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1093 {
1094 aarch64_insn imm;
1095
1096 info->qualifier = get_expected_qualifier (inst, info->idx);
1097 /* Rn */
1098 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1099 /* simm10 */
1100 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1101 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1102 if (extract_field (self->fields[3], code, 0) == 1) {
1103 info->addr.writeback = 1;
1104 info->addr.preind = 1;
1105 }
1106 return TRUE;
1107 }
1108
1109 /* Decode the address operand for e.g.
1110 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1111 bfd_boolean
1112 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1113 aarch64_opnd_info *info,
1114 aarch64_insn code, const aarch64_inst *inst,
1115 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1116 {
1117 /* The opcode dependent area stores the number of elements in
1118 each structure to be loaded/stored. */
1119 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1120
1121 /* Rn */
1122 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1123 /* Rm | #<amount> */
1124 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1125 if (info->addr.offset.regno == 31)
1126 {
1127 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1128 /* Special handling of loading single structure to all lane. */
1129 info->addr.offset.imm = (is_ld1r ? 1
1130 : inst->operands[0].reglist.num_regs)
1131 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1132 else
1133 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1134 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1135 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1136 }
1137 else
1138 info->addr.offset.is_reg = 1;
1139 info->addr.writeback = 1;
1140
1141 return TRUE;
1142 }
1143
1144 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1145 bfd_boolean
1146 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1147 aarch64_opnd_info *info,
1148 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1149 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1150 {
1151 aarch64_insn value;
1152 /* cond */
1153 value = extract_field (FLD_cond, code, 0);
1154 info->cond = get_cond_from_value (value);
1155 return TRUE;
1156 }
1157
1158 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1159 bfd_boolean
1160 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1161 aarch64_opnd_info *info,
1162 aarch64_insn code,
1163 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1164 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1165 {
1166 /* op0:op1:CRn:CRm:op2 */
1167 info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1168 FLD_CRm, FLD_op2);
1169 return 1;
1170 }
1171
1172 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1173 bfd_boolean
1174 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1175 aarch64_opnd_info *info, aarch64_insn code,
1176 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1177 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1178 {
1179 int i;
1180 /* op1:op2 */
1181 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1182 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1183 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1184 return TRUE;
1185 /* Reserved value in <pstatefield>. */
1186 return FALSE;
1187 }
1188
1189 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1190 bfd_boolean
1191 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1192 aarch64_opnd_info *info,
1193 aarch64_insn code,
1194 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1195 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1196 {
1197 int i;
1198 aarch64_insn value;
1199 const aarch64_sys_ins_reg *sysins_ops;
1200 /* op0:op1:CRn:CRm:op2 */
1201 value = extract_fields (code, 0, 5,
1202 FLD_op0, FLD_op1, FLD_CRn,
1203 FLD_CRm, FLD_op2);
1204
1205 switch (info->type)
1206 {
1207 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1208 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1209 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1210 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1211 default: assert (0); return FALSE;
1212 }
1213
1214 for (i = 0; sysins_ops[i].name != NULL; ++i)
1215 if (sysins_ops[i].value == value)
1216 {
1217 info->sysins_op = sysins_ops + i;
1218 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1219 info->sysins_op->name,
1220 (unsigned)info->sysins_op->value,
1221 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1222 return TRUE;
1223 }
1224
1225 return FALSE;
1226 }
1227
1228 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1229
1230 bfd_boolean
1231 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1232 aarch64_opnd_info *info,
1233 aarch64_insn code,
1234 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1235 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1236 {
1237 /* CRm */
1238 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1239 return TRUE;
1240 }
1241
1242 /* Decode the prefetch operation option operand for e.g.
1243 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1244
1245 bfd_boolean
1246 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1247 aarch64_opnd_info *info,
1248 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1249 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1250 {
1251 /* prfop in Rt */
1252 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1253 return TRUE;
1254 }
1255
1256 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1257 to the matching name/value pair in aarch64_hint_options. */
1258
1259 bfd_boolean
1260 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1261 aarch64_opnd_info *info,
1262 aarch64_insn code,
1263 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1264 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1265 {
1266 /* CRm:op2. */
1267 unsigned hint_number;
1268 int i;
1269
1270 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1271
1272 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1273 {
1274 if (hint_number == aarch64_hint_options[i].value)
1275 {
1276 info->hint_option = &(aarch64_hint_options[i]);
1277 return TRUE;
1278 }
1279 }
1280
1281 return FALSE;
1282 }
1283
1284 /* Decode the extended register operand for e.g.
1285 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1286 bfd_boolean
1287 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1288 aarch64_opnd_info *info,
1289 aarch64_insn code,
1290 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1291 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1292 {
1293 aarch64_insn value;
1294
1295 /* Rm */
1296 info->reg.regno = extract_field (FLD_Rm, code, 0);
1297 /* option */
1298 value = extract_field (FLD_option, code, 0);
1299 info->shifter.kind =
1300 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1301 /* imm3 */
1302 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1303
1304 /* This makes the constraint checking happy. */
1305 info->shifter.operator_present = 1;
1306
1307 /* Assume inst->operands[0].qualifier has been resolved. */
1308 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1309 info->qualifier = AARCH64_OPND_QLF_W;
1310 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1311 && (info->shifter.kind == AARCH64_MOD_UXTX
1312 || info->shifter.kind == AARCH64_MOD_SXTX))
1313 info->qualifier = AARCH64_OPND_QLF_X;
1314
1315 return TRUE;
1316 }
1317
1318 /* Decode the shifted register operand for e.g.
1319 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1320 bfd_boolean
1321 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1322 aarch64_opnd_info *info,
1323 aarch64_insn code,
1324 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1325 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1326 {
1327 aarch64_insn value;
1328
1329 /* Rm */
1330 info->reg.regno = extract_field (FLD_Rm, code, 0);
1331 /* shift */
1332 value = extract_field (FLD_shift, code, 0);
1333 info->shifter.kind =
1334 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1335 if (info->shifter.kind == AARCH64_MOD_ROR
1336 && inst->opcode->iclass != log_shift)
1337 /* ROR is not available for the shifted register operand in arithmetic
1338 instructions. */
1339 return FALSE;
1340 /* imm6 */
1341 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1342
1343 /* This makes the constraint checking happy. */
1344 info->shifter.operator_present = 1;
1345
1346 return TRUE;
1347 }
1348
1349 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1350 where <offset> is given by the OFFSET parameter and where <factor> is
1351 1 plus SELF's operand-dependent value. fields[0] specifies the field
1352 that holds <base>. */
1353 static bfd_boolean
1354 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1355 aarch64_opnd_info *info, aarch64_insn code,
1356 int64_t offset)
1357 {
1358 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1359 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1360 info->addr.offset.is_reg = FALSE;
1361 info->addr.writeback = FALSE;
1362 info->addr.preind = TRUE;
1363 if (offset != 0)
1364 info->shifter.kind = AARCH64_MOD_MUL_VL;
1365 info->shifter.amount = 1;
1366 info->shifter.operator_present = (info->addr.offset.imm != 0);
1367 info->shifter.amount_present = FALSE;
1368 return TRUE;
1369 }
1370
1371 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1372 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1373 SELF's operand-dependent value. fields[0] specifies the field that
1374 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1375 bfd_boolean
1376 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1377 aarch64_opnd_info *info, aarch64_insn code,
1378 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1379 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1380 {
1381 int offset;
1382
1383 offset = extract_field (FLD_SVE_imm4, code, 0);
1384 offset = ((offset + 8) & 15) - 8;
1385 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1386 }
1387
1388 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1389 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1390 SELF's operand-dependent value. fields[0] specifies the field that
1391 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1392 bfd_boolean
1393 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1394 aarch64_opnd_info *info, aarch64_insn code,
1395 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1396 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1397 {
1398 int offset;
1399
1400 offset = extract_field (FLD_SVE_imm6, code, 0);
1401 offset = (((offset + 32) & 63) - 32);
1402 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1403 }
1404
1405 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1406 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1407 SELF's operand-dependent value. fields[0] specifies the field that
1408 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1409 and imm3 fields, with imm3 being the less-significant part. */
1410 bfd_boolean
1411 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1412 aarch64_opnd_info *info,
1413 aarch64_insn code,
1414 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1415 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1416 {
1417 int offset;
1418
1419 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1420 offset = (((offset + 256) & 511) - 256);
1421 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1422 }
1423
1424 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1425 is given by the OFFSET parameter and where <shift> is SELF's operand-
1426 dependent value. fields[0] specifies the base register field <base>. */
1427 static bfd_boolean
1428 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1429 aarch64_opnd_info *info, aarch64_insn code,
1430 int64_t offset)
1431 {
1432 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1433 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1434 info->addr.offset.is_reg = FALSE;
1435 info->addr.writeback = FALSE;
1436 info->addr.preind = TRUE;
1437 info->shifter.operator_present = FALSE;
1438 info->shifter.amount_present = FALSE;
1439 return TRUE;
1440 }
1441
1442 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1443 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1444 value. fields[0] specifies the base register field. */
1445 bfd_boolean
1446 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1447 aarch64_opnd_info *info, aarch64_insn code,
1448 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1449 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1450 {
1451 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1452 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1453 }
1454
1455 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1456 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1457 value. fields[0] specifies the base register field. */
1458 bfd_boolean
1459 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1460 aarch64_opnd_info *info, aarch64_insn code,
1461 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1462 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1463 {
1464 int offset = extract_field (FLD_SVE_imm6, code, 0);
1465 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1466 }
1467
1468 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1469 is SELF's operand-dependent value. fields[0] specifies the base
1470 register field and fields[1] specifies the offset register field. */
1471 bfd_boolean
1472 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1473 aarch64_opnd_info *info, aarch64_insn code,
1474 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1475 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1476 {
1477 int index_regno;
1478
1479 index_regno = extract_field (self->fields[1], code, 0);
1480 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1481 return FALSE;
1482
1483 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1484 info->addr.offset.regno = index_regno;
1485 info->addr.offset.is_reg = TRUE;
1486 info->addr.writeback = FALSE;
1487 info->addr.preind = TRUE;
1488 info->shifter.kind = AARCH64_MOD_LSL;
1489 info->shifter.amount = get_operand_specific_data (self);
1490 info->shifter.operator_present = (info->shifter.amount != 0);
1491 info->shifter.amount_present = (info->shifter.amount != 0);
1492 return TRUE;
1493 }
1494
1495 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1496 <shift> is SELF's operand-dependent value. fields[0] specifies the
1497 base register field, fields[1] specifies the offset register field and
1498 fields[2] is a single-bit field that selects SXTW over UXTW. */
1499 bfd_boolean
1500 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1501 aarch64_opnd_info *info, aarch64_insn code,
1502 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1503 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1504 {
1505 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1506 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1507 info->addr.offset.is_reg = TRUE;
1508 info->addr.writeback = FALSE;
1509 info->addr.preind = TRUE;
1510 if (extract_field (self->fields[2], code, 0))
1511 info->shifter.kind = AARCH64_MOD_SXTW;
1512 else
1513 info->shifter.kind = AARCH64_MOD_UXTW;
1514 info->shifter.amount = get_operand_specific_data (self);
1515 info->shifter.operator_present = TRUE;
1516 info->shifter.amount_present = (info->shifter.amount != 0);
1517 return TRUE;
1518 }
1519
1520 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1521 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1522 fields[0] specifies the base register field. */
1523 bfd_boolean
1524 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1525 aarch64_opnd_info *info, aarch64_insn code,
1526 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1527 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1528 {
1529 int offset = extract_field (FLD_imm5, code, 0);
1530 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1531 }
1532
1533 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1534 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1535 number. fields[0] specifies the base register field and fields[1]
1536 specifies the offset register field. */
1537 static bfd_boolean
1538 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1539 aarch64_insn code, enum aarch64_modifier_kind kind)
1540 {
1541 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1542 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1543 info->addr.offset.is_reg = TRUE;
1544 info->addr.writeback = FALSE;
1545 info->addr.preind = TRUE;
1546 info->shifter.kind = kind;
1547 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1548 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1549 || info->shifter.amount != 0);
1550 info->shifter.amount_present = (info->shifter.amount != 0);
1551 return TRUE;
1552 }
1553
1554 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1555 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1556 field and fields[1] specifies the offset register field. */
1557 bfd_boolean
1558 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1559 aarch64_opnd_info *info, aarch64_insn code,
1560 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1561 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1562 {
1563 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1564 }
1565
1566 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1567 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1568 field and fields[1] specifies the offset register field. */
1569 bfd_boolean
1570 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1571 aarch64_opnd_info *info, aarch64_insn code,
1572 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1573 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1574 {
1575 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1576 }
1577
1578 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1579 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1580 field and fields[1] specifies the offset register field. */
1581 bfd_boolean
1582 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1583 aarch64_opnd_info *info, aarch64_insn code,
1584 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1585 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1586 {
1587 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1588 }
1589
1590 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1591 has the raw field value and that the low 8 bits decode to VALUE. */
1592 static bfd_boolean
1593 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1594 {
1595 info->shifter.kind = AARCH64_MOD_LSL;
1596 info->shifter.amount = 0;
1597 if (info->imm.value & 0x100)
1598 {
1599 if (value == 0)
1600 /* Decode 0x100 as #0, LSL #8. */
1601 info->shifter.amount = 8;
1602 else
1603 value *= 256;
1604 }
1605 info->shifter.operator_present = (info->shifter.amount != 0);
1606 info->shifter.amount_present = (info->shifter.amount != 0);
1607 info->imm.value = value;
1608 return TRUE;
1609 }
1610
1611 /* Decode an SVE ADD/SUB immediate. */
1612 bfd_boolean
1613 aarch64_ext_sve_aimm (const aarch64_operand *self,
1614 aarch64_opnd_info *info, const aarch64_insn code,
1615 const aarch64_inst *inst,
1616 aarch64_operand_error *errors)
1617 {
1618 return (aarch64_ext_imm (self, info, code, inst, errors)
1619 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1620 }
1621
1622 /* Decode an SVE CPY/DUP immediate. */
1623 bfd_boolean
1624 aarch64_ext_sve_asimm (const aarch64_operand *self,
1625 aarch64_opnd_info *info, const aarch64_insn code,
1626 const aarch64_inst *inst,
1627 aarch64_operand_error *errors)
1628 {
1629 return (aarch64_ext_imm (self, info, code, inst, errors)
1630 && decode_sve_aimm (info, (int8_t) info->imm.value));
1631 }
1632
1633 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1634 The fields array specifies which field to use. */
1635 bfd_boolean
1636 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1637 aarch64_opnd_info *info, aarch64_insn code,
1638 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1639 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1640 {
1641 if (extract_field (self->fields[0], code, 0))
1642 info->imm.value = 0x3f800000;
1643 else
1644 info->imm.value = 0x3f000000;
1645 info->imm.is_fp = TRUE;
1646 return TRUE;
1647 }
1648
1649 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1650 The fields array specifies which field to use. */
1651 bfd_boolean
1652 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1653 aarch64_opnd_info *info, aarch64_insn code,
1654 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1655 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1656 {
1657 if (extract_field (self->fields[0], code, 0))
1658 info->imm.value = 0x40000000;
1659 else
1660 info->imm.value = 0x3f000000;
1661 info->imm.is_fp = TRUE;
1662 return TRUE;
1663 }
1664
1665 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1666 The fields array specifies which field to use. */
1667 bfd_boolean
1668 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1669 aarch64_opnd_info *info, aarch64_insn code,
1670 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1671 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1672 {
1673 if (extract_field (self->fields[0], code, 0))
1674 info->imm.value = 0x3f800000;
1675 else
1676 info->imm.value = 0x0;
1677 info->imm.is_fp = TRUE;
1678 return TRUE;
1679 }
1680
1681 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1682 array specifies which field to use for Zn. MM is encoded in the
1683 concatenation of imm5 and SVE_tszh, with imm5 being the less
1684 significant part. */
1685 bfd_boolean
1686 aarch64_ext_sve_index (const aarch64_operand *self,
1687 aarch64_opnd_info *info, aarch64_insn code,
1688 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1689 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1690 {
1691 int val;
1692
1693 info->reglane.regno = extract_field (self->fields[0], code, 0);
1694 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1695 if ((val & 31) == 0)
1696 return 0;
1697 while ((val & 1) == 0)
1698 val /= 2;
1699 info->reglane.index = val / 2;
1700 return TRUE;
1701 }
1702
1703 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1704 bfd_boolean
1705 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1706 aarch64_opnd_info *info, const aarch64_insn code,
1707 const aarch64_inst *inst,
1708 aarch64_operand_error *errors)
1709 {
1710 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1711 return (aarch64_ext_limm (self, info, code, inst, errors)
1712 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1713 }
1714
1715 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1716 and where MM occupies the most-significant part. The operand-dependent
1717 value specifies the number of bits in Zn. */
1718 bfd_boolean
1719 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1720 aarch64_opnd_info *info, aarch64_insn code,
1721 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1722 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1723 {
1724 unsigned int reg_bits = get_operand_specific_data (self);
1725 unsigned int val = extract_all_fields (self, code);
1726 info->reglane.regno = val & ((1 << reg_bits) - 1);
1727 info->reglane.index = val >> reg_bits;
1728 return TRUE;
1729 }
1730
1731 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1732 to use for Zn. The opcode-dependent value specifies the number
1733 of registers in the list. */
1734 bfd_boolean
1735 aarch64_ext_sve_reglist (const aarch64_operand *self,
1736 aarch64_opnd_info *info, aarch64_insn code,
1737 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1738 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1739 {
1740 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1741 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1742 return TRUE;
1743 }
1744
1745 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1746 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1747 field. */
1748 bfd_boolean
1749 aarch64_ext_sve_scale (const aarch64_operand *self,
1750 aarch64_opnd_info *info, aarch64_insn code,
1751 const aarch64_inst *inst, aarch64_operand_error *errors)
1752 {
1753 int val;
1754
1755 if (!aarch64_ext_imm (self, info, code, inst, errors))
1756 return FALSE;
1757 val = extract_field (FLD_SVE_imm4, code, 0);
1758 info->shifter.kind = AARCH64_MOD_MUL;
1759 info->shifter.amount = val + 1;
1760 info->shifter.operator_present = (val != 0);
1761 info->shifter.amount_present = (val != 0);
1762 return TRUE;
1763 }
1764
1765 /* Return the top set bit in VALUE, which is expected to be relatively
1766 small. */
1767 static uint64_t
1768 get_top_bit (uint64_t value)
1769 {
1770 while ((value & -value) != value)
1771 value -= value & -value;
1772 return value;
1773 }
1774
1775 /* Decode an SVE shift-left immediate. */
1776 bfd_boolean
1777 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1778 aarch64_opnd_info *info, const aarch64_insn code,
1779 const aarch64_inst *inst, aarch64_operand_error *errors)
1780 {
1781 if (!aarch64_ext_imm (self, info, code, inst, errors)
1782 || info->imm.value == 0)
1783 return FALSE;
1784
1785 info->imm.value -= get_top_bit (info->imm.value);
1786 return TRUE;
1787 }
1788
1789 /* Decode an SVE shift-right immediate. */
1790 bfd_boolean
1791 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1792 aarch64_opnd_info *info, const aarch64_insn code,
1793 const aarch64_inst *inst, aarch64_operand_error *errors)
1794 {
1795 if (!aarch64_ext_imm (self, info, code, inst, errors)
1796 || info->imm.value == 0)
1797 return FALSE;
1798
1799 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1800 return TRUE;
1801 }
1802 \f
1803 /* Bitfields that are commonly used to encode certain operands' information
1804 may be partially used as part of the base opcode in some instructions.
1805 For example, the bit 1 of the field 'size' in
1806 FCVTXN <Vb><d>, <Va><n>
1807 is actually part of the base opcode, while only size<0> is available
1808 for encoding the register type. Another example is the AdvSIMD
1809 instruction ORR (register), in which the field 'size' is also used for
1810 the base opcode, leaving only the field 'Q' available to encode the
1811 vector register arrangement specifier '8B' or '16B'.
1812
1813 This function tries to deduce the qualifier from the value of partially
1814 constrained field(s). Given the VALUE of such a field or fields, the
1815 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1816 operand encoding), the function returns the matching qualifier or
1817 AARCH64_OPND_QLF_NIL if nothing matches.
1818
1819 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1820 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1821 may end with AARCH64_OPND_QLF_NIL. */
1822
1823 static enum aarch64_opnd_qualifier
1824 get_qualifier_from_partial_encoding (aarch64_insn value,
1825 const enum aarch64_opnd_qualifier* \
1826 candidates,
1827 aarch64_insn mask)
1828 {
1829 int i;
1830 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1831 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1832 {
1833 aarch64_insn standard_value;
1834 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1835 break;
1836 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1837 if ((standard_value & mask) == (value & mask))
1838 return candidates[i];
1839 }
1840 return AARCH64_OPND_QLF_NIL;
1841 }
1842
1843 /* Given a list of qualifier sequences, return all possible valid qualifiers
1844 for operand IDX in QUALIFIERS.
1845 Assume QUALIFIERS is an array whose length is large enough. */
1846
1847 static void
1848 get_operand_possible_qualifiers (int idx,
1849 const aarch64_opnd_qualifier_seq_t *list,
1850 enum aarch64_opnd_qualifier *qualifiers)
1851 {
1852 int i;
1853 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1854 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1855 break;
1856 }
1857
1858 /* Decode the size Q field for e.g. SHADD.
1859 We tag one operand with the qualifer according to the code;
1860 whether the qualifier is valid for this opcode or not, it is the
1861 duty of the semantic checking. */
1862
1863 static int
1864 decode_sizeq (aarch64_inst *inst)
1865 {
1866 int idx;
1867 enum aarch64_opnd_qualifier qualifier;
1868 aarch64_insn code;
1869 aarch64_insn value, mask;
1870 enum aarch64_field_kind fld_sz;
1871 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1872
1873 if (inst->opcode->iclass == asisdlse
1874 || inst->opcode->iclass == asisdlsep
1875 || inst->opcode->iclass == asisdlso
1876 || inst->opcode->iclass == asisdlsop)
1877 fld_sz = FLD_vldst_size;
1878 else
1879 fld_sz = FLD_size;
1880
1881 code = inst->value;
1882 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1883 /* Obtain the info that which bits of fields Q and size are actually
1884 available for operand encoding. Opcodes like FMAXNM and FMLA have
1885 size[1] unavailable. */
1886 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1887
1888 /* The index of the operand we are going to tag a qualifier and the qualifer
1889 itself are reasoned from the value of the size and Q fields and the
1890 possible valid qualifier lists. */
1891 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1892 DEBUG_TRACE ("key idx: %d", idx);
1893
1894 /* For most related instruciton, size:Q are fully available for operand
1895 encoding. */
1896 if (mask == 0x7)
1897 {
1898 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1899 return 1;
1900 }
1901
1902 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1903 candidates);
1904 #ifdef DEBUG_AARCH64
1905 if (debug_dump)
1906 {
1907 int i;
1908 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1909 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1910 DEBUG_TRACE ("qualifier %d: %s", i,
1911 aarch64_get_qualifier_name(candidates[i]));
1912 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1913 }
1914 #endif /* DEBUG_AARCH64 */
1915
1916 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1917
1918 if (qualifier == AARCH64_OPND_QLF_NIL)
1919 return 0;
1920
1921 inst->operands[idx].qualifier = qualifier;
1922 return 1;
1923 }
1924
1925 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1926 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1927
1928 static int
1929 decode_asimd_fcvt (aarch64_inst *inst)
1930 {
1931 aarch64_field field = {0, 0};
1932 aarch64_insn value;
1933 enum aarch64_opnd_qualifier qualifier;
1934
1935 gen_sub_field (FLD_size, 0, 1, &field);
1936 value = extract_field_2 (&field, inst->value, 0);
1937 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1938 : AARCH64_OPND_QLF_V_2D;
1939 switch (inst->opcode->op)
1940 {
1941 case OP_FCVTN:
1942 case OP_FCVTN2:
1943 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1944 inst->operands[1].qualifier = qualifier;
1945 break;
1946 case OP_FCVTL:
1947 case OP_FCVTL2:
1948 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1949 inst->operands[0].qualifier = qualifier;
1950 break;
1951 default:
1952 assert (0);
1953 return 0;
1954 }
1955
1956 return 1;
1957 }
1958
1959 /* Decode size[0], i.e. bit 22, for
1960 e.g. FCVTXN <Vb><d>, <Va><n>. */
1961
1962 static int
1963 decode_asisd_fcvtxn (aarch64_inst *inst)
1964 {
1965 aarch64_field field = {0, 0};
1966 gen_sub_field (FLD_size, 0, 1, &field);
1967 if (!extract_field_2 (&field, inst->value, 0))
1968 return 0;
1969 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1970 return 1;
1971 }
1972
1973 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1974 static int
1975 decode_fcvt (aarch64_inst *inst)
1976 {
1977 enum aarch64_opnd_qualifier qualifier;
1978 aarch64_insn value;
1979 const aarch64_field field = {15, 2};
1980
1981 /* opc dstsize */
1982 value = extract_field_2 (&field, inst->value, 0);
1983 switch (value)
1984 {
1985 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1986 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1987 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1988 default: return 0;
1989 }
1990 inst->operands[0].qualifier = qualifier;
1991
1992 return 1;
1993 }
1994
1995 /* Do miscellaneous decodings that are not common enough to be driven by
1996 flags. */
1997
1998 static int
1999 do_misc_decoding (aarch64_inst *inst)
2000 {
2001 unsigned int value;
2002 switch (inst->opcode->op)
2003 {
2004 case OP_FCVT:
2005 return decode_fcvt (inst);
2006
2007 case OP_FCVTN:
2008 case OP_FCVTN2:
2009 case OP_FCVTL:
2010 case OP_FCVTL2:
2011 return decode_asimd_fcvt (inst);
2012
2013 case OP_FCVTXN_S:
2014 return decode_asisd_fcvtxn (inst);
2015
2016 case OP_MOV_P_P:
2017 case OP_MOVS_P_P:
2018 value = extract_field (FLD_SVE_Pn, inst->value, 0);
2019 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
2020 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2021
2022 case OP_MOV_Z_P_Z:
2023 return (extract_field (FLD_SVE_Zd, inst->value, 0)
2024 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2025
2026 case OP_MOV_Z_V:
2027 /* Index must be zero. */
2028 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2029 return value > 0 && value <= 16 && value == (value & -value);
2030
2031 case OP_MOV_Z_Z:
2032 return (extract_field (FLD_SVE_Zn, inst->value, 0)
2033 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2034
2035 case OP_MOV_Z_Zi:
2036 /* Index must be nonzero. */
2037 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2038 return value > 0 && value != (value & -value);
2039
2040 case OP_MOVM_P_P_P:
2041 return (extract_field (FLD_SVE_Pd, inst->value, 0)
2042 == extract_field (FLD_SVE_Pm, inst->value, 0));
2043
2044 case OP_MOVZS_P_P_P:
2045 case OP_MOVZ_P_P_P:
2046 return (extract_field (FLD_SVE_Pn, inst->value, 0)
2047 == extract_field (FLD_SVE_Pm, inst->value, 0));
2048
2049 case OP_NOTS_P_P_P_Z:
2050 case OP_NOT_P_P_P_Z:
2051 return (extract_field (FLD_SVE_Pm, inst->value, 0)
2052 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2053
2054 default:
2055 return 0;
2056 }
2057 }
2058
2059 /* Opcodes that have fields shared by multiple operands are usually flagged
2060 with flags. In this function, we detect such flags, decode the related
2061 field(s) and store the information in one of the related operands. The
2062 'one' operand is not any operand but one of the operands that can
2063 accommadate all the information that has been decoded. */
2064
2065 static int
2066 do_special_decoding (aarch64_inst *inst)
2067 {
2068 int idx;
2069 aarch64_insn value;
2070 /* Condition for truly conditional executed instructions, e.g. b.cond. */
2071 if (inst->opcode->flags & F_COND)
2072 {
2073 value = extract_field (FLD_cond2, inst->value, 0);
2074 inst->cond = get_cond_from_value (value);
2075 }
2076 /* 'sf' field. */
2077 if (inst->opcode->flags & F_SF)
2078 {
2079 idx = select_operand_for_sf_field_coding (inst->opcode);
2080 value = extract_field (FLD_sf, inst->value, 0);
2081 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2082 if ((inst->opcode->flags & F_N)
2083 && extract_field (FLD_N, inst->value, 0) != value)
2084 return 0;
2085 }
2086 /* 'sf' field. */
2087 if (inst->opcode->flags & F_LSE_SZ)
2088 {
2089 idx = select_operand_for_sf_field_coding (inst->opcode);
2090 value = extract_field (FLD_lse_sz, inst->value, 0);
2091 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2092 }
2093 /* size:Q fields. */
2094 if (inst->opcode->flags & F_SIZEQ)
2095 return decode_sizeq (inst);
2096
2097 if (inst->opcode->flags & F_FPTYPE)
2098 {
2099 idx = select_operand_for_fptype_field_coding (inst->opcode);
2100 value = extract_field (FLD_type, inst->value, 0);
2101 switch (value)
2102 {
2103 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2104 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2105 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2106 default: return 0;
2107 }
2108 }
2109
2110 if (inst->opcode->flags & F_SSIZE)
2111 {
2112 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
2113 of the base opcode. */
2114 aarch64_insn mask;
2115 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2116 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2117 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2118 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2119 /* For most related instruciton, the 'size' field is fully available for
2120 operand encoding. */
2121 if (mask == 0x3)
2122 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2123 else
2124 {
2125 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2126 candidates);
2127 inst->operands[idx].qualifier
2128 = get_qualifier_from_partial_encoding (value, candidates, mask);
2129 }
2130 }
2131
2132 if (inst->opcode->flags & F_T)
2133 {
2134 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2135 int num = 0;
2136 unsigned val, Q;
2137 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2138 == AARCH64_OPND_CLASS_SIMD_REG);
2139 /* imm5<3:0> q <t>
2140 0000 x reserved
2141 xxx1 0 8b
2142 xxx1 1 16b
2143 xx10 0 4h
2144 xx10 1 8h
2145 x100 0 2s
2146 x100 1 4s
2147 1000 0 reserved
2148 1000 1 2d */
2149 val = extract_field (FLD_imm5, inst->value, 0);
2150 while ((val & 0x1) == 0 && ++num <= 3)
2151 val >>= 1;
2152 if (num > 3)
2153 return 0;
2154 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2155 inst->operands[0].qualifier =
2156 get_vreg_qualifier_from_value ((num << 1) | Q);
2157 }
2158
2159 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2160 {
2161 /* Use Rt to encode in the case of e.g.
2162 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2163 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2164 if (idx == -1)
2165 {
2166 /* Otherwise use the result operand, which has to be a integer
2167 register. */
2168 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2169 == AARCH64_OPND_CLASS_INT_REG);
2170 idx = 0;
2171 }
2172 assert (idx == 0 || idx == 1);
2173 value = extract_field (FLD_Q, inst->value, 0);
2174 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2175 }
2176
2177 if (inst->opcode->flags & F_LDS_SIZE)
2178 {
2179 aarch64_field field = {0, 0};
2180 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2181 == AARCH64_OPND_CLASS_INT_REG);
2182 gen_sub_field (FLD_opc, 0, 1, &field);
2183 value = extract_field_2 (&field, inst->value, 0);
2184 inst->operands[0].qualifier
2185 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2186 }
2187
2188 /* Miscellaneous decoding; done as the last step. */
2189 if (inst->opcode->flags & F_MISC)
2190 return do_misc_decoding (inst);
2191
2192 return 1;
2193 }
2194
2195 /* Converters converting a real opcode instruction to its alias form. */
2196
2197 /* ROR <Wd>, <Ws>, #<shift>
2198 is equivalent to:
2199 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2200 static int
2201 convert_extr_to_ror (aarch64_inst *inst)
2202 {
2203 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2204 {
2205 copy_operand_info (inst, 2, 3);
2206 inst->operands[3].type = AARCH64_OPND_NIL;
2207 return 1;
2208 }
2209 return 0;
2210 }
2211
2212 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2213 is equivalent to:
2214 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2215 static int
2216 convert_shll_to_xtl (aarch64_inst *inst)
2217 {
2218 if (inst->operands[2].imm.value == 0)
2219 {
2220 inst->operands[2].type = AARCH64_OPND_NIL;
2221 return 1;
2222 }
2223 return 0;
2224 }
2225
2226 /* Convert
2227 UBFM <Xd>, <Xn>, #<shift>, #63.
2228 to
2229 LSR <Xd>, <Xn>, #<shift>. */
2230 static int
2231 convert_bfm_to_sr (aarch64_inst *inst)
2232 {
2233 int64_t imms, val;
2234
2235 imms = inst->operands[3].imm.value;
2236 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2237 if (imms == val)
2238 {
2239 inst->operands[3].type = AARCH64_OPND_NIL;
2240 return 1;
2241 }
2242
2243 return 0;
2244 }
2245
2246 /* Convert MOV to ORR. */
2247 static int
2248 convert_orr_to_mov (aarch64_inst *inst)
2249 {
2250 /* MOV <Vd>.<T>, <Vn>.<T>
2251 is equivalent to:
2252 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2253 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2254 {
2255 inst->operands[2].type = AARCH64_OPND_NIL;
2256 return 1;
2257 }
2258 return 0;
2259 }
2260
2261 /* When <imms> >= <immr>, the instruction written:
2262 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2263 is equivalent to:
2264 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2265
2266 static int
2267 convert_bfm_to_bfx (aarch64_inst *inst)
2268 {
2269 int64_t immr, imms;
2270
2271 immr = inst->operands[2].imm.value;
2272 imms = inst->operands[3].imm.value;
2273 if (imms >= immr)
2274 {
2275 int64_t lsb = immr;
2276 inst->operands[2].imm.value = lsb;
2277 inst->operands[3].imm.value = imms + 1 - lsb;
2278 /* The two opcodes have different qualifiers for
2279 the immediate operands; reset to help the checking. */
2280 reset_operand_qualifier (inst, 2);
2281 reset_operand_qualifier (inst, 3);
2282 return 1;
2283 }
2284
2285 return 0;
2286 }
2287
2288 /* When <imms> < <immr>, the instruction written:
2289 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2290 is equivalent to:
2291 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2292
2293 static int
2294 convert_bfm_to_bfi (aarch64_inst *inst)
2295 {
2296 int64_t immr, imms, val;
2297
2298 immr = inst->operands[2].imm.value;
2299 imms = inst->operands[3].imm.value;
2300 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2301 if (imms < immr)
2302 {
2303 inst->operands[2].imm.value = (val - immr) & (val - 1);
2304 inst->operands[3].imm.value = imms + 1;
2305 /* The two opcodes have different qualifiers for
2306 the immediate operands; reset to help the checking. */
2307 reset_operand_qualifier (inst, 2);
2308 reset_operand_qualifier (inst, 3);
2309 return 1;
2310 }
2311
2312 return 0;
2313 }
2314
2315 /* The instruction written:
2316 BFC <Xd>, #<lsb>, #<width>
2317 is equivalent to:
2318 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2319
2320 static int
2321 convert_bfm_to_bfc (aarch64_inst *inst)
2322 {
2323 int64_t immr, imms, val;
2324
2325 /* Should have been assured by the base opcode value. */
2326 assert (inst->operands[1].reg.regno == 0x1f);
2327
2328 immr = inst->operands[2].imm.value;
2329 imms = inst->operands[3].imm.value;
2330 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2331 if (imms < immr)
2332 {
2333 /* Drop XZR from the second operand. */
2334 copy_operand_info (inst, 1, 2);
2335 copy_operand_info (inst, 2, 3);
2336 inst->operands[3].type = AARCH64_OPND_NIL;
2337
2338 /* Recalculate the immediates. */
2339 inst->operands[1].imm.value = (val - immr) & (val - 1);
2340 inst->operands[2].imm.value = imms + 1;
2341
2342 /* The two opcodes have different qualifiers for the operands; reset to
2343 help the checking. */
2344 reset_operand_qualifier (inst, 1);
2345 reset_operand_qualifier (inst, 2);
2346 reset_operand_qualifier (inst, 3);
2347
2348 return 1;
2349 }
2350
2351 return 0;
2352 }
2353
2354 /* The instruction written:
2355 LSL <Xd>, <Xn>, #<shift>
2356 is equivalent to:
2357 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2358
2359 static int
2360 convert_ubfm_to_lsl (aarch64_inst *inst)
2361 {
2362 int64_t immr = inst->operands[2].imm.value;
2363 int64_t imms = inst->operands[3].imm.value;
2364 int64_t val
2365 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2366
2367 if ((immr == 0 && imms == val) || immr == imms + 1)
2368 {
2369 inst->operands[3].type = AARCH64_OPND_NIL;
2370 inst->operands[2].imm.value = val - imms;
2371 return 1;
2372 }
2373
2374 return 0;
2375 }
2376
2377 /* CINC <Wd>, <Wn>, <cond>
2378 is equivalent to:
2379 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2380 where <cond> is not AL or NV. */
2381
2382 static int
2383 convert_from_csel (aarch64_inst *inst)
2384 {
2385 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2386 && (inst->operands[3].cond->value & 0xe) != 0xe)
2387 {
2388 copy_operand_info (inst, 2, 3);
2389 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2390 inst->operands[3].type = AARCH64_OPND_NIL;
2391 return 1;
2392 }
2393 return 0;
2394 }
2395
2396 /* CSET <Wd>, <cond>
2397 is equivalent to:
2398 CSINC <Wd>, WZR, WZR, invert(<cond>)
2399 where <cond> is not AL or NV. */
2400
2401 static int
2402 convert_csinc_to_cset (aarch64_inst *inst)
2403 {
2404 if (inst->operands[1].reg.regno == 0x1f
2405 && inst->operands[2].reg.regno == 0x1f
2406 && (inst->operands[3].cond->value & 0xe) != 0xe)
2407 {
2408 copy_operand_info (inst, 1, 3);
2409 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2410 inst->operands[3].type = AARCH64_OPND_NIL;
2411 inst->operands[2].type = AARCH64_OPND_NIL;
2412 return 1;
2413 }
2414 return 0;
2415 }
2416
2417 /* MOV <Wd>, #<imm>
2418 is equivalent to:
2419 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2420
2421 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2422 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2423 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2424 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2425 machine-instruction mnemonic must be used. */
2426
2427 static int
2428 convert_movewide_to_mov (aarch64_inst *inst)
2429 {
2430 uint64_t value = inst->operands[1].imm.value;
2431 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2432 if (value == 0 && inst->operands[1].shifter.amount != 0)
2433 return 0;
2434 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2435 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2436 value <<= inst->operands[1].shifter.amount;
2437 /* As an alias convertor, it has to be clear that the INST->OPCODE
2438 is the opcode of the real instruction. */
2439 if (inst->opcode->op == OP_MOVN)
2440 {
2441 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2442 value = ~value;
2443 /* A MOVN has an immediate that could be encoded by MOVZ. */
2444 if (aarch64_wide_constant_p (value, is32, NULL))
2445 return 0;
2446 }
2447 inst->operands[1].imm.value = value;
2448 inst->operands[1].shifter.amount = 0;
2449 return 1;
2450 }
2451
2452 /* MOV <Wd>, #<imm>
2453 is equivalent to:
2454 ORR <Wd>, WZR, #<imm>.
2455
2456 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2457 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2458 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2459 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2460 machine-instruction mnemonic must be used. */
2461
2462 static int
2463 convert_movebitmask_to_mov (aarch64_inst *inst)
2464 {
2465 int is32;
2466 uint64_t value;
2467
2468 /* Should have been assured by the base opcode value. */
2469 assert (inst->operands[1].reg.regno == 0x1f);
2470 copy_operand_info (inst, 1, 2);
2471 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2472 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2473 value = inst->operands[1].imm.value;
2474 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2475 instruction. */
2476 if (inst->operands[0].reg.regno != 0x1f
2477 && (aarch64_wide_constant_p (value, is32, NULL)
2478 || aarch64_wide_constant_p (~value, is32, NULL)))
2479 return 0;
2480
2481 inst->operands[2].type = AARCH64_OPND_NIL;
2482 return 1;
2483 }
2484
2485 /* Some alias opcodes are disassembled by being converted from their real-form.
2486 N.B. INST->OPCODE is the real opcode rather than the alias. */
2487
2488 static int
2489 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2490 {
2491 switch (alias->op)
2492 {
2493 case OP_ASR_IMM:
2494 case OP_LSR_IMM:
2495 return convert_bfm_to_sr (inst);
2496 case OP_LSL_IMM:
2497 return convert_ubfm_to_lsl (inst);
2498 case OP_CINC:
2499 case OP_CINV:
2500 case OP_CNEG:
2501 return convert_from_csel (inst);
2502 case OP_CSET:
2503 case OP_CSETM:
2504 return convert_csinc_to_cset (inst);
2505 case OP_UBFX:
2506 case OP_BFXIL:
2507 case OP_SBFX:
2508 return convert_bfm_to_bfx (inst);
2509 case OP_SBFIZ:
2510 case OP_BFI:
2511 case OP_UBFIZ:
2512 return convert_bfm_to_bfi (inst);
2513 case OP_BFC:
2514 return convert_bfm_to_bfc (inst);
2515 case OP_MOV_V:
2516 return convert_orr_to_mov (inst);
2517 case OP_MOV_IMM_WIDE:
2518 case OP_MOV_IMM_WIDEN:
2519 return convert_movewide_to_mov (inst);
2520 case OP_MOV_IMM_LOG:
2521 return convert_movebitmask_to_mov (inst);
2522 case OP_ROR_IMM:
2523 return convert_extr_to_ror (inst);
2524 case OP_SXTL:
2525 case OP_SXTL2:
2526 case OP_UXTL:
2527 case OP_UXTL2:
2528 return convert_shll_to_xtl (inst);
2529 default:
2530 return 0;
2531 }
2532 }
2533
2534 static bfd_boolean
2535 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2536 aarch64_inst *, int, aarch64_operand_error *errors);
2537
2538 /* Given the instruction information in *INST, check if the instruction has
2539 any alias form that can be used to represent *INST. If the answer is yes,
2540 update *INST to be in the form of the determined alias. */
2541
2542 /* In the opcode description table, the following flags are used in opcode
2543 entries to help establish the relations between the real and alias opcodes:
2544
2545 F_ALIAS: opcode is an alias
2546 F_HAS_ALIAS: opcode has alias(es)
2547 F_P1
2548 F_P2
2549 F_P3: Disassembly preference priority 1-3 (the larger the
2550 higher). If nothing is specified, it is the priority
2551 0 by default, i.e. the lowest priority.
2552
2553 Although the relation between the machine and the alias instructions are not
2554 explicitly described, it can be easily determined from the base opcode
2555 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2556 description entries:
2557
2558 The mask of an alias opcode must be equal to or a super-set (i.e. more
2559 constrained) of that of the aliased opcode; so is the base opcode value.
2560
2561 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2562 && (opcode->mask & real->mask) == real->mask
2563 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2564 then OPCODE is an alias of, and only of, the REAL instruction
2565
2566 The alias relationship is forced flat-structured to keep related algorithm
2567 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2568
2569 During the disassembling, the decoding decision tree (in
2570 opcodes/aarch64-dis-2.c) always returns an machine instruction opcode entry;
2571 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2572 not specified), the disassembler will check whether there is any alias
2573 instruction exists for this real instruction. If there is, the disassembler
2574 will try to disassemble the 32-bit binary again using the alias's rule, or
2575 try to convert the IR to the form of the alias. In the case of the multiple
2576 aliases, the aliases are tried one by one from the highest priority
2577 (currently the flag F_P3) to the lowest priority (no priority flag), and the
2578 first succeeds first adopted.
2579
2580 You may ask why there is a need for the conversion of IR from one form to
2581 another in handling certain aliases. This is because on one hand it avoids
2582 adding more operand code to handle unusual encoding/decoding; on other
2583 hand, during the disassembling, the conversion is an effective approach to
2584 check the condition of an alias (as an alias may be adopted only if certain
2585 conditions are met).
2586
2587 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2588 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2589 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
2590
2591 static void
2592 determine_disassembling_preference (struct aarch64_inst *inst,
2593 aarch64_operand_error *errors)
2594 {
2595 const aarch64_opcode *opcode;
2596 const aarch64_opcode *alias;
2597
2598 opcode = inst->opcode;
2599
2600 /* This opcode does not have an alias, so use itself. */
2601 if (!opcode_has_alias (opcode))
2602 return;
2603
2604 alias = aarch64_find_alias_opcode (opcode);
2605 assert (alias);
2606
2607 #ifdef DEBUG_AARCH64
2608 if (debug_dump)
2609 {
2610 const aarch64_opcode *tmp = alias;
2611 printf ("#### LIST orderd: ");
2612 while (tmp)
2613 {
2614 printf ("%s, ", tmp->name);
2615 tmp = aarch64_find_next_alias_opcode (tmp);
2616 }
2617 printf ("\n");
2618 }
2619 #endif /* DEBUG_AARCH64 */
2620
2621 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2622 {
2623 DEBUG_TRACE ("try %s", alias->name);
2624 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2625
2626 /* An alias can be a pseudo opcode which will never be used in the
2627 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2628 aliasing AND. */
2629 if (pseudo_opcode_p (alias))
2630 {
2631 DEBUG_TRACE ("skip pseudo %s", alias->name);
2632 continue;
2633 }
2634
2635 if ((inst->value & alias->mask) != alias->opcode)
2636 {
2637 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2638 continue;
2639 }
2640 /* No need to do any complicated transformation on operands, if the alias
2641 opcode does not have any operand. */
2642 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2643 {
2644 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2645 aarch64_replace_opcode (inst, alias);
2646 return;
2647 }
2648 if (alias->flags & F_CONV)
2649 {
2650 aarch64_inst copy;
2651 memcpy (&copy, inst, sizeof (aarch64_inst));
2652 /* ALIAS is the preference as long as the instruction can be
2653 successfully converted to the form of ALIAS. */
2654 if (convert_to_alias (&copy, alias) == 1)
2655 {
2656 aarch64_replace_opcode (&copy, alias);
2657 assert (aarch64_match_operands_constraint (&copy, NULL));
2658 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2659 memcpy (inst, &copy, sizeof (aarch64_inst));
2660 return;
2661 }
2662 }
2663 else
2664 {
2665 /* Directly decode the alias opcode. */
2666 aarch64_inst temp;
2667 memset (&temp, '\0', sizeof (aarch64_inst));
2668 if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
2669 {
2670 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2671 memcpy (inst, &temp, sizeof (aarch64_inst));
2672 return;
2673 }
2674 }
2675 }
2676 }
2677
2678 /* Some instructions (including all SVE ones) use the instruction class
2679 to describe how a qualifiers_list index is represented in the instruction
2680 encoding. If INST is such an instruction, decode the appropriate fields
2681 and fill in the operand qualifiers accordingly. Return true if no
2682 problems are found. */
2683
2684 static bfd_boolean
2685 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2686 {
2687 int i, variant;
2688
2689 variant = 0;
2690 switch (inst->opcode->iclass)
2691 {
2692 case sve_cpy:
2693 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2694 break;
2695
2696 case sve_index:
2697 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2698 if ((i & 31) == 0)
2699 return FALSE;
2700 while ((i & 1) == 0)
2701 {
2702 i >>= 1;
2703 variant += 1;
2704 }
2705 break;
2706
2707 case sve_limm:
2708 /* Pick the smallest applicable element size. */
2709 if ((inst->value & 0x20600) == 0x600)
2710 variant = 0;
2711 else if ((inst->value & 0x20400) == 0x400)
2712 variant = 1;
2713 else if ((inst->value & 0x20000) == 0)
2714 variant = 2;
2715 else
2716 variant = 3;
2717 break;
2718
2719 case sve_misc:
2720 /* sve_misc instructions have only a single variant. */
2721 break;
2722
2723 case sve_movprfx:
2724 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2725 break;
2726
2727 case sve_pred_zm:
2728 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2729 break;
2730
2731 case sve_shift_pred:
2732 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2733 sve_shift:
2734 if (i == 0)
2735 return FALSE;
2736 while (i != 1)
2737 {
2738 i >>= 1;
2739 variant += 1;
2740 }
2741 break;
2742
2743 case sve_shift_unpred:
2744 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2745 goto sve_shift;
2746
2747 case sve_size_bhs:
2748 variant = extract_field (FLD_size, inst->value, 0);
2749 if (variant >= 3)
2750 return FALSE;
2751 break;
2752
2753 case sve_size_bhsd:
2754 variant = extract_field (FLD_size, inst->value, 0);
2755 break;
2756
2757 case sve_size_hsd:
2758 i = extract_field (FLD_size, inst->value, 0);
2759 if (i < 1)
2760 return FALSE;
2761 variant = i - 1;
2762 break;
2763
2764 case sve_size_sd:
2765 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2766 break;
2767
2768 default:
2769 /* No mapping between instruction class and qualifiers. */
2770 return TRUE;
2771 }
2772
2773 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2774 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2775 return TRUE;
2776 }
2777 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2778 fails, which meanes that CODE is not an instruction of OPCODE; otherwise
2779 return 1.
2780
2781 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2782 determined and used to disassemble CODE; this is done just before the
2783 return. */
2784
2785 static bfd_boolean
2786 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2787 aarch64_inst *inst, int noaliases_p,
2788 aarch64_operand_error *errors)
2789 {
2790 int i;
2791
2792 DEBUG_TRACE ("enter with %s", opcode->name);
2793
2794 assert (opcode && inst);
2795
2796 /* Clear inst. */
2797 memset (inst, '\0', sizeof (aarch64_inst));
2798
2799 /* Check the base opcode. */
2800 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2801 {
2802 DEBUG_TRACE ("base opcode match FAIL");
2803 goto decode_fail;
2804 }
2805
2806 inst->opcode = opcode;
2807 inst->value = code;
2808
2809 /* Assign operand codes and indexes. */
2810 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2811 {
2812 if (opcode->operands[i] == AARCH64_OPND_NIL)
2813 break;
2814 inst->operands[i].type = opcode->operands[i];
2815 inst->operands[i].idx = i;
2816 }
2817
2818 /* Call the opcode decoder indicated by flags. */
2819 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2820 {
2821 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2822 goto decode_fail;
2823 }
2824
2825 /* Possibly use the instruction class to determine the correct
2826 qualifier. */
2827 if (!aarch64_decode_variant_using_iclass (inst))
2828 {
2829 DEBUG_TRACE ("iclass-based decoder FAIL");
2830 goto decode_fail;
2831 }
2832
2833 /* Call operand decoders. */
2834 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2835 {
2836 const aarch64_operand *opnd;
2837 enum aarch64_opnd type;
2838
2839 type = opcode->operands[i];
2840 if (type == AARCH64_OPND_NIL)
2841 break;
2842 opnd = &aarch64_operands[type];
2843 if (operand_has_extractor (opnd)
2844 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
2845 errors)))
2846 {
2847 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2848 goto decode_fail;
2849 }
2850 }
2851
2852 /* If the opcode has a verifier, then check it now. */
2853 if (opcode->verifier && ! opcode->verifier (opcode, code))
2854 {
2855 DEBUG_TRACE ("operand verifier FAIL");
2856 goto decode_fail;
2857 }
2858
2859 /* Match the qualifiers. */
2860 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2861 {
2862 /* Arriving here, the CODE has been determined as a valid instruction
2863 of OPCODE and *INST has been filled with information of this OPCODE
2864 instruction. Before the return, check if the instruction has any
2865 alias and should be disassembled in the form of its alias instead.
2866 If the answer is yes, *INST will be updated. */
2867 if (!noaliases_p)
2868 determine_disassembling_preference (inst, errors);
2869 DEBUG_TRACE ("SUCCESS");
2870 return TRUE;
2871 }
2872 else
2873 {
2874 DEBUG_TRACE ("constraint matching FAIL");
2875 }
2876
2877 decode_fail:
2878 return FALSE;
2879 }
2880 \f
2881 /* This does some user-friendly fix-up to *INST. It is currently focus on
2882 the adjustment of qualifiers to help the printed instruction
2883 recognized/understood more easily. */
2884
2885 static void
2886 user_friendly_fixup (aarch64_inst *inst)
2887 {
2888 switch (inst->opcode->iclass)
2889 {
2890 case testbranch:
2891 /* TBNZ Xn|Wn, #uimm6, label
2892 Test and Branch Not Zero: conditionally jumps to label if bit number
2893 uimm6 in register Xn is not zero. The bit number implies the width of
2894 the register, which may be written and should be disassembled as Wn if
2895 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2896 */
2897 if (inst->operands[1].imm.value < 32)
2898 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2899 break;
2900 default: break;
2901 }
2902 }
2903
2904 /* Decode INSN and fill in *INST the instruction information. An alias
2905 opcode may be filled in *INSN if NOALIASES_P is FALSE. Return zero on
2906 success. */
2907
2908 int
2909 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2910 bfd_boolean noaliases_p,
2911 aarch64_operand_error *errors)
2912 {
2913 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2914
2915 #ifdef DEBUG_AARCH64
2916 if (debug_dump)
2917 {
2918 const aarch64_opcode *tmp = opcode;
2919 printf ("\n");
2920 DEBUG_TRACE ("opcode lookup:");
2921 while (tmp != NULL)
2922 {
2923 aarch64_verbose (" %s", tmp->name);
2924 tmp = aarch64_find_next_opcode (tmp);
2925 }
2926 }
2927 #endif /* DEBUG_AARCH64 */
2928
2929 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2930 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2931 opcode field and value, apart from the difference that one of them has an
2932 extra field as part of the opcode, but such a field is used for operand
2933 encoding in other opcode(s) ('immh' in the case of the example). */
2934 while (opcode != NULL)
2935 {
2936 /* But only one opcode can be decoded successfully for, as the
2937 decoding routine will check the constraint carefully. */
2938 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
2939 return ERR_OK;
2940 opcode = aarch64_find_next_opcode (opcode);
2941 }
2942
2943 return ERR_UND;
2944 }
2945
2946 /* Print operands. */
2947
2948 static void
2949 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2950 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2951 {
2952 int i, pcrel_p, num_printed;
2953 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2954 {
2955 char str[128];
2956 /* We regard the opcode operand info more, however we also look into
2957 the inst->operands to support the disassembling of the optional
2958 operand.
2959 The two operand code should be the same in all cases, apart from
2960 when the operand can be optional. */
2961 if (opcode->operands[i] == AARCH64_OPND_NIL
2962 || opnds[i].type == AARCH64_OPND_NIL)
2963 break;
2964
2965 /* Generate the operand string in STR. */
2966 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
2967 &info->target);
2968
2969 /* Print the delimiter (taking account of omitted operand(s)). */
2970 if (str[0] != '\0')
2971 (*info->fprintf_func) (info->stream, "%s",
2972 num_printed++ == 0 ? "\t" : ", ");
2973
2974 /* Print the operand. */
2975 if (pcrel_p)
2976 (*info->print_address_func) (info->target, info);
2977 else
2978 (*info->fprintf_func) (info->stream, "%s", str);
2979 }
2980 }
2981
2982 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
2983
2984 static void
2985 remove_dot_suffix (char *name, const aarch64_inst *inst)
2986 {
2987 char *ptr;
2988 size_t len;
2989
2990 ptr = strchr (inst->opcode->name, '.');
2991 assert (ptr && inst->cond);
2992 len = ptr - inst->opcode->name;
2993 assert (len < 8);
2994 strncpy (name, inst->opcode->name, len);
2995 name[len] = '\0';
2996 }
2997
2998 /* Print the instruction mnemonic name. */
2999
3000 static void
3001 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3002 {
3003 if (inst->opcode->flags & F_COND)
3004 {
3005 /* For instructions that are truly conditionally executed, e.g. b.cond,
3006 prepare the full mnemonic name with the corresponding condition
3007 suffix. */
3008 char name[8];
3009
3010 remove_dot_suffix (name, inst);
3011 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
3012 }
3013 else
3014 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
3015 }
3016
3017 /* Decide whether we need to print a comment after the operands of
3018 instruction INST. */
3019
3020 static void
3021 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
3022 {
3023 if (inst->opcode->flags & F_COND)
3024 {
3025 char name[8];
3026 unsigned int i, num_conds;
3027
3028 remove_dot_suffix (name, inst);
3029 num_conds = ARRAY_SIZE (inst->cond->names);
3030 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
3031 (*info->fprintf_func) (info->stream, "%s %s.%s",
3032 i == 1 ? " //" : ",",
3033 name, inst->cond->names[i]);
3034 }
3035 }
3036
3037 /* Print the instruction according to *INST. */
3038
3039 static void
3040 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
3041 struct disassemble_info *info)
3042 {
3043 print_mnemonic_name (inst, info);
3044 print_operands (pc, inst->opcode, inst->operands, info);
3045 print_comment (inst, info);
3046 }
3047
3048 /* Entry-point of the instruction disassembler and printer. */
3049
3050 static void
3051 print_insn_aarch64_word (bfd_vma pc,
3052 uint32_t word,
3053 struct disassemble_info *info,
3054 aarch64_operand_error *errors)
3055 {
3056 static const char *err_msg[6] =
3057 {
3058 [ERR_OK] = "_",
3059 [-ERR_UND] = "undefined",
3060 [-ERR_UNP] = "unpredictable",
3061 [-ERR_NYI] = "NYI"
3062 };
3063
3064 int ret;
3065 aarch64_inst inst;
3066
3067 info->insn_info_valid = 1;
3068 info->branch_delay_insns = 0;
3069 info->data_size = 0;
3070 info->target = 0;
3071 info->target2 = 0;
3072
3073 if (info->flags & INSN_HAS_RELOC)
3074 /* If the instruction has a reloc associated with it, then
3075 the offset field in the instruction will actually be the
3076 addend for the reloc. (If we are using REL type relocs).
3077 In such cases, we can ignore the pc when computing
3078 addresses, since the addend is not currently pc-relative. */
3079 pc = 0;
3080
3081 ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
3082
3083 if (((word >> 21) & 0x3ff) == 1)
3084 {
3085 /* RESERVED for ALES. */
3086 assert (ret != ERR_OK);
3087 ret = ERR_NYI;
3088 }
3089
3090 switch (ret)
3091 {
3092 case ERR_UND:
3093 case ERR_UNP:
3094 case ERR_NYI:
3095 /* Handle undefined instructions. */
3096 info->insn_type = dis_noninsn;
3097 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
3098 word, err_msg[-ret]);
3099 break;
3100 case ERR_OK:
3101 user_friendly_fixup (&inst);
3102 print_aarch64_insn (pc, &inst, info);
3103 break;
3104 default:
3105 abort ();
3106 }
3107 }
3108
3109 /* Disallow mapping symbols ($x, $d etc) from
3110 being displayed in symbol relative addresses. */
3111
3112 bfd_boolean
3113 aarch64_symbol_is_valid (asymbol * sym,
3114 struct disassemble_info * info ATTRIBUTE_UNUSED)
3115 {
3116 const char * name;
3117
3118 if (sym == NULL)
3119 return FALSE;
3120
3121 name = bfd_asymbol_name (sym);
3122
3123 return name
3124 && (name[0] != '$'
3125 || (name[1] != 'x' && name[1] != 'd')
3126 || (name[2] != '\0' && name[2] != '.'));
3127 }
3128
3129 /* Print data bytes on INFO->STREAM. */
3130
3131 static void
3132 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3133 uint32_t word,
3134 struct disassemble_info *info,
3135 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
3136 {
3137 switch (info->bytes_per_chunk)
3138 {
3139 case 1:
3140 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3141 break;
3142 case 2:
3143 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3144 break;
3145 case 4:
3146 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3147 break;
3148 default:
3149 abort ();
3150 }
3151 }
3152
3153 /* Try to infer the code or data type from a symbol.
3154 Returns nonzero if *MAP_TYPE was set. */
3155
3156 static int
3157 get_sym_code_type (struct disassemble_info *info, int n,
3158 enum map_type *map_type)
3159 {
3160 elf_symbol_type *es;
3161 unsigned int type;
3162 const char *name;
3163
3164 /* If the symbol is in a different section, ignore it. */
3165 if (info->section != NULL && info->section != info->symtab[n]->section)
3166 return FALSE;
3167
3168 es = *(elf_symbol_type **)(info->symtab + n);
3169 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3170
3171 /* If the symbol has function type then use that. */
3172 if (type == STT_FUNC)
3173 {
3174 *map_type = MAP_INSN;
3175 return TRUE;
3176 }
3177
3178 /* Check for mapping symbols. */
3179 name = bfd_asymbol_name(info->symtab[n]);
3180 if (name[0] == '$'
3181 && (name[1] == 'x' || name[1] == 'd')
3182 && (name[2] == '\0' || name[2] == '.'))
3183 {
3184 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3185 return TRUE;
3186 }
3187
3188 return FALSE;
3189 }
3190
3191 /* Entry-point of the AArch64 disassembler. */
3192
3193 int
3194 print_insn_aarch64 (bfd_vma pc,
3195 struct disassemble_info *info)
3196 {
3197 bfd_byte buffer[INSNLEN];
3198 int status;
3199 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *,
3200 aarch64_operand_error *);
3201 bfd_boolean found = FALSE;
3202 unsigned int size = 4;
3203 unsigned long data;
3204 aarch64_operand_error errors;
3205
3206 if (info->disassembler_options)
3207 {
3208 set_default_aarch64_dis_options (info);
3209
3210 parse_aarch64_dis_options (info->disassembler_options);
3211
3212 /* To avoid repeated parsing of these options, we remove them here. */
3213 info->disassembler_options = NULL;
3214 }
3215
3216 /* Aarch64 instructions are always little-endian */
3217 info->endian_code = BFD_ENDIAN_LITTLE;
3218
3219 /* First check the full symtab for a mapping symbol, even if there
3220 are no usable non-mapping symbols for this address. */
3221 if (info->symtab_size != 0
3222 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3223 {
3224 enum map_type type = MAP_INSN;
3225 int last_sym = -1;
3226 bfd_vma addr;
3227 int n;
3228
3229 if (pc <= last_mapping_addr)
3230 last_mapping_sym = -1;
3231
3232 /* Start scanning at the start of the function, or wherever
3233 we finished last time. */
3234 n = info->symtab_pos + 1;
3235 if (n < last_mapping_sym)
3236 n = last_mapping_sym;
3237
3238 /* Scan up to the location being disassembled. */
3239 for (; n < info->symtab_size; n++)
3240 {
3241 addr = bfd_asymbol_value (info->symtab[n]);
3242 if (addr > pc)
3243 break;
3244 if (get_sym_code_type (info, n, &type))
3245 {
3246 last_sym = n;
3247 found = TRUE;
3248 }
3249 }
3250
3251 if (!found)
3252 {
3253 n = info->symtab_pos;
3254 if (n < last_mapping_sym)
3255 n = last_mapping_sym;
3256
3257 /* No mapping symbol found at this address. Look backwards
3258 for a preceeding one. */
3259 for (; n >= 0; n--)
3260 {
3261 if (get_sym_code_type (info, n, &type))
3262 {
3263 last_sym = n;
3264 found = TRUE;
3265 break;
3266 }
3267 }
3268 }
3269
3270 last_mapping_sym = last_sym;
3271 last_type = type;
3272
3273 /* Look a little bit ahead to see if we should print out
3274 less than four bytes of data. If there's a symbol,
3275 mapping or otherwise, after two bytes then don't
3276 print more. */
3277 if (last_type == MAP_DATA)
3278 {
3279 size = 4 - (pc & 3);
3280 for (n = last_sym + 1; n < info->symtab_size; n++)
3281 {
3282 addr = bfd_asymbol_value (info->symtab[n]);
3283 if (addr > pc)
3284 {
3285 if (addr - pc < size)
3286 size = addr - pc;
3287 break;
3288 }
3289 }
3290 /* If the next symbol is after three bytes, we need to
3291 print only part of the data, so that we can use either
3292 .byte or .short. */
3293 if (size == 3)
3294 size = (pc & 1) ? 1 : 2;
3295 }
3296 }
3297
3298 if (last_type == MAP_DATA)
3299 {
3300 /* size was set above. */
3301 info->bytes_per_chunk = size;
3302 info->display_endian = info->endian;
3303 printer = print_insn_data;
3304 }
3305 else
3306 {
3307 info->bytes_per_chunk = size = INSNLEN;
3308 info->display_endian = info->endian_code;
3309 printer = print_insn_aarch64_word;
3310 }
3311
3312 status = (*info->read_memory_func) (pc, buffer, size, info);
3313 if (status != 0)
3314 {
3315 (*info->memory_error_func) (status, pc, info);
3316 return -1;
3317 }
3318
3319 data = bfd_get_bits (buffer, size * 8,
3320 info->display_endian == BFD_ENDIAN_BIG);
3321
3322 (*printer) (pc, data, info, &errors);
3323
3324 return size;
3325 }
3326 \f
3327 void
3328 print_aarch64_disassembler_options (FILE *stream)
3329 {
3330 fprintf (stream, _("\n\
3331 The following AARCH64 specific disassembler options are supported for use\n\
3332 with the -M switch (multiple options should be separated by commas):\n"));
3333
3334 fprintf (stream, _("\n\
3335 no-aliases Don't print instruction aliases.\n"));
3336
3337 fprintf (stream, _("\n\
3338 aliases Do print instruction aliases.\n"));
3339
3340 #ifdef DEBUG_AARCH64
3341 fprintf (stream, _("\n\
3342 debug_dump Temp switch for debug trace.\n"));
3343 #endif /* DEBUG_AARCH64 */
3344
3345 fprintf (stream, _("\n"));
3346 }
This page took 0.098232 seconds and 4 git commands to generate.