1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define INSNLEN 4
30
31 /* Cached mapping symbol state. */
32 enum map_type
33 {
34 MAP_INSN,
35 MAP_DATA
36 };
37
38 static enum map_type last_type;
39 static int last_mapping_sym = -1;
40 static bfd_vma last_mapping_addr = 0;
41
42 /* Other options */
43 static int no_aliases = 0; /* If set, disassemble as the most general instruction. */
44 static int no_notes = 1; /* If set, do not print disassembly notes in the
45 output as comments. */
46
47 /* Currently active instruction sequence. */
48 static aarch64_instr_sequence insn_sequence;
49
50 static void
51 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
52 {
53 }
54
55 static void
56 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
57 {
58 /* Try to match options that are simple flags */
59 if (CONST_STRNEQ (option, "no-aliases"))
60 {
61 no_aliases = 1;
62 return;
63 }
64
65 if (CONST_STRNEQ (option, "aliases"))
66 {
67 no_aliases = 0;
68 return;
69 }
70
71 if (CONST_STRNEQ (option, "no-notes"))
72 {
73 no_notes = 1;
74 return;
75 }
76
77 if (CONST_STRNEQ (option, "notes"))
78 {
79 no_notes = 0;
80 return;
81 }
82
83 #ifdef DEBUG_AARCH64
84 if (CONST_STRNEQ (option, "debug_dump"))
85 {
86 debug_dump = 1;
87 return;
88 }
89 #endif /* DEBUG_AARCH64 */
90
91 /* Invalid option. */
92 opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
93 }
94
95 static void
96 parse_aarch64_dis_options (const char *options)
97 {
98 const char *option_end;
99
100 if (options == NULL)
101 return;
102
103 while (*options != '\0')
104 {
105 /* Skip empty options. */
106 if (*options == ',')
107 {
108 options++;
109 continue;
110 }
111
112 /* We know that *options is neither NUL nor a comma. */
113 option_end = options + 1;
114 while (*option_end != ',' && *option_end != '\0')
115 option_end++;
116
117 parse_aarch64_dis_option (options, option_end - options);
118
119 /* Go on to the next one. If option_end points to a comma, it
120 will be skipped above. */
121 options = option_end;
122 }
123 }
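/* An illustrative usage sketch, kept out of the build: the option string
   (typically supplied through objdump's -M switch) is a comma-separated
   list, so "no-aliases,notes" selects the most general instruction forms
   while keeping the note comments.  The example_* helper is hypothetical.  */
#if 0
static void
example_parse_dis_options (void)
{
  parse_aarch64_dis_options ("no-aliases,notes");
  /* Afterwards no_aliases == 1 and no_notes == 0.  */
}
#endif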
124 \f
125 /* Functions doing the instruction disassembling. */
126
127 /* The unnamed arguments consist of the number of fields and the field kinds
128 themselves; the VALUE is extracted from CODE according to them and returned.
129 MASK can be zero or the base mask of the opcode.
130
131 N.B. the fields are required to be in such an order that the most significant
132 field for VALUE comes first, e.g. when the <index> in
133 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
134 is encoded in H:L:M, the fields should be passed in
135 the order of H, L, M. */
136
137 aarch64_insn
138 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
139 {
140 uint32_t num;
141 const aarch64_field *field;
142 enum aarch64_field_kind kind;
143 va_list va;
144
145 va_start (va, mask);
146 num = va_arg (va, uint32_t);
147 assert (num <= 5);
148 aarch64_insn value = 0x0;
149 while (num--)
150 {
151 kind = va_arg (va, enum aarch64_field_kind);
152 field = &fields[kind];
153 value <<= field->width;
154 value |= extract_field (kind, code, mask);
155 }
156 return value;
157 }
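/* A minimal usage sketch (not part of the build): extracting the <index>
   that the comment above describes as being encoded in H:L:M.  The fields
   are passed most-significant first, so H ends up as the top bit.  */
#if 0
static aarch64_insn
example_extract_hlm (aarch64_insn code)
{
  /* Returns (H << 2) | (L << 1) | M.  */
  return extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);
}
#endif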
158
159 /* Extract the value of all fields in SELF->fields from instruction CODE.
160 The least significant bit comes from the final field. */
161
162 static aarch64_insn
163 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
164 {
165 aarch64_insn value;
166 unsigned int i;
167 enum aarch64_field_kind kind;
168
169 value = 0;
170 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
171 {
172 kind = self->fields[i];
173 value <<= fields[kind].width;
174 value |= extract_field (kind, code, 0);
175 }
176 return value;
177 }
178
179 /* Sign-extend bit I of VALUE. */
180 static inline int32_t
181 sign_extend (aarch64_insn value, unsigned i)
182 {
183 uint32_t ret = value;
184
185 assert (i < 32);
186 if ((value >> i) & 0x1)
187 {
188 uint32_t val = (uint32_t)(-1) << i;
189 ret = ret | val;
190 }
191 return (int32_t) ret;
192 }
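/* Worked example, for illustration only: a 9-bit two's-complement field
   holding 0x1f0 has bit 8 set and therefore represents -16, while 0x0f0
   stays positive.  */
#if 0
static void
example_sign_extend (void)
{
  assert (sign_extend (0x1f0, 8) == -16);
  assert (sign_extend (0x0f0, 8) == 240);
}
#endif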
193
194 /* N.B. the following inline helper functions create a dependency on the
195 order of operand qualifier enumerators. */
196
197 /* Given VALUE, return qualifier for a general purpose register. */
198 static inline enum aarch64_opnd_qualifier
199 get_greg_qualifier_from_value (aarch64_insn value)
200 {
201 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
202 assert (value <= 0x1
203 && aarch64_get_qualifier_standard_value (qualifier) == value);
204 return qualifier;
205 }
206
207 /* Given VALUE, return qualifier for a vector register. This does not support
208 decoding instructions that accept the 2H vector type. */
209
210 static inline enum aarch64_opnd_qualifier
211 get_vreg_qualifier_from_value (aarch64_insn value)
212 {
213 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
214
215 /* Instructions using vector type 2H should not call this function. Skip over
216 the 2H qualifier. */
217 if (qualifier >= AARCH64_OPND_QLF_V_2H)
218 qualifier += 1;
219
220 assert (value <= 0x8
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
222 return qualifier;
223 }
224
225 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
226 static inline enum aarch64_opnd_qualifier
227 get_sreg_qualifier_from_value (aarch64_insn value)
228 {
229 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
230
231 assert (value <= 0x4
232 && aarch64_get_qualifier_standard_value (qualifier) == value);
233 return qualifier;
234 }
235
236 /* Given the instruction in *INST, which is probably half way through being
237 decoded, work out the qualifier that the caller expects for operand
238 I. Return such a qualifier if we can establish it; otherwise return
239 AARCH64_OPND_QLF_NIL. */
240
241 static aarch64_opnd_qualifier_t
242 get_expected_qualifier (const aarch64_inst *inst, int i)
243 {
244 aarch64_opnd_qualifier_seq_t qualifiers;
245 /* Should not be called if the qualifier is known. */
246 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
247 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
248 i, qualifiers))
249 return qualifiers[i];
250 else
251 return AARCH64_OPND_QLF_NIL;
252 }
253
254 /* Operand extractors. */
255
256 bfd_boolean
257 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
258 const aarch64_insn code,
259 const aarch64_inst *inst ATTRIBUTE_UNUSED,
260 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
261 {
262 info->reg.regno = extract_field (self->fields[0], code, 0);
263 return TRUE;
264 }
265
266 bfd_boolean
267 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
268 const aarch64_insn code ATTRIBUTE_UNUSED,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED,
270 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
271 {
272 assert (info->idx == 1
273 || info->idx == 3);
274 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
275 return TRUE;
276 }
277
278 /* e.g. IC <ic_op>{, <Xt>}. */
279 bfd_boolean
280 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
281 const aarch64_insn code,
282 const aarch64_inst *inst ATTRIBUTE_UNUSED,
283 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
284 {
285 info->reg.regno = extract_field (self->fields[0], code, 0);
286 assert (info->idx == 1
287 && (aarch64_get_operand_class (inst->operands[0].type)
288 == AARCH64_OPND_CLASS_SYSTEM));
289 /* This will make the constraint checking happy and more importantly will
290 help the disassembler determine whether this operand is optional or
291 not. */
292 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
293
294 return TRUE;
295 }
296
297 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
298 bfd_boolean
299 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
300 const aarch64_insn code,
301 const aarch64_inst *inst ATTRIBUTE_UNUSED,
302 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
303 {
304 /* regno */
305 info->reglane.regno = extract_field (self->fields[0], code,
306 inst->opcode->mask);
307
308 /* Index and/or type. */
309 if (inst->opcode->iclass == asisdone
310 || inst->opcode->iclass == asimdins)
311 {
312 if (info->type == AARCH64_OPND_En
313 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
314 {
315 unsigned shift;
316 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
317 assert (info->idx == 1); /* Vn */
318 aarch64_insn value = extract_field (FLD_imm4, code, 0);
319 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
320 info->qualifier = get_expected_qualifier (inst, info->idx);
321 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
322 info->reglane.index = value >> shift;
323 }
324 else
325 {
326 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
327 imm5<3:0> <V>
328 0000 RESERVED
329 xxx1 B
330 xx10 H
331 x100 S
332 1000 D */
333 int pos = -1;
334 aarch64_insn value = extract_field (FLD_imm5, code, 0);
335 while (++pos <= 3 && (value & 0x1) == 0)
336 value >>= 1;
337 if (pos > 3)
338 return FALSE;
339 info->qualifier = get_sreg_qualifier_from_value (pos);
340 info->reglane.index = (unsigned) (value >> 1);
341 }
342 }
343 else if (inst->opcode->iclass == dotproduct)
344 {
345 /* Need information in other operand(s) to help decoding. */
346 info->qualifier = get_expected_qualifier (inst, info->idx);
347 switch (info->qualifier)
348 {
349 case AARCH64_OPND_QLF_S_4B:
350 /* L:H */
351 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
352 info->reglane.regno &= 0x1f;
353 break;
354 default:
355 return FALSE;
356 }
357 }
358 else if (inst->opcode->iclass == cryptosm3)
359 {
360 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>.S[<imm2>]. */
361 info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
362 }
363 else
364 {
365 /* Index only, for e.g.
366 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
367
368 /* Need information in other operand(s) to help decoding. */
369 info->qualifier = get_expected_qualifier (inst, info->idx);
370 switch (info->qualifier)
371 {
372 case AARCH64_OPND_QLF_S_H:
373 if (info->type == AARCH64_OPND_Em16)
374 {
375 /* h:l:m */
376 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
377 FLD_M);
378 info->reglane.regno &= 0xf;
379 }
380 else
381 {
382 /* h:l */
383 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
384 }
385 break;
386 case AARCH64_OPND_QLF_S_S:
387 /* h:l */
388 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
389 break;
390 case AARCH64_OPND_QLF_S_D:
391 /* H */
392 info->reglane.index = extract_field (FLD_H, code, 0);
393 break;
394 default:
395 return FALSE;
396 }
397
398 if (inst->opcode->op == OP_FCMLA_ELEM
399 && info->qualifier != AARCH64_OPND_QLF_S_H)
400 {
401 /* Complex operand takes two elements. */
402 if (info->reglane.index & 1)
403 return FALSE;
404 info->reglane.index /= 2;
405 }
406 }
407
408 return TRUE;
409 }
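/* An illustrative sketch (kept out of the build) of the imm5 scan used above
   for DUP <V><d>, <Vn>.<T>[<index>]: the lowest set bit selects the element
   size and the remaining bits give the index, so imm5 = 0b10110 selects the
   H qualifier with index 5.  */
#if 0
static void
example_dup_imm5_scan (void)
{
  aarch64_insn value = 0x16;	/* imm5 = 0b10110.  */
  int pos = -1;
  while (++pos <= 3 && (value & 0x1) == 0)
    value >>= 1;
  assert (pos == 1 /* H */ && (value >> 1) == 5);
}
#endif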
410
411 bfd_boolean
412 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
413 const aarch64_insn code,
414 const aarch64_inst *inst ATTRIBUTE_UNUSED,
415 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
416 {
417 /* R */
418 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
419 /* len */
420 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
421 return TRUE;
422 }
423
424 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
425 bfd_boolean
426 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
427 aarch64_opnd_info *info, const aarch64_insn code,
428 const aarch64_inst *inst,
429 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
430 {
431 aarch64_insn value;
432 /* Number of elements in each structure to be loaded/stored. */
433 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
434
435 struct
436 {
437 unsigned is_reserved;
438 unsigned num_regs;
439 unsigned num_elements;
440 } data [] =
441 { {0, 4, 4},
442 {1, 4, 4},
443 {0, 4, 1},
444 {0, 4, 2},
445 {0, 3, 3},
446 {1, 3, 3},
447 {0, 3, 1},
448 {0, 1, 1},
449 {0, 2, 2},
450 {1, 2, 2},
451 {0, 2, 1},
452 };
453
454 /* Rt */
455 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
456 /* opcode */
457 value = extract_field (FLD_opcode, code, 0);
458 /* PR 21595: Check for a bogus value. */
459 if (value >= ARRAY_SIZE (data))
460 return FALSE;
461 if (expected_num != data[value].num_elements || data[value].is_reserved)
462 return FALSE;
463 info->reglist.num_regs = data[value].num_regs;
464
465 return TRUE;
466 }
467
468 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
469 lanes instructions. */
470 bfd_boolean
471 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
472 aarch64_opnd_info *info, const aarch64_insn code,
473 const aarch64_inst *inst,
474 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
475 {
476 aarch64_insn value;
477
478 /* Rt */
479 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
480 /* S */
481 value = extract_field (FLD_S, code, 0);
482
483 /* Number of registers is equal to the number of elements in
484 each structure to be loaded/stored. */
485 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
486 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
487
488 /* Except when it is LD1R. */
489 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
490 info->reglist.num_regs = 2;
491
492 return TRUE;
493 }
494
495 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
496 load/store single element instructions. */
497 bfd_boolean
498 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
499 aarch64_opnd_info *info, const aarch64_insn code,
500 const aarch64_inst *inst ATTRIBUTE_UNUSED,
501 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
502 {
503 aarch64_field field = {0, 0};
504 aarch64_insn QSsize; /* fields Q:S:size. */
505 aarch64_insn opcodeh2; /* opcode<2:1> */
506
507 /* Rt */
508 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
509
510 /* Decode the index, opcode<2:1> and size. */
511 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
512 opcodeh2 = extract_field_2 (&field, code, 0);
513 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
514 switch (opcodeh2)
515 {
516 case 0x0:
517 info->qualifier = AARCH64_OPND_QLF_S_B;
518 /* Index encoded in "Q:S:size". */
519 info->reglist.index = QSsize;
520 break;
521 case 0x1:
522 if (QSsize & 0x1)
523 /* UND. */
524 return FALSE;
525 info->qualifier = AARCH64_OPND_QLF_S_H;
526 /* Index encoded in "Q:S:size<1>". */
527 info->reglist.index = QSsize >> 1;
528 break;
529 case 0x2:
530 if ((QSsize >> 1) & 0x1)
531 /* UND. */
532 return FALSE;
533 if ((QSsize & 0x1) == 0)
534 {
535 info->qualifier = AARCH64_OPND_QLF_S_S;
536 /* Index encoded in "Q:S". */
537 info->reglist.index = QSsize >> 2;
538 }
539 else
540 {
541 if (extract_field (FLD_S, code, 0))
542 /* UND */
543 return FALSE;
544 info->qualifier = AARCH64_OPND_QLF_S_D;
545 /* Index encoded in "Q". */
546 info->reglist.index = QSsize >> 3;
547 }
548 break;
549 default:
550 return FALSE;
551 }
552
553 info->reglist.has_index = 1;
554 info->reglist.num_regs = 0;
555 /* Number of registers is equal to the number of elements in
556 each structure to be loaded/stored. */
557 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
558 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
559
560 return TRUE;
561 }
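/* Worked example, for illustration only: with opcode<2:1> = 1 (an H-element
   access) and Q:S:size = 0b1010, size<0> is clear so the encoding is valid
   and the lane index is Q:S:size<1> = 0b101 = 5.  */
#if 0
static void
example_elemlist_index (void)
{
  aarch64_insn QSsize = 0xa;	/* Q:S:size = 1:0:10.  */
  assert ((QSsize & 0x1) == 0 && (QSsize >> 1) == 5);
}
#endif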
562
563 /* Decode fields immh:immb and/or Q for e.g.
564 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
565 or SSHR <V><d>, <V><n>, #<shift>. */
566
567 bfd_boolean
568 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
569 aarch64_opnd_info *info, const aarch64_insn code,
570 const aarch64_inst *inst,
571 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
572 {
573 int pos;
574 aarch64_insn Q, imm, immh;
575 enum aarch64_insn_class iclass = inst->opcode->iclass;
576
577 immh = extract_field (FLD_immh, code, 0);
578 if (immh == 0)
579 return FALSE;
580 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
581 pos = 4;
582 /* Get highest set bit in immh. */
583 while (--pos >= 0 && (immh & 0x8) == 0)
584 immh <<= 1;
585
586 assert ((iclass == asimdshf || iclass == asisdshf)
587 && (info->type == AARCH64_OPND_IMM_VLSR
588 || info->type == AARCH64_OPND_IMM_VLSL));
589
590 if (iclass == asimdshf)
591 {
592 Q = extract_field (FLD_Q, code, 0);
593 /* immh Q <T>
594 0000 x SEE AdvSIMD modified immediate
595 0001 0 8B
596 0001 1 16B
597 001x 0 4H
598 001x 1 8H
599 01xx 0 2S
600 01xx 1 4S
601 1xxx 0 RESERVED
602 1xxx 1 2D */
603 info->qualifier =
604 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
605 }
606 else
607 info->qualifier = get_sreg_qualifier_from_value (pos);
608
609 if (info->type == AARCH64_OPND_IMM_VLSR)
610 /* immh <shift>
611 0000 SEE AdvSIMD modified immediate
612 0001 (16-UInt(immh:immb))
613 001x (32-UInt(immh:immb))
614 01xx (64-UInt(immh:immb))
615 1xxx (128-UInt(immh:immb)) */
616 info->imm.value = (16 << pos) - imm;
617 else
618 /* immh:immb
619 immh <shift>
620 0000 SEE AdvSIMD modified immediate
621 0001 (UInt(immh:immb)-8)
622 001x (UInt(immh:immb)-16)
623 01xx (UInt(immh:immb)-32)
624 1xxx (UInt(immh:immb)-64) */
625 info->imm.value = imm - (8 << pos);
626
627 return TRUE;
628 }
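/* Worked example, for illustration only: for a right-shift immediate with
   immh:immb = 0b0110101, the scan above leaves pos = 2 (32-bit elements)
   and the decoded shift is (16 << 2) - 53 = 11.  */
#if 0
static void
example_vlsr_shift (void)
{
  aarch64_insn immh = 0x6, immb = 0x5;
  aarch64_insn imm = (immh << 3) | immb;	/* UInt(immh:immb) = 53.  */
  int pos = 4;
  while (--pos >= 0 && (immh & 0x8) == 0)
    immh <<= 1;
  assert (pos == 2 && (16 << pos) - imm == 11);
}
#endif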
629
630 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
631 bfd_boolean
632 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
633 aarch64_opnd_info *info, const aarch64_insn code,
634 const aarch64_inst *inst ATTRIBUTE_UNUSED,
635 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
636 {
637 int64_t imm;
638 aarch64_insn val;
639 val = extract_field (FLD_size, code, 0);
640 switch (val)
641 {
642 case 0: imm = 8; break;
643 case 1: imm = 16; break;
644 case 2: imm = 32; break;
645 default: return FALSE;
646 }
647 info->imm.value = imm;
648 return TRUE;
649 }
650
651 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
652 The value in the field(s) will be extracted as an unsigned immediate value. */
653 bfd_boolean
654 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
655 const aarch64_insn code,
656 const aarch64_inst *inst ATTRIBUTE_UNUSED,
657 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
658 {
659 int64_t imm;
660
661 imm = extract_all_fields (self, code);
662
663 if (operand_need_sign_extension (self))
664 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
665
666 if (operand_need_shift_by_two (self))
667 imm <<= 2;
668 else if (operand_need_shift_by_four (self))
669 imm <<= 4;
670
671 if (info->type == AARCH64_OPND_ADDR_ADRP)
672 imm <<= 12;
673
674 info->imm.value = imm;
675 return TRUE;
676 }
677
678 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
679 bfd_boolean
680 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
681 const aarch64_insn code,
682 const aarch64_inst *inst ATTRIBUTE_UNUSED,
683 aarch64_operand_error *errors)
684 {
685 aarch64_ext_imm (self, info, code, inst, errors);
686 info->shifter.kind = AARCH64_MOD_LSL;
687 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
688 return TRUE;
689 }
690
691 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
692 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
693 bfd_boolean
694 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
695 aarch64_opnd_info *info,
696 const aarch64_insn code,
697 const aarch64_inst *inst ATTRIBUTE_UNUSED,
698 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
699 {
700 uint64_t imm;
701 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
702 aarch64_field field = {0, 0};
703
704 assert (info->idx == 1);
705
706 if (info->type == AARCH64_OPND_SIMD_FPIMM)
707 info->imm.is_fp = 1;
708
709 /* a:b:c:d:e:f:g:h */
710 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
711 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
712 {
713 /* Either MOVI <Dd>, #<imm>
714 or MOVI <Vd>.2D, #<imm>.
715 <imm> is a 64-bit immediate
716 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
717 encoded in "a:b:c:d:e:f:g:h". */
718 int i;
719 unsigned abcdefgh = imm;
720 for (imm = 0ull, i = 0; i < 8; i++)
721 if (((abcdefgh >> i) & 0x1) != 0)
722 imm |= 0xffull << (8 * i);
723 }
724 info->imm.value = imm;
725
726 /* cmode */
727 info->qualifier = get_expected_qualifier (inst, info->idx);
728 switch (info->qualifier)
729 {
730 case AARCH64_OPND_QLF_NIL:
731 /* no shift */
732 info->shifter.kind = AARCH64_MOD_NONE;
733 return 1;
734 case AARCH64_OPND_QLF_LSL:
735 /* shift zeros */
736 info->shifter.kind = AARCH64_MOD_LSL;
737 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
738 {
739 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
740 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
741 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
742 default: assert (0); return FALSE;
743 }
744 /* 00: 0; 01: 8; 10:16; 11:24. */
745 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
746 break;
747 case AARCH64_OPND_QLF_MSL:
748 /* shift ones */
749 info->shifter.kind = AARCH64_MOD_MSL;
750 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
751 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
752 break;
753 default:
754 assert (0);
755 return FALSE;
756 }
757
758 return TRUE;
759 }
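/* Worked example, for illustration only: for MOVI <Vd>.2D the abcdefgh bits
   are expanded so that each set bit becomes a 0xff byte, e.g. 0xa5
   (0b10100101) expands to 0xff00ff0000ff00ff.  */
#if 0
static void
example_movi_2d_expand (void)
{
  unsigned abcdefgh = 0xa5;
  uint64_t imm = 0;
  int i;
  for (i = 0; i < 8; i++)
    if (((abcdefgh >> i) & 0x1) != 0)
      imm |= 0xffull << (8 * i);
  assert (imm == 0xff00ff0000ff00ffull);
}
#endif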
760
761 /* Decode an 8-bit floating-point immediate. */
762 bfd_boolean
763 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
764 const aarch64_insn code,
765 const aarch64_inst *inst ATTRIBUTE_UNUSED,
766 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
767 {
768 info->imm.value = extract_all_fields (self, code);
769 info->imm.is_fp = 1;
770 return TRUE;
771 }
772
773 /* Decode a 1-bit rotate immediate (#90 or #270). */
774 bfd_boolean
775 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
776 const aarch64_insn code,
777 const aarch64_inst *inst ATTRIBUTE_UNUSED,
778 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
779 {
780 uint64_t rot = extract_field (self->fields[0], code, 0);
781 assert (rot < 2U);
782 info->imm.value = rot * 180 + 90;
783 return TRUE;
784 }
785
786 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
787 bfd_boolean
788 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
789 const aarch64_insn code,
790 const aarch64_inst *inst ATTRIBUTE_UNUSED,
791 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
792 {
793 uint64_t rot = extract_field (self->fields[0], code, 0);
794 assert (rot < 4U);
795 info->imm.value = rot * 90;
796 return TRUE;
797 }
798
799 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
800 bfd_boolean
801 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
802 aarch64_opnd_info *info, const aarch64_insn code,
803 const aarch64_inst *inst ATTRIBUTE_UNUSED,
804 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
805 {
806 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
807 return TRUE;
808 }
809
810 /* Decode arithmetic immediate for e.g.
811 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
812 bfd_boolean
813 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
814 aarch64_opnd_info *info, const aarch64_insn code,
815 const aarch64_inst *inst ATTRIBUTE_UNUSED,
816 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
817 {
818 aarch64_insn value;
819
820 info->shifter.kind = AARCH64_MOD_LSL;
821 /* shift */
822 value = extract_field (FLD_shift, code, 0);
823 if (value >= 2)
824 return FALSE;
825 info->shifter.amount = value ? 12 : 0;
826 /* imm12 (unsigned) */
827 info->imm.value = extract_field (FLD_imm12, code, 0);
828
829 return TRUE;
830 }
831
832 /* Return true if VALUE is a valid logical immediate encoding, storing the
833 decoded value in *RESULT if so. ESIZE is the number of bytes in the
834 decoded immediate. */
835 static bfd_boolean
836 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
837 {
838 uint64_t imm, mask;
839 uint32_t N, R, S;
840 unsigned simd_size;
841
842 /* value is N:immr:imms. */
843 S = value & 0x3f;
844 R = (value >> 6) & 0x3f;
845 N = (value >> 12) & 0x1;
846
847 /* The immediate value consists of S+1 consecutive bits set to 1, left rotated
848 by SIMDsize - R (in other words, right rotated by R), then replicated. */
849 if (N != 0)
850 {
851 simd_size = 64;
852 mask = 0xffffffffffffffffull;
853 }
854 else
855 {
856 switch (S)
857 {
858 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
859 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
860 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
861 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
862 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
863 default: return FALSE;
864 }
865 mask = (1ull << simd_size) - 1;
866 /* Top bits are IGNORED. */
867 R &= simd_size - 1;
868 }
869
870 if (simd_size > esize * 8)
871 return FALSE;
872
873 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
874 if (S == simd_size - 1)
875 return FALSE;
876 /* S+1 consecutive bits to 1. */
877 /* NOTE: S can't be 63 due to detection above. */
878 imm = (1ull << (S + 1)) - 1;
879 /* Rotate to the left by simd_size - R. */
880 if (R != 0)
881 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
882 /* Replicate the value according to SIMD size. */
883 switch (simd_size)
884 {
885 case 2: imm = (imm << 2) | imm;
886 /* Fall through. */
887 case 4: imm = (imm << 4) | imm;
888 /* Fall through. */
889 case 8: imm = (imm << 8) | imm;
890 /* Fall through. */
891 case 16: imm = (imm << 16) | imm;
892 /* Fall through. */
893 case 32: imm = (imm << 32) | imm;
894 /* Fall through. */
895 case 64: break;
896 default: assert (0); return FALSE;
897 }
898
899 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
900
901 return TRUE;
902 }
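/* Worked example, for illustration only: for a 32-bit logical immediate the
   encoding N:immr:imms = 0:000001:000011 (0x43) selects a 32-bit element
   containing S + 1 = 4 consecutive ones rotated right by R = 1, giving
   0x80000007.  */
#if 0
static void
example_decode_limm (void)
{
  int64_t result;
  assert (decode_limm (4 /* esize */, 0x43, &result) && result == 0x80000007);
}
#endif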
903
904 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
905 bfd_boolean
906 aarch64_ext_limm (const aarch64_operand *self,
907 aarch64_opnd_info *info, const aarch64_insn code,
908 const aarch64_inst *inst,
909 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
910 {
911 uint32_t esize;
912 aarch64_insn value;
913
914 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
915 self->fields[2]);
916 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
917 return decode_limm (esize, value, &info->imm.value);
918 }
919
920 /* Decode a logical immediate for the BIC alias of AND (etc.). */
921 bfd_boolean
922 aarch64_ext_inv_limm (const aarch64_operand *self,
923 aarch64_opnd_info *info, const aarch64_insn code,
924 const aarch64_inst *inst,
925 aarch64_operand_error *errors)
926 {
927 if (!aarch64_ext_limm (self, info, code, inst, errors))
928 return FALSE;
929 info->imm.value = ~info->imm.value;
930 return TRUE;
931 }
932
933 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
934 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
935 bfd_boolean
936 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
937 aarch64_opnd_info *info,
938 const aarch64_insn code, const aarch64_inst *inst,
939 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
940 {
941 aarch64_insn value;
942
943 /* Rt */
944 info->reg.regno = extract_field (FLD_Rt, code, 0);
945
946 /* size */
947 value = extract_field (FLD_ldst_size, code, 0);
948 if (inst->opcode->iclass == ldstpair_indexed
949 || inst->opcode->iclass == ldstnapair_offs
950 || inst->opcode->iclass == ldstpair_off
951 || inst->opcode->iclass == loadlit)
952 {
953 enum aarch64_opnd_qualifier qualifier;
954 switch (value)
955 {
956 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
957 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
958 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
959 default: return FALSE;
960 }
961 info->qualifier = qualifier;
962 }
963 else
964 {
965 /* opc1:size */
966 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
967 if (value > 0x4)
968 return FALSE;
969 info->qualifier = get_sreg_qualifier_from_value (value);
970 }
971
972 return TRUE;
973 }
974
975 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
976 bfd_boolean
977 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
978 aarch64_opnd_info *info,
979 aarch64_insn code,
980 const aarch64_inst *inst ATTRIBUTE_UNUSED,
981 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
982 {
983 /* Rn */
984 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
985 return TRUE;
986 }
987
988 /* Decode the address operand for e.g.
989 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
990 bfd_boolean
991 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
992 aarch64_opnd_info *info,
993 aarch64_insn code, const aarch64_inst *inst,
994 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
995 {
996 info->qualifier = get_expected_qualifier (inst, info->idx);
997
998 /* Rn */
999 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1000
1001 /* simm9 */
1002 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1003 info->addr.offset.imm = sign_extend (imm, 8);
1004 if (extract_field (self->fields[2], code, 0) == 1) {
1005 info->addr.writeback = 1;
1006 info->addr.preind = 1;
1007 }
1008 return TRUE;
1009 }
1010
1011 /* Decode the address operand for e.g.
1012 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1013 bfd_boolean
1014 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
1015 aarch64_opnd_info *info,
1016 aarch64_insn code, const aarch64_inst *inst,
1017 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1018 {
1019 aarch64_insn S, value;
1020
1021 /* Rn */
1022 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1023 /* Rm */
1024 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1025 /* option */
1026 value = extract_field (FLD_option, code, 0);
1027 info->shifter.kind =
1028 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1029 /* Fix-up the shifter kind; although the table-driven approach is
1030 efficient, it is slightly inflexible, thus needing this fix-up. */
1031 if (info->shifter.kind == AARCH64_MOD_UXTX)
1032 info->shifter.kind = AARCH64_MOD_LSL;
1033 /* S */
1034 S = extract_field (FLD_S, code, 0);
1035 if (S == 0)
1036 {
1037 info->shifter.amount = 0;
1038 info->shifter.amount_present = 0;
1039 }
1040 else
1041 {
1042 int size;
1043 /* Need information in other operand(s) to help achieve the decoding
1044 from the 'S' field. */
1045 info->qualifier = get_expected_qualifier (inst, info->idx);
1046 /* Get the size of the data element that is accessed, which may be
1047 different from that of the source register size, e.g. in strb/ldrb. */
1048 size = aarch64_get_qualifier_esize (info->qualifier);
1049 info->shifter.amount = get_logsz (size);
1050 info->shifter.amount_present = 1;
1051 }
1052
1053 return TRUE;
1054 }
1055
1056 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
1057 bfd_boolean
1058 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1059 aarch64_insn code, const aarch64_inst *inst,
1060 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1061 {
1062 aarch64_insn imm;
1063 info->qualifier = get_expected_qualifier (inst, info->idx);
1064
1065 /* Rn */
1066 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1067 /* simm (imm9 or imm7) */
1068 imm = extract_field (self->fields[0], code, 0);
1069 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1070 if (self->fields[0] == FLD_imm7
1071 || info->qualifier == AARCH64_OPND_QLF_imm_tag)
1072 /* scaled immediate in ld/st pair instructions. */
1073 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1074 /* qualifier */
1075 if (inst->opcode->iclass == ldst_unscaled
1076 || inst->opcode->iclass == ldstnapair_offs
1077 || inst->opcode->iclass == ldstpair_off
1078 || inst->opcode->iclass == ldst_unpriv)
1079 info->addr.writeback = 0;
1080 else
1081 {
1082 /* pre/post- index */
1083 info->addr.writeback = 1;
1084 if (extract_field (self->fields[1], code, 0) == 1)
1085 info->addr.preind = 1;
1086 else
1087 info->addr.postind = 1;
1088 }
1089
1090 return TRUE;
1091 }
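/* Worked example, for illustration only: for LDP <Xt1>, <Xt2>, [<Xn|SP>, #<imm>]
   the imm7 field is a signed multiple of the access size, so a raw value of
   0x7e sign-extends to -2 and scales by 8 bytes to an offset of -16.  */
#if 0
static void
example_ldp_offset (void)
{
  int64_t offset = sign_extend (0x7e, 6) * 8;
  assert (offset == -16);
}
#endif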
1092
1093 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1094 bfd_boolean
1095 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1096 aarch64_insn code,
1097 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1098 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1099 {
1100 int shift;
1101 info->qualifier = get_expected_qualifier (inst, info->idx);
1102 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1103 /* Rn */
1104 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1105 /* uimm12 */
1106 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1107 return TRUE;
1108 }
1109
1110 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1111 bfd_boolean
1112 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1113 aarch64_insn code,
1114 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1115 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1116 {
1117 aarch64_insn imm;
1118
1119 info->qualifier = get_expected_qualifier (inst, info->idx);
1120 /* Rn */
1121 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1122 /* simm10 */
1123 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1124 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1125 if (extract_field (self->fields[3], code, 0) == 1) {
1126 info->addr.writeback = 1;
1127 info->addr.preind = 1;
1128 }
1129 return TRUE;
1130 }
1131
1132 /* Decode the address operand for e.g.
1133 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1134 bfd_boolean
1135 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1136 aarch64_opnd_info *info,
1137 aarch64_insn code, const aarch64_inst *inst,
1138 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1139 {
1140 /* The opcode dependent area stores the number of elements in
1141 each structure to be loaded/stored. */
1142 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1143
1144 /* Rn */
1145 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1146 /* Rm | #<amount> */
1147 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1148 if (info->addr.offset.regno == 31)
1149 {
1150 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1151 /* Special handling of loading a single structure to all lanes. */
1152 info->addr.offset.imm = (is_ld1r ? 1
1153 : inst->operands[0].reglist.num_regs)
1154 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1155 else
1156 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1157 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1158 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1159 }
1160 else
1161 info->addr.offset.is_reg = 1;
1162 info->addr.writeback = 1;
1163
1164 return TRUE;
1165 }
1166
1167 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1168 bfd_boolean
1169 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1170 aarch64_opnd_info *info,
1171 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1172 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1173 {
1174 aarch64_insn value;
1175 /* cond */
1176 value = extract_field (FLD_cond, code, 0);
1177 info->cond = get_cond_from_value (value);
1178 return TRUE;
1179 }
1180
1181 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1182 bfd_boolean
1183 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1184 aarch64_opnd_info *info,
1185 aarch64_insn code,
1186 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1187 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1188 {
1189 /* op0:op1:CRn:CRm:op2 */
1190 info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1191 FLD_CRm, FLD_op2);
1192 info->sysreg.flags = 0;
1193
1194 /* If this is a system instruction, check which restrictions should apply to
1195 the register value during decoding; these will be enforced then. */
1196 if (inst->opcode->iclass == ic_system)
1197 {
1198 /* Check to see if it's read-only, else check if it's write-only.
1199 If it's both or unspecified, don't care. */
1200 if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1201 info->sysreg.flags = F_REG_READ;
1202 else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1203 == F_SYS_WRITE)
1204 info->sysreg.flags = F_REG_WRITE;
1205 }
1206
1207 return TRUE;
1208 }
1209
1210 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1211 bfd_boolean
1212 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1213 aarch64_opnd_info *info, aarch64_insn code,
1214 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1215 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1216 {
1217 int i;
1218 /* op1:op2 */
1219 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1220 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1221 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1222 return TRUE;
1223 /* Reserved value in <pstatefield>. */
1224 return FALSE;
1225 }
1226
1227 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1228 bfd_boolean
1229 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1230 aarch64_opnd_info *info,
1231 aarch64_insn code,
1232 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1233 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1234 {
1235 int i;
1236 aarch64_insn value;
1237 const aarch64_sys_ins_reg *sysins_ops;
1238 /* op0:op1:CRn:CRm:op2 */
1239 value = extract_fields (code, 0, 5,
1240 FLD_op0, FLD_op1, FLD_CRn,
1241 FLD_CRm, FLD_op2);
1242
1243 switch (info->type)
1244 {
1245 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1246 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1247 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1248 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1249 case AARCH64_OPND_SYSREG_SR:
1250 sysins_ops = aarch64_sys_regs_sr;
1251 /* Let's remove op2 for rctx. Refer to comments in the definition of
1252 aarch64_sys_regs_sr[]. */
1253 value = value & ~(0x7);
1254 break;
1255 default: assert (0); return FALSE;
1256 }
1257
1258 for (i = 0; sysins_ops[i].name != NULL; ++i)
1259 if (sysins_ops[i].value == value)
1260 {
1261 info->sysins_op = sysins_ops + i;
1262 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1263 info->sysins_op->name,
1264 (unsigned)info->sysins_op->value,
1265 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1266 return TRUE;
1267 }
1268
1269 return FALSE;
1270 }
1271
1272 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1273
1274 bfd_boolean
1275 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1276 aarch64_opnd_info *info,
1277 aarch64_insn code,
1278 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1279 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1280 {
1281 /* CRm */
1282 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1283 return TRUE;
1284 }
1285
1286 /* Decode the prefetch operation option operand for e.g.
1287 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1288
1289 bfd_boolean
1290 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1291 aarch64_opnd_info *info,
1292 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1293 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1294 {
1295 /* prfop in Rt */
1296 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1297 return TRUE;
1298 }
1299
1300 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1301 to the matching name/value pair in aarch64_hint_options. */
1302
1303 bfd_boolean
1304 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1305 aarch64_opnd_info *info,
1306 aarch64_insn code,
1307 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1308 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1309 {
1310 /* CRm:op2. */
1311 unsigned hint_number;
1312 int i;
1313
1314 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1315
1316 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1317 {
1318 if (hint_number == HINT_VAL (aarch64_hint_options[i].value))
1319 {
1320 info->hint_option = &(aarch64_hint_options[i]);
1321 return TRUE;
1322 }
1323 }
1324
1325 return FALSE;
1326 }
1327
1328 /* Decode the extended register operand for e.g.
1329 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1330 bfd_boolean
1331 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1332 aarch64_opnd_info *info,
1333 aarch64_insn code,
1334 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1335 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1336 {
1337 aarch64_insn value;
1338
1339 /* Rm */
1340 info->reg.regno = extract_field (FLD_Rm, code, 0);
1341 /* option */
1342 value = extract_field (FLD_option, code, 0);
1343 info->shifter.kind =
1344 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1345 /* imm3 */
1346 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1347
1348 /* This makes the constraint checking happy. */
1349 info->shifter.operator_present = 1;
1350
1351 /* Assume inst->operands[0].qualifier has been resolved. */
1352 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1353 info->qualifier = AARCH64_OPND_QLF_W;
1354 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1355 && (info->shifter.kind == AARCH64_MOD_UXTX
1356 || info->shifter.kind == AARCH64_MOD_SXTX))
1357 info->qualifier = AARCH64_OPND_QLF_X;
1358
1359 return TRUE;
1360 }
1361
1362 /* Decode the shifted register operand for e.g.
1363 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1364 bfd_boolean
1365 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1366 aarch64_opnd_info *info,
1367 aarch64_insn code,
1368 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1369 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1370 {
1371 aarch64_insn value;
1372
1373 /* Rm */
1374 info->reg.regno = extract_field (FLD_Rm, code, 0);
1375 /* shift */
1376 value = extract_field (FLD_shift, code, 0);
1377 info->shifter.kind =
1378 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1379 if (info->shifter.kind == AARCH64_MOD_ROR
1380 && inst->opcode->iclass != log_shift)
1381 /* ROR is not available for the shifted register operand in arithmetic
1382 instructions. */
1383 return FALSE;
1384 /* imm6 */
1385 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1386
1387 /* This makes the constraint checking happy. */
1388 info->shifter.operator_present = 1;
1389
1390 return TRUE;
1391 }
1392
1393 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1394 where <offset> is given by the OFFSET parameter and where <factor> is
1395 1 plus SELF's operand-dependent value. fields[0] specifies the field
1396 that holds <base>. */
1397 static bfd_boolean
1398 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1399 aarch64_opnd_info *info, aarch64_insn code,
1400 int64_t offset)
1401 {
1402 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1403 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1404 info->addr.offset.is_reg = FALSE;
1405 info->addr.writeback = FALSE;
1406 info->addr.preind = TRUE;
1407 if (offset != 0)
1408 info->shifter.kind = AARCH64_MOD_MUL_VL;
1409 info->shifter.amount = 1;
1410 info->shifter.operator_present = (info->addr.offset.imm != 0);
1411 info->shifter.amount_present = FALSE;
1412 return TRUE;
1413 }
1414
1415 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1416 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1417 SELF's operand-dependent value. fields[0] specifies the field that
1418 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1419 bfd_boolean
1420 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1421 aarch64_opnd_info *info, aarch64_insn code,
1422 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1423 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1424 {
1425 int offset;
1426
1427 offset = extract_field (FLD_SVE_imm4, code, 0);
1428 offset = ((offset + 8) & 15) - 8;
1429 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1430 }
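/* Worked example, for illustration only (the <factor> of 2 below is
   hypothetical): a raw SVE_imm4 value of 0b1101 wraps to -3, which would be
   printed as #-6, MUL VL when the operand-dependent factor is 2.  */
#if 0
static void
example_simm4_mul_vl (void)
{
  int offset = 0xd;			/* Raw 4-bit field value.  */
  offset = ((offset + 8) & 15) - 8;	/* Two's-complement wrap to -3.  */
  assert (offset == -3 && offset * 2 == -6);
}
#endif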
1431
1432 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1433 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1434 SELF's operand-dependent value. fields[0] specifies the field that
1435 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1436 bfd_boolean
1437 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1438 aarch64_opnd_info *info, aarch64_insn code,
1439 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1440 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1441 {
1442 int offset;
1443
1444 offset = extract_field (FLD_SVE_imm6, code, 0);
1445 offset = (((offset + 32) & 63) - 32);
1446 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1447 }
1448
1449 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1450 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1451 SELF's operand-dependent value. fields[0] specifies the field that
1452 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1453 and imm3 fields, with imm3 being the less-significant part. */
1454 bfd_boolean
1455 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1456 aarch64_opnd_info *info,
1457 aarch64_insn code,
1458 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1459 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1460 {
1461 int offset;
1462
1463 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1464 offset = (((offset + 256) & 511) - 256);
1465 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1466 }
1467
1468 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1469 is given by the OFFSET parameter and where <shift> is SELF's operand-
1470 dependent value. fields[0] specifies the base register field <base>. */
1471 static bfd_boolean
1472 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1473 aarch64_opnd_info *info, aarch64_insn code,
1474 int64_t offset)
1475 {
1476 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1477 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1478 info->addr.offset.is_reg = FALSE;
1479 info->addr.writeback = FALSE;
1480 info->addr.preind = TRUE;
1481 info->shifter.operator_present = FALSE;
1482 info->shifter.amount_present = FALSE;
1483 return TRUE;
1484 }
1485
1486 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1487 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1488 value. fields[0] specifies the base register field. */
1489 bfd_boolean
1490 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1491 aarch64_opnd_info *info, aarch64_insn code,
1492 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1493 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1494 {
1495 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1496 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1497 }
1498
1499 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1500 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1501 value. fields[0] specifies the base register field. */
1502 bfd_boolean
1503 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1504 aarch64_opnd_info *info, aarch64_insn code,
1505 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1506 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1507 {
1508 int offset = extract_field (FLD_SVE_imm6, code, 0);
1509 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1510 }
1511
1512 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1513 is SELF's operand-dependent value. fields[0] specifies the base
1514 register field and fields[1] specifies the offset register field. */
1515 bfd_boolean
1516 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1517 aarch64_opnd_info *info, aarch64_insn code,
1518 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1519 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1520 {
1521 int index_regno;
1522
1523 index_regno = extract_field (self->fields[1], code, 0);
1524 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1525 return FALSE;
1526
1527 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1528 info->addr.offset.regno = index_regno;
1529 info->addr.offset.is_reg = TRUE;
1530 info->addr.writeback = FALSE;
1531 info->addr.preind = TRUE;
1532 info->shifter.kind = AARCH64_MOD_LSL;
1533 info->shifter.amount = get_operand_specific_data (self);
1534 info->shifter.operator_present = (info->shifter.amount != 0);
1535 info->shifter.amount_present = (info->shifter.amount != 0);
1536 return TRUE;
1537 }
1538
1539 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1540 <shift> is SELF's operand-dependent value. fields[0] specifies the
1541 base register field, fields[1] specifies the offset register field and
1542 fields[2] is a single-bit field that selects SXTW over UXTW. */
1543 bfd_boolean
1544 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1545 aarch64_opnd_info *info, aarch64_insn code,
1546 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1547 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1548 {
1549 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1550 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1551 info->addr.offset.is_reg = TRUE;
1552 info->addr.writeback = FALSE;
1553 info->addr.preind = TRUE;
1554 if (extract_field (self->fields[2], code, 0))
1555 info->shifter.kind = AARCH64_MOD_SXTW;
1556 else
1557 info->shifter.kind = AARCH64_MOD_UXTW;
1558 info->shifter.amount = get_operand_specific_data (self);
1559 info->shifter.operator_present = TRUE;
1560 info->shifter.amount_present = (info->shifter.amount != 0);
1561 return TRUE;
1562 }
1563
1564 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1565 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1566 fields[0] specifies the base register field. */
1567 bfd_boolean
1568 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1569 aarch64_opnd_info *info, aarch64_insn code,
1570 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1571 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1572 {
1573 int offset = extract_field (FLD_imm5, code, 0);
1574 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1575 }
1576
1577 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1578 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1579 number. fields[0] specifies the base register field and fields[1]
1580 specifies the offset register field. */
1581 static bfd_boolean
1582 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1583 aarch64_insn code, enum aarch64_modifier_kind kind)
1584 {
1585 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1586 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1587 info->addr.offset.is_reg = TRUE;
1588 info->addr.writeback = FALSE;
1589 info->addr.preind = TRUE;
1590 info->shifter.kind = kind;
1591 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1592 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1593 || info->shifter.amount != 0);
1594 info->shifter.amount_present = (info->shifter.amount != 0);
1595 return TRUE;
1596 }
1597
1598 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1599 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1600 field and fields[1] specifies the offset register field. */
1601 bfd_boolean
1602 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1603 aarch64_opnd_info *info, aarch64_insn code,
1604 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1605 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1606 {
1607 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1608 }
1609
1610 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1611 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1612 field and fields[1] specifies the offset register field. */
1613 bfd_boolean
1614 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1615 aarch64_opnd_info *info, aarch64_insn code,
1616 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1617 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1618 {
1619 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1620 }
1621
1622 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1623 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1624 field and fields[1] specifies the offset register field. */
1625 bfd_boolean
1626 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1627 aarch64_opnd_info *info, aarch64_insn code,
1628 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1629 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1630 {
1631 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1632 }
1633
1634 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1635 has the raw field value and that the low 8 bits decode to VALUE. */
1636 static bfd_boolean
1637 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1638 {
1639 info->shifter.kind = AARCH64_MOD_LSL;
1640 info->shifter.amount = 0;
1641 if (info->imm.value & 0x100)
1642 {
1643 if (value == 0)
1644 /* Decode 0x100 as #0, LSL #8. */
1645 info->shifter.amount = 8;
1646 else
1647 value *= 256;
1648 }
1649 info->shifter.operator_present = (info->shifter.amount != 0);
1650 info->shifter.amount_present = (info->shifter.amount != 0);
1651 info->imm.value = value;
1652 return TRUE;
1653 }
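/* Worked example, for illustration only: a raw 9-bit field of 0x1ff has bit 8
   set and a non-zero low byte, so it decodes to the plain immediate 0xff00,
   whereas a raw 0x100 is kept as "#0, LSL #8" so that it assembles back to
   the same encoding.  */
#if 0
static void
example_sve_aimm (void)
{
  aarch64_opnd_info info = { 0 };
  info.imm.value = 0x1ff;
  decode_sve_aimm (&info, (uint8_t) 0xff);
  assert (info.imm.value == 0xff00 && info.shifter.amount == 0);
}
#endif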
1654
1655 /* Decode an SVE ADD/SUB immediate. */
1656 bfd_boolean
1657 aarch64_ext_sve_aimm (const aarch64_operand *self,
1658 aarch64_opnd_info *info, const aarch64_insn code,
1659 const aarch64_inst *inst,
1660 aarch64_operand_error *errors)
1661 {
1662 return (aarch64_ext_imm (self, info, code, inst, errors)
1663 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1664 }
1665
1666 /* Decode an SVE CPY/DUP immediate. */
1667 bfd_boolean
1668 aarch64_ext_sve_asimm (const aarch64_operand *self,
1669 aarch64_opnd_info *info, const aarch64_insn code,
1670 const aarch64_inst *inst,
1671 aarch64_operand_error *errors)
1672 {
1673 return (aarch64_ext_imm (self, info, code, inst, errors)
1674 && decode_sve_aimm (info, (int8_t) info->imm.value));
1675 }
1676
1677 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1678 The fields array specifies which field to use. */
1679 bfd_boolean
1680 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1681 aarch64_opnd_info *info, aarch64_insn code,
1682 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1683 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1684 {
1685 if (extract_field (self->fields[0], code, 0))
1686 info->imm.value = 0x3f800000;
1687 else
1688 info->imm.value = 0x3f000000;
1689 info->imm.is_fp = TRUE;
1690 return TRUE;
1691 }
1692
1693 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1694 The fields array specifies which field to use. */
1695 bfd_boolean
1696 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1697 aarch64_opnd_info *info, aarch64_insn code,
1698 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1699 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1700 {
1701 if (extract_field (self->fields[0], code, 0))
1702 info->imm.value = 0x40000000;
1703 else
1704 info->imm.value = 0x3f000000;
1705 info->imm.is_fp = TRUE;
1706 return TRUE;
1707 }
1708
1709 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1710 The fields array specifies which field to use. */
1711 bfd_boolean
1712 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1713 aarch64_opnd_info *info, aarch64_insn code,
1714 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1715 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1716 {
1717 if (extract_field (self->fields[0], code, 0))
1718 info->imm.value = 0x3f800000;
1719 else
1720 info->imm.value = 0x0;
1721 info->imm.is_fp = TRUE;
1722 return TRUE;
1723 }
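
/* The single-precision bit patterns used by the three extractors above are
   0x3f000000 for 0.5, 0x3f800000 for 1.0 and 0x40000000 for 2.0 (standard
   IEEE 754 encodings); info->imm.is_fp is set so that they are later treated
   as floating-point immediates rather than integers.  */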
1724
1725 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1726 array specifies which field to use for Zn. MM is encoded in the
1727 concatenation of imm5 and SVE_tszh, with imm5 being the less
1728 significant part. */
1729 bfd_boolean
1730 aarch64_ext_sve_index (const aarch64_operand *self,
1731 aarch64_opnd_info *info, aarch64_insn code,
1732 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1733 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1734 {
1735 int val;
1736
1737 info->reglane.regno = extract_field (self->fields[0], code, 0);
1738 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1739 if ((val & 31) == 0)
1740 return 0;
1741 while ((val & 1) == 0)
1742 val /= 2;
1743 info->reglane.index = val / 2;
1744 return TRUE;
1745 }
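
/* A worked example of the triangular encoding: tszh:imm5 = 0b0101100 (44)
   has two trailing zero bits (which select the element size variant; see the
   sve_index case in aarch64_decode_variant_using_iclass below); dividing
   those bits out leaves 0b01011, so the index is 0b01011 >> 1 = 5.  */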
1746
1747 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1748 bfd_boolean
1749 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1750 aarch64_opnd_info *info, const aarch64_insn code,
1751 const aarch64_inst *inst,
1752 aarch64_operand_error *errors)
1753 {
1754 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1755 return (aarch64_ext_limm (self, info, code, inst, errors)
1756 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1757 }
1758
1759 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1760 and where MM occupies the most-significant part. The operand-dependent
1761 value specifies the number of bits in Zn. */
1762 bfd_boolean
1763 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1764 aarch64_opnd_info *info, aarch64_insn code,
1765 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1766 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1767 {
1768 unsigned int reg_bits = get_operand_specific_data (self);
1769 unsigned int val = extract_all_fields (self, code);
1770 info->reglane.regno = val & ((1 << reg_bits) - 1);
1771 info->reglane.index = val >> reg_bits;
1772 return TRUE;
1773 }
1774
1775 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1776 to use for Zn. The opcode-dependent value specifies the number
1777 of registers in the list. */
1778 bfd_boolean
1779 aarch64_ext_sve_reglist (const aarch64_operand *self,
1780 aarch64_opnd_info *info, aarch64_insn code,
1781 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1782 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1783 {
1784 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1785 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1786 return TRUE;
1787 }
1788
1789 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1790 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1791 field. */
1792 bfd_boolean
1793 aarch64_ext_sve_scale (const aarch64_operand *self,
1794 aarch64_opnd_info *info, aarch64_insn code,
1795 const aarch64_inst *inst, aarch64_operand_error *errors)
1796 {
1797 int val;
1798
1799 if (!aarch64_ext_imm (self, info, code, inst, errors))
1800 return FALSE;
1801 val = extract_field (FLD_SVE_imm4, code, 0);
1802 info->shifter.kind = AARCH64_MOD_MUL;
1803 info->shifter.amount = val + 1;
1804 info->shifter.operator_present = (val != 0);
1805 info->shifter.amount_present = (val != 0);
1806 return TRUE;
1807 }
1808
1809 /* Return the top set bit in VALUE, which is expected to be relatively
1810 small. */
1811 static uint64_t
1812 get_top_bit (uint64_t value)
1813 {
1814 while ((value & -value) != value)
1815 value -= value & -value;
1816 return value;
1817 }
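
/* For example, get_top_bit (0b1011) clears the low set bits one at a time
   (0b1011 -> 0b1010 -> 0b1000) and returns 8, i.e. the value of the most
   significant set bit rather than its position.  */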
1818
1819 /* Decode an SVE shift-left immediate. */
1820 bfd_boolean
1821 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1822 aarch64_opnd_info *info, const aarch64_insn code,
1823 const aarch64_inst *inst, aarch64_operand_error *errors)
1824 {
1825 if (!aarch64_ext_imm (self, info, code, inst, errors)
1826 || info->imm.value == 0)
1827 return FALSE;
1828
1829 info->imm.value -= get_top_bit (info->imm.value);
1830 return TRUE;
1831 }
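
/* For example, if the extracted immediate field value is 0b1011 (11), its
   top set bit is 8 and the decoded left-shift amount is 11 - 8 = #3; the top
   bit itself only encodes the element size (see the sve_shift_* cases in
   aarch64_decode_variant_using_iclass) and is not part of the shift.  */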
1832
1833 /* Decode an SVE shift-right immediate. */
1834 bfd_boolean
1835 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1836 aarch64_opnd_info *info, const aarch64_insn code,
1837 const aarch64_inst *inst, aarch64_operand_error *errors)
1838 {
1839 if (!aarch64_ext_imm (self, info, code, inst, errors)
1840 || info->imm.value == 0)
1841 return FALSE;
1842
1843 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1844 return TRUE;
1845 }
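
/* Right shifts are encoded "downwards": the decoded amount is twice the top
   set bit minus the raw value.  For example, a raw value of 0b1011 (11) has
   top bit 8, giving a shift of 2 * 8 - 11 = #5.  */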
1846 \f
1847 /* Bitfields that are commonly used to encode certain operands' information
1848 may be partially used as part of the base opcode in some instructions.
1849 For example, the bit 1 of the field 'size' in
1850 FCVTXN <Vb><d>, <Va><n>
1851 is actually part of the base opcode, while only size<0> is available
1852 for encoding the register type. Another example is the AdvSIMD
1853 instruction ORR (register), in which the field 'size' is also used for
1854 the base opcode, leaving only the field 'Q' available to encode the
1855 vector register arrangement specifier '8B' or '16B'.
1856
1857 This function tries to deduce the qualifier from the value of partially
1858 constrained field(s). Given the VALUE of such a field or fields, the
1859 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1860 operand encoding), the function returns the matching qualifier or
1861 AARCH64_OPND_QLF_NIL if nothing matches.
1862
1863 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1864 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1865 may end with AARCH64_OPND_QLF_NIL. */
1866
1867 static enum aarch64_opnd_qualifier
1868 get_qualifier_from_partial_encoding (aarch64_insn value,
1869 const enum aarch64_opnd_qualifier* \
1870 candidates,
1871 aarch64_insn mask)
1872 {
1873 int i;
1874 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1875 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1876 {
1877 aarch64_insn standard_value;
1878 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1879 break;
1880 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1881 if ((standard_value & mask) == (value & mask))
1882 return candidates[i];
1883 }
1884 return AARCH64_OPND_QLF_NIL;
1885 }
1886
1887 /* Given a list of qualifier sequences, return all possible valid qualifiers
1888 for operand IDX in QUALIFIERS.
1889 Assume QUALIFIERS is an array whose length is large enough. */
1890
1891 static void
1892 get_operand_possible_qualifiers (int idx,
1893 const aarch64_opnd_qualifier_seq_t *list,
1894 enum aarch64_opnd_qualifier *qualifiers)
1895 {
1896 int i;
1897 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1898 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1899 break;
1900 }
1901
1902 /* Decode the size Q field for e.g. SHADD.
1903 We tag one operand with the qualifier according to the code;
1904 whether the qualifier is valid for this opcode is left to the
1905 semantic checking.  */
1906
1907 static int
1908 decode_sizeq (aarch64_inst *inst)
1909 {
1910 int idx;
1911 enum aarch64_opnd_qualifier qualifier;
1912 aarch64_insn code;
1913 aarch64_insn value, mask;
1914 enum aarch64_field_kind fld_sz;
1915 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1916
1917 if (inst->opcode->iclass == asisdlse
1918 || inst->opcode->iclass == asisdlsep
1919 || inst->opcode->iclass == asisdlso
1920 || inst->opcode->iclass == asisdlsop)
1921 fld_sz = FLD_vldst_size;
1922 else
1923 fld_sz = FLD_size;
1924
1925 code = inst->value;
1926 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1927 /* Work out which bits of the fields Q and size are actually
1928 available for operand encoding.  Opcodes like FMAXNM and FMLA have
1929 size[1] unavailable.  */
1930 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1931
1932 /* The index of the operand to be tagged with a qualifier and the qualifier
1933 itself are deduced from the value of the size and Q fields and the
1934 possible valid qualifier lists.  */
1935 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1936 DEBUG_TRACE ("key idx: %d", idx);
1937
1938 /* For most of the related instructions, size:Q is fully available for
1939 operand encoding.  */
1940 if (mask == 0x7)
1941 {
1942 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1943 return 1;
1944 }
1945
1946 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1947 candidates);
1948 #ifdef DEBUG_AARCH64
1949 if (debug_dump)
1950 {
1951 int i;
1952 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM
1953 && candidates[i] != AARCH64_OPND_QLF_NIL; ++i)
1954 DEBUG_TRACE ("qualifier %d: %s", i,
1955 aarch64_get_qualifier_name(candidates[i]));
1956 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1957 }
1958 #endif /* DEBUG_AARCH64 */
1959
1960 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1961
1962 if (qualifier == AARCH64_OPND_QLF_NIL)
1963 return 0;
1964
1965 inst->operands[idx].qualifier = qualifier;
1966 return 1;
1967 }
1968
1969 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1970 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1971
1972 static int
1973 decode_asimd_fcvt (aarch64_inst *inst)
1974 {
1975 aarch64_field field = {0, 0};
1976 aarch64_insn value;
1977 enum aarch64_opnd_qualifier qualifier;
1978
1979 gen_sub_field (FLD_size, 0, 1, &field);
1980 value = extract_field_2 (&field, inst->value, 0);
1981 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1982 : AARCH64_OPND_QLF_V_2D;
1983 switch (inst->opcode->op)
1984 {
1985 case OP_FCVTN:
1986 case OP_FCVTN2:
1987 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1988 inst->operands[1].qualifier = qualifier;
1989 break;
1990 case OP_FCVTL:
1991 case OP_FCVTL2:
1992 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1993 inst->operands[0].qualifier = qualifier;
1994 break;
1995 default:
1996 assert (0);
1997 return 0;
1998 }
1999
2000 return 1;
2001 }
2002
2003 /* Decode size[0], i.e. bit 22, for
2004 e.g. FCVTXN <Vb><d>, <Va><n>. */
2005
2006 static int
2007 decode_asisd_fcvtxn (aarch64_inst *inst)
2008 {
2009 aarch64_field field = {0, 0};
2010 gen_sub_field (FLD_size, 0, 1, &field);
2011 if (!extract_field_2 (&field, inst->value, 0))
2012 return 0;
2013 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2014 return 1;
2015 }
2016
2017 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
2018 static int
2019 decode_fcvt (aarch64_inst *inst)
2020 {
2021 enum aarch64_opnd_qualifier qualifier;
2022 aarch64_insn value;
2023 const aarch64_field field = {15, 2};
2024
2025 /* opc dstsize */
2026 value = extract_field_2 (&field, inst->value, 0);
2027 switch (value)
2028 {
2029 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2030 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2031 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2032 default: return 0;
2033 }
2034 inst->operands[0].qualifier = qualifier;
2035
2036 return 1;
2037 }
2038
2039 /* Do miscellaneous decodings that are not common enough to be driven by
2040 flags. */
2041
2042 static int
2043 do_misc_decoding (aarch64_inst *inst)
2044 {
2045 unsigned int value;
2046 switch (inst->opcode->op)
2047 {
2048 case OP_FCVT:
2049 return decode_fcvt (inst);
2050
2051 case OP_FCVTN:
2052 case OP_FCVTN2:
2053 case OP_FCVTL:
2054 case OP_FCVTL2:
2055 return decode_asimd_fcvt (inst);
2056
2057 case OP_FCVTXN_S:
2058 return decode_asisd_fcvtxn (inst);
2059
2060 case OP_MOV_P_P:
2061 case OP_MOVS_P_P:
2062 value = extract_field (FLD_SVE_Pn, inst->value, 0);
2063 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
2064 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2065
2066 case OP_MOV_Z_P_Z:
2067 return (extract_field (FLD_SVE_Zd, inst->value, 0)
2068 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2069
2070 case OP_MOV_Z_V:
2071 /* Index must be zero. */
2072 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2073 return value > 0 && value <= 16 && value == (value & -value);
2074
2075 case OP_MOV_Z_Z:
2076 return (extract_field (FLD_SVE_Zn, inst->value, 0)
2077 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2078
2079 case OP_MOV_Z_Zi:
2080 /* Index must be nonzero. */
2081 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2082 return value > 0 && value != (value & -value);
2083
2084 case OP_MOVM_P_P_P:
2085 return (extract_field (FLD_SVE_Pd, inst->value, 0)
2086 == extract_field (FLD_SVE_Pm, inst->value, 0));
2087
2088 case OP_MOVZS_P_P_P:
2089 case OP_MOVZ_P_P_P:
2090 return (extract_field (FLD_SVE_Pn, inst->value, 0)
2091 == extract_field (FLD_SVE_Pm, inst->value, 0));
2092
2093 case OP_NOTS_P_P_P_Z:
2094 case OP_NOT_P_P_P_Z:
2095 return (extract_field (FLD_SVE_Pm, inst->value, 0)
2096 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2097
2098 default:
2099 return 0;
2100 }
2101 }
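
/* As an example of the index checks above: for OP_MOV_Z_V the tszh:imm5
   value must be a single set bit no greater than 16 (e.g. 0b00100, a .S
   element with index 0), whereas OP_MOV_Z_Zi requires more than one set bit
   (e.g. 0b10100, a .S element with index 2), i.e. a nonzero index.  */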
2102
2103 /* Opcodes that have fields shared by multiple operands are usually marked
2104 with dedicated flags.  In this function, we detect such flags, decode the
2105 related field(s) and store the information in one of the related operands.
2106 The 'one' operand is not an arbitrary operand, but one of the operands that
2107 can accommodate all the information that has been decoded.  */
2108
2109 static int
2110 do_special_decoding (aarch64_inst *inst)
2111 {
2112 int idx;
2113 aarch64_insn value;
2114 /* Condition for truly conditionally executed instructions, e.g. b.cond.  */
2115 if (inst->opcode->flags & F_COND)
2116 {
2117 value = extract_field (FLD_cond2, inst->value, 0);
2118 inst->cond = get_cond_from_value (value);
2119 }
2120 /* 'sf' field. */
2121 if (inst->opcode->flags & F_SF)
2122 {
2123 idx = select_operand_for_sf_field_coding (inst->opcode);
2124 value = extract_field (FLD_sf, inst->value, 0);
2125 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2126 if ((inst->opcode->flags & F_N)
2127 && extract_field (FLD_N, inst->value, 0) != value)
2128 return 0;
2129 }
2130 /* 'lse_sz' field.  */
2131 if (inst->opcode->flags & F_LSE_SZ)
2132 {
2133 idx = select_operand_for_sf_field_coding (inst->opcode);
2134 value = extract_field (FLD_lse_sz, inst->value, 0);
2135 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2136 }
2137 /* size:Q fields. */
2138 if (inst->opcode->flags & F_SIZEQ)
2139 return decode_sizeq (inst);
2140
2141 if (inst->opcode->flags & F_FPTYPE)
2142 {
2143 idx = select_operand_for_fptype_field_coding (inst->opcode);
2144 value = extract_field (FLD_type, inst->value, 0);
2145 switch (value)
2146 {
2147 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2148 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2149 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2150 default: return 0;
2151 }
2152 }
2153
2154 if (inst->opcode->flags & F_SSIZE)
2155 {
2156 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
2157 of the base opcode. */
2158 aarch64_insn mask;
2159 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2160 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2161 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2162 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2163 /* For most of the related instructions, the 'size' field is fully available
2164 for operand encoding.  */
2165 if (mask == 0x3)
2166 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2167 else
2168 {
2169 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2170 candidates);
2171 inst->operands[idx].qualifier
2172 = get_qualifier_from_partial_encoding (value, candidates, mask);
2173 }
2174 }
2175
2176 if (inst->opcode->flags & F_T)
2177 {
2178 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2179 int num = 0;
2180 unsigned val, Q;
2181 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2182 == AARCH64_OPND_CLASS_SIMD_REG);
2183 /* imm5<3:0> q <t>
2184 0000 x reserved
2185 xxx1 0 8b
2186 xxx1 1 16b
2187 xx10 0 4h
2188 xx10 1 8h
2189 x100 0 2s
2190 x100 1 4s
2191 1000 0 reserved
2192 1000 1 2d */
2193 val = extract_field (FLD_imm5, inst->value, 0);
2194 while ((val & 0x1) == 0 && ++num <= 3)
2195 val >>= 1;
2196 if (num > 3)
2197 return 0;
2198 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2199 inst->operands[0].qualifier =
2200 get_vreg_qualifier_from_value ((num << 1) | Q);
2201 }
2202
2203 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2204 {
2205 /* Use Rt to encode in the case of e.g.
2206 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2207 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2208 if (idx == -1)
2209 {
2210 /* Otherwise use the result operand, which has to be an integer
2211 register.  */
2212 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2213 == AARCH64_OPND_CLASS_INT_REG);
2214 idx = 0;
2215 }
2216 assert (idx == 0 || idx == 1);
2217 value = extract_field (FLD_Q, inst->value, 0);
2218 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2219 }
2220
2221 if (inst->opcode->flags & F_LDS_SIZE)
2222 {
2223 aarch64_field field = {0, 0};
2224 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2225 == AARCH64_OPND_CLASS_INT_REG);
2226 gen_sub_field (FLD_opc, 0, 1, &field);
2227 value = extract_field_2 (&field, inst->value, 0);
2228 inst->operands[0].qualifier
2229 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2230 }
2231
2232 /* Miscellaneous decoding; done as the last step. */
2233 if (inst->opcode->flags & F_MISC)
2234 return do_misc_decoding (inst);
2235
2236 return 1;
2237 }
2238
2239 /* Converters converting a real opcode instruction to its alias form. */
2240
2241 /* ROR <Wd>, <Ws>, #<shift>
2242 is equivalent to:
2243 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2244 static int
2245 convert_extr_to_ror (aarch64_inst *inst)
2246 {
2247 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2248 {
2249 copy_operand_info (inst, 2, 3);
2250 inst->operands[3].type = AARCH64_OPND_NIL;
2251 return 1;
2252 }
2253 return 0;
2254 }
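
/* For example, this converter turns EXTR W0, W3, W3, #7 (both source
   registers identical) into ROR W0, W3, #7; when the two source registers
   differ the conversion is rejected and the EXTR mnemonic is kept.  */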
2255
2256 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2257 is equivalent to:
2258 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2259 static int
2260 convert_shll_to_xtl (aarch64_inst *inst)
2261 {
2262 if (inst->operands[2].imm.value == 0)
2263 {
2264 inst->operands[2].type = AARCH64_OPND_NIL;
2265 return 1;
2266 }
2267 return 0;
2268 }
2269
2270 /* Convert
2271 UBFM <Xd>, <Xn>, #<shift>, #63.
2272 to
2273 LSR <Xd>, <Xn>, #<shift>. */
2274 static int
2275 convert_bfm_to_sr (aarch64_inst *inst)
2276 {
2277 int64_t imms, val;
2278
2279 imms = inst->operands[3].imm.value;
2280 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2281 if (imms == val)
2282 {
2283 inst->operands[3].type = AARCH64_OPND_NIL;
2284 return 1;
2285 }
2286
2287 return 0;
2288 }
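
/* For example, UBFM X0, X1, #4, #63 is rewritten by this converter as
   LSR X0, X1, #4; the W-register form requires imms to be #31 instead
   of #63.  */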
2289
2290 /* Convert MOV to ORR. */
2291 static int
2292 convert_orr_to_mov (aarch64_inst *inst)
2293 {
2294 /* MOV <Vd>.<T>, <Vn>.<T>
2295 is equivalent to:
2296 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2297 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2298 {
2299 inst->operands[2].type = AARCH64_OPND_NIL;
2300 return 1;
2301 }
2302 return 0;
2303 }
2304
2305 /* When <imms> >= <immr>, the instruction written:
2306 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2307 is equivalent to:
2308 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2309
2310 static int
2311 convert_bfm_to_bfx (aarch64_inst *inst)
2312 {
2313 int64_t immr, imms;
2314
2315 immr = inst->operands[2].imm.value;
2316 imms = inst->operands[3].imm.value;
2317 if (imms >= immr)
2318 {
2319 int64_t lsb = immr;
2320 inst->operands[2].imm.value = lsb;
2321 inst->operands[3].imm.value = imms + 1 - lsb;
2322 /* The two opcodes have different qualifiers for
2323 the immediate operands; reset to help the checking. */
2324 reset_operand_qualifier (inst, 2);
2325 reset_operand_qualifier (inst, 3);
2326 return 1;
2327 }
2328
2329 return 0;
2330 }
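
/* For example, SBFM X0, X1, #2, #5 has imms >= immr and is rewritten as
   SBFX X0, X1, #2, #4: lsb = immr = 2 and width = imms + 1 - lsb = 4.  */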
2331
2332 /* When <imms> < <immr>, the instruction written:
2333 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2334 is equivalent to:
2335 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2336
2337 static int
2338 convert_bfm_to_bfi (aarch64_inst *inst)
2339 {
2340 int64_t immr, imms, val;
2341
2342 immr = inst->operands[2].imm.value;
2343 imms = inst->operands[3].imm.value;
2344 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2345 if (imms < immr)
2346 {
2347 inst->operands[2].imm.value = (val - immr) & (val - 1);
2348 inst->operands[3].imm.value = imms + 1;
2349 /* The two opcodes have different qualifiers for
2350 the immediate operands; reset to help the checking. */
2351 reset_operand_qualifier (inst, 2);
2352 reset_operand_qualifier (inst, 3);
2353 return 1;
2354 }
2355
2356 return 0;
2357 }
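
/* For example, SBFM X0, X1, #60, #3 has imms < immr and is rewritten as
   SBFIZ X0, X1, #4, #4: lsb = (64 - immr) & 0x3f = 4 and
   width = imms + 1 = 4.  */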
2358
2359 /* The instruction written:
2360 BFC <Xd>, #<lsb>, #<width>
2361 is equivalent to:
2362 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2363
2364 static int
2365 convert_bfm_to_bfc (aarch64_inst *inst)
2366 {
2367 int64_t immr, imms, val;
2368
2369 /* Should have been assured by the base opcode value. */
2370 assert (inst->operands[1].reg.regno == 0x1f);
2371
2372 immr = inst->operands[2].imm.value;
2373 imms = inst->operands[3].imm.value;
2374 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2375 if (imms < immr)
2376 {
2377 /* Drop XZR from the second operand. */
2378 copy_operand_info (inst, 1, 2);
2379 copy_operand_info (inst, 2, 3);
2380 inst->operands[3].type = AARCH64_OPND_NIL;
2381
2382 /* Recalculate the immediates. */
2383 inst->operands[1].imm.value = (val - immr) & (val - 1);
2384 inst->operands[2].imm.value = imms + 1;
2385
2386 /* The two opcodes have different qualifiers for the operands; reset to
2387 help the checking. */
2388 reset_operand_qualifier (inst, 1);
2389 reset_operand_qualifier (inst, 2);
2390 reset_operand_qualifier (inst, 3);
2391
2392 return 1;
2393 }
2394
2395 return 0;
2396 }
2397
2398 /* The instruction written:
2399 LSL <Xd>, <Xn>, #<shift>
2400 is equivalent to:
2401 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2402
2403 static int
2404 convert_ubfm_to_lsl (aarch64_inst *inst)
2405 {
2406 int64_t immr = inst->operands[2].imm.value;
2407 int64_t imms = inst->operands[3].imm.value;
2408 int64_t val
2409 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2410
2411 if ((immr == 0 && imms == val) || immr == imms + 1)
2412 {
2413 inst->operands[3].type = AARCH64_OPND_NIL;
2414 inst->operands[2].imm.value = val - imms;
2415 return 1;
2416 }
2417
2418 return 0;
2419 }
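
/* For example, UBFM X0, X1, #60, #59 satisfies immr == imms + 1, so this
   converter rewrites it as LSL X0, X1, #4, the shift amount being
   val - imms = 63 - 59 = 4.  */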
2420
2421 /* CINC <Wd>, <Wn>, <cond>
2422 is equivalent to:
2423 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2424 where <cond> is not AL or NV. */
2425
2426 static int
2427 convert_from_csel (aarch64_inst *inst)
2428 {
2429 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2430 && (inst->operands[3].cond->value & 0xe) != 0xe)
2431 {
2432 copy_operand_info (inst, 2, 3);
2433 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2434 inst->operands[3].type = AARCH64_OPND_NIL;
2435 return 1;
2436 }
2437 return 0;
2438 }
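
/* For example, CSINC W0, W5, W5, GE (identical Wn/Wm, condition not AL/NV)
   is rewritten as CINC W0, W5, LT, using the inverted condition.  */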
2439
2440 /* CSET <Wd>, <cond>
2441 is equivalent to:
2442 CSINC <Wd>, WZR, WZR, invert(<cond>)
2443 where <cond> is not AL or NV. */
2444
2445 static int
2446 convert_csinc_to_cset (aarch64_inst *inst)
2447 {
2448 if (inst->operands[1].reg.regno == 0x1f
2449 && inst->operands[2].reg.regno == 0x1f
2450 && (inst->operands[3].cond->value & 0xe) != 0xe)
2451 {
2452 copy_operand_info (inst, 1, 3);
2453 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2454 inst->operands[3].type = AARCH64_OPND_NIL;
2455 inst->operands[2].type = AARCH64_OPND_NIL;
2456 return 1;
2457 }
2458 return 0;
2459 }
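
/* For example, CSINC W0, WZR, WZR, NE (both sources WZR, condition not
   AL/NV) is rewritten as CSET W0, EQ, again with the condition inverted.  */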
2460
2461 /* MOV <Wd>, #<imm>
2462 is equivalent to:
2463 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2464
2465 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2466 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2467 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2468 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2469 machine-instruction mnemonic must be used. */
2470
2471 static int
2472 convert_movewide_to_mov (aarch64_inst *inst)
2473 {
2474 uint64_t value = inst->operands[1].imm.value;
2475 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2476 if (value == 0 && inst->operands[1].shifter.amount != 0)
2477 return 0;
2478 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2479 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2480 value <<= inst->operands[1].shifter.amount;
2481 /* As this is an alias converter, INST->OPCODE here is still the opcode
2482 of the real instruction.  */
2483 if (inst->opcode->op == OP_MOVN)
2484 {
2485 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2486 value = ~value;
2487 /* A MOVN has an immediate that could be encoded by MOVZ. */
2488 if (aarch64_wide_constant_p (value, is32, NULL))
2489 return 0;
2490 }
2491 inst->operands[1].imm.value = value;
2492 inst->operands[1].shifter.amount = 0;
2493 return 1;
2494 }
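
/* For example, MOVZ X0, #0x1234, LSL #16 is rewritten as
   MOV X0, #0x12340000 (the shift is folded into the immediate), while
   MOVZ X0, #0x0, LSL #16 keeps the machine mnemonic because a zero
   immediate with a nonzero shift is rejected above.  */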
2495
2496 /* MOV <Wd>, #<imm>
2497 is equivalent to:
2498 ORR <Wd>, WZR, #<imm>.
2499
2500 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2501 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2502 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2503 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2504 machine-instruction mnemonic must be used. */
2505
2506 static int
2507 convert_movebitmask_to_mov (aarch64_inst *inst)
2508 {
2509 int is32;
2510 uint64_t value;
2511
2512 /* Should have been assured by the base opcode value. */
2513 assert (inst->operands[1].reg.regno == 0x1f);
2514 copy_operand_info (inst, 1, 2);
2515 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2516 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2517 value = inst->operands[1].imm.value;
2518 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2519 instruction. */
2520 if (inst->operands[0].reg.regno != 0x1f
2521 && (aarch64_wide_constant_p (value, is32, NULL)
2522 || aarch64_wide_constant_p (~value, is32, NULL)))
2523 return 0;
2524
2525 inst->operands[2].type = AARCH64_OPND_NIL;
2526 return 1;
2527 }
2528
2529 /* Some alias opcodes are disassembled by being converted from their real-form.
2530 N.B. INST->OPCODE is the real opcode rather than the alias. */
2531
2532 static int
2533 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2534 {
2535 switch (alias->op)
2536 {
2537 case OP_ASR_IMM:
2538 case OP_LSR_IMM:
2539 return convert_bfm_to_sr (inst);
2540 case OP_LSL_IMM:
2541 return convert_ubfm_to_lsl (inst);
2542 case OP_CINC:
2543 case OP_CINV:
2544 case OP_CNEG:
2545 return convert_from_csel (inst);
2546 case OP_CSET:
2547 case OP_CSETM:
2548 return convert_csinc_to_cset (inst);
2549 case OP_UBFX:
2550 case OP_BFXIL:
2551 case OP_SBFX:
2552 return convert_bfm_to_bfx (inst);
2553 case OP_SBFIZ:
2554 case OP_BFI:
2555 case OP_UBFIZ:
2556 return convert_bfm_to_bfi (inst);
2557 case OP_BFC:
2558 return convert_bfm_to_bfc (inst);
2559 case OP_MOV_V:
2560 return convert_orr_to_mov (inst);
2561 case OP_MOV_IMM_WIDE:
2562 case OP_MOV_IMM_WIDEN:
2563 return convert_movewide_to_mov (inst);
2564 case OP_MOV_IMM_LOG:
2565 return convert_movebitmask_to_mov (inst);
2566 case OP_ROR_IMM:
2567 return convert_extr_to_ror (inst);
2568 case OP_SXTL:
2569 case OP_SXTL2:
2570 case OP_UXTL:
2571 case OP_UXTL2:
2572 return convert_shll_to_xtl (inst);
2573 default:
2574 return 0;
2575 }
2576 }
2577
2578 static bfd_boolean
2579 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2580 aarch64_inst *, int, aarch64_operand_error *errors);
2581
2582 /* Given the instruction information in *INST, check if the instruction has
2583 any alias form that can be used to represent *INST. If the answer is yes,
2584 update *INST to be in the form of the determined alias. */
2585
2586 /* In the opcode description table, the following flags are used in opcode
2587 entries to help establish the relations between the real and alias opcodes:
2588
2589 F_ALIAS: opcode is an alias
2590 F_HAS_ALIAS: opcode has alias(es)
2591 F_P1
2592 F_P2
2593 F_P3: Disassembly preference priority 1-3 (the larger the number,
2594 the higher the priority).  If nothing is specified, the
2595 priority defaults to 0, i.e. the lowest priority.
2596
2597 Although the relation between the machine and the alias instructions is not
2598 explicitly described, it can be easily determined from the base opcode
2599 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2600 description entries:
2601
2602 The mask of an alias opcode must be equal to or a super-set (i.e. more
2603 constrained) of that of the aliased opcode; so is the base opcode value.
2604
2605 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2606 && (opcode->mask & real->mask) == real->mask
2607 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2608 then OPCODE is an alias of, and only of, the REAL instruction
2609
2610 The alias relationship is forced flat-structured to keep related algorithm
2611 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2612
2613 During the disassembling, the decoding decision tree (in
2614 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2615 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2616 not specified), the disassembler will check whether any alias instruction
2617 exists for this real instruction.  If there is, the disassembler will try to
2618 disassemble the 32-bit binary again using the alias's rule, or try to
2619 convert the IR to the form of the alias.  In the case of multiple aliases,
2620 the aliases are tried one by one from the highest priority (currently the
2621 flag F_P3) to the lowest priority (no priority flag), and the first one that
2622 succeeds is adopted.
2623
2624 You may ask why there is a need for the conversion of IR from one form to
2625 another in handling certain aliases.  This is because, on the one hand, it
2626 avoids adding more operand code to handle unusual encoding/decoding; on the
2627 other hand, during the disassembling, the conversion is an effective approach
2628 to check the condition of an alias (as an alias may be adopted only if
2629 certain conditions are met).
2630
2631 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2632 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2633 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
2634
2635 static void
2636 determine_disassembling_preference (struct aarch64_inst *inst,
2637 aarch64_operand_error *errors)
2638 {
2639 const aarch64_opcode *opcode;
2640 const aarch64_opcode *alias;
2641
2642 opcode = inst->opcode;
2643
2644 /* This opcode does not have an alias, so use itself. */
2645 if (!opcode_has_alias (opcode))
2646 return;
2647
2648 alias = aarch64_find_alias_opcode (opcode);
2649 assert (alias);
2650
2651 #ifdef DEBUG_AARCH64
2652 if (debug_dump)
2653 {
2654 const aarch64_opcode *tmp = alias;
2655 printf ("#### LIST ordered: ");
2656 while (tmp)
2657 {
2658 printf ("%s, ", tmp->name);
2659 tmp = aarch64_find_next_alias_opcode (tmp);
2660 }
2661 printf ("\n");
2662 }
2663 #endif /* DEBUG_AARCH64 */
2664
2665 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2666 {
2667 DEBUG_TRACE ("try %s", alias->name);
2668 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2669
2670 /* An alias can be a pseudo opcode which will never be used in the
2671 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2672 aliasing AND. */
2673 if (pseudo_opcode_p (alias))
2674 {
2675 DEBUG_TRACE ("skip pseudo %s", alias->name);
2676 continue;
2677 }
2678
2679 if ((inst->value & alias->mask) != alias->opcode)
2680 {
2681 DEBUG_TRACE ("skip %s as base opcode does not match", alias->name);
2682 continue;
2683 }
2684 /* No need to do any complicated transformation on operands, if the alias
2685 opcode does not have any operand. */
2686 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2687 {
2688 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2689 aarch64_replace_opcode (inst, alias);
2690 return;
2691 }
2692 if (alias->flags & F_CONV)
2693 {
2694 aarch64_inst copy;
2695 memcpy (&copy, inst, sizeof (aarch64_inst));
2696 /* ALIAS is the preference as long as the instruction can be
2697 successfully converted to the form of ALIAS. */
2698 if (convert_to_alias (&copy, alias) == 1)
2699 {
2700 aarch64_replace_opcode (&copy, alias);
2701 assert (aarch64_match_operands_constraint (&copy, NULL));
2702 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2703 memcpy (inst, &copy, sizeof (aarch64_inst));
2704 return;
2705 }
2706 }
2707 else
2708 {
2709 /* Directly decode the alias opcode. */
2710 aarch64_inst temp;
2711 memset (&temp, '\0', sizeof (aarch64_inst));
2712 if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
2713 {
2714 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2715 memcpy (inst, &temp, sizeof (aarch64_inst));
2716 return;
2717 }
2718 }
2719 }
2720 }
2721
2722 /* Some instructions (including all SVE ones) use the instruction class
2723 to describe how a qualifiers_list index is represented in the instruction
2724 encoding. If INST is such an instruction, decode the appropriate fields
2725 and fill in the operand qualifiers accordingly. Return true if no
2726 problems are found. */
2727
2728 static bfd_boolean
2729 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2730 {
2731 int i, variant;
2732
2733 variant = 0;
2734 switch (inst->opcode->iclass)
2735 {
2736 case sve_cpy:
2737 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2738 break;
2739
2740 case sve_index:
2741 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2742 if ((i & 31) == 0)
2743 return FALSE;
2744 while ((i & 1) == 0)
2745 {
2746 i >>= 1;
2747 variant += 1;
2748 }
2749 break;
2750
2751 case sve_limm:
2752 /* Pick the smallest applicable element size. */
2753 if ((inst->value & 0x20600) == 0x600)
2754 variant = 0;
2755 else if ((inst->value & 0x20400) == 0x400)
2756 variant = 1;
2757 else if ((inst->value & 0x20000) == 0)
2758 variant = 2;
2759 else
2760 variant = 3;
2761 break;
2762
2763 case sve_misc:
2764 /* sve_misc instructions have only a single variant. */
2765 break;
2766
2767 case sve_movprfx:
2768 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2769 break;
2770
2771 case sve_pred_zm:
2772 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2773 break;
2774
2775 case sve_shift_pred:
2776 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2777 sve_shift:
2778 if (i == 0)
2779 return FALSE;
2780 while (i != 1)
2781 {
2782 i >>= 1;
2783 variant += 1;
2784 }
2785 break;
2786
2787 case sve_shift_unpred:
2788 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2789 goto sve_shift;
2790
2791 case sve_size_bhs:
2792 variant = extract_field (FLD_size, inst->value, 0);
2793 if (variant >= 3)
2794 return FALSE;
2795 break;
2796
2797 case sve_size_bhsd:
2798 variant = extract_field (FLD_size, inst->value, 0);
2799 break;
2800
2801 case sve_size_hsd:
2802 i = extract_field (FLD_size, inst->value, 0);
2803 if (i < 1)
2804 return FALSE;
2805 variant = i - 1;
2806 break;
2807
2808 case sve_size_sd:
2809 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2810 break;
2811
2812 default:
2813 /* No mapping between instruction class and qualifiers. */
2814 return TRUE;
2815 }
2816
2817 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2818 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2819 return TRUE;
2820 }
2821 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2822 fails, which means that CODE is not an instruction of OPCODE; otherwise
2823 return 1.
2824
2825 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2826 determined and used to disassemble CODE; this is done just before the
2827 return. */
2828
2829 static bfd_boolean
2830 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2831 aarch64_inst *inst, int noaliases_p,
2832 aarch64_operand_error *errors)
2833 {
2834 int i;
2835
2836 DEBUG_TRACE ("enter with %s", opcode->name);
2837
2838 assert (opcode && inst);
2839
2840 /* Clear inst. */
2841 memset (inst, '\0', sizeof (aarch64_inst));
2842
2843 /* Check the base opcode. */
2844 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2845 {
2846 DEBUG_TRACE ("base opcode match FAIL");
2847 goto decode_fail;
2848 }
2849
2850 inst->opcode = opcode;
2851 inst->value = code;
2852
2853 /* Assign operand codes and indexes. */
2854 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2855 {
2856 if (opcode->operands[i] == AARCH64_OPND_NIL)
2857 break;
2858 inst->operands[i].type = opcode->operands[i];
2859 inst->operands[i].idx = i;
2860 }
2861
2862 /* Call the opcode decoder indicated by flags. */
2863 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2864 {
2865 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2866 goto decode_fail;
2867 }
2868
2869 /* Possibly use the instruction class to determine the correct
2870 qualifier. */
2871 if (!aarch64_decode_variant_using_iclass (inst))
2872 {
2873 DEBUG_TRACE ("iclass-based decoder FAIL");
2874 goto decode_fail;
2875 }
2876
2877 /* Call operand decoders. */
2878 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2879 {
2880 const aarch64_operand *opnd;
2881 enum aarch64_opnd type;
2882
2883 type = opcode->operands[i];
2884 if (type == AARCH64_OPND_NIL)
2885 break;
2886 opnd = &aarch64_operands[type];
2887 if (operand_has_extractor (opnd)
2888 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
2889 errors)))
2890 {
2891 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2892 goto decode_fail;
2893 }
2894 }
2895
2896 /* If the opcode has a verifier, then check it now. */
2897 if (opcode->verifier
2898 && opcode->verifier (inst, code, 0, FALSE, errors, NULL) != ERR_OK)
2899 {
2900 DEBUG_TRACE ("operand verifier FAIL");
2901 goto decode_fail;
2902 }
2903
2904 /* Match the qualifiers. */
2905 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2906 {
2907 /* Arriving here, the CODE has been determined as a valid instruction
2908 of OPCODE and *INST has been filled with information of this OPCODE
2909 instruction. Before the return, check if the instruction has any
2910 alias and should be disassembled in the form of its alias instead.
2911 If the answer is yes, *INST will be updated. */
2912 if (!noaliases_p)
2913 determine_disassembling_preference (inst, errors);
2914 DEBUG_TRACE ("SUCCESS");
2915 return TRUE;
2916 }
2917 else
2918 {
2919 DEBUG_TRACE ("constraint matching FAIL");
2920 }
2921
2922 decode_fail:
2923 return FALSE;
2924 }
2925 \f
2926 /* This does some user-friendly fix-up to *INST.  It currently focuses on
2927 the adjustment of qualifiers to help the printed instruction be
2928 recognized/understood more easily.  */
2929
2930 static void
2931 user_friendly_fixup (aarch64_inst *inst)
2932 {
2933 switch (inst->opcode->iclass)
2934 {
2935 case testbranch:
2936 /* TBNZ Xn|Wn, #uimm6, label
2937 Test and Branch Not Zero: conditionally jumps to label if bit number
2938 uimm6 in register Xn is not zero. The bit number implies the width of
2939 the register, which may be written and should be disassembled as Wn if
2940 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2941 uimm6 is less than 32.  Limited to a branch offset range of +/- 32KiB.
2942 if (inst->operands[1].imm.value < 32)
2943 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2944 break;
2945 default: break;
2946 }
2947 }
2948
2949 /* Decode INSN and fill *INST with the instruction information.  An alias
2950 opcode may be filled in *INST if NOALIASES_P is FALSE.  Return ERR_OK on
2951 success.  */
2952
2953 enum err_type
2954 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2955 bfd_boolean noaliases_p,
2956 aarch64_operand_error *errors)
2957 {
2958 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2959
2960 #ifdef DEBUG_AARCH64
2961 if (debug_dump)
2962 {
2963 const aarch64_opcode *tmp = opcode;
2964 printf ("\n");
2965 DEBUG_TRACE ("opcode lookup:");
2966 while (tmp != NULL)
2967 {
2968 aarch64_verbose (" %s", tmp->name);
2969 tmp = aarch64_find_next_opcode (tmp);
2970 }
2971 }
2972 #endif /* DEBUG_AARCH64 */
2973
2974 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2975 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2976 opcode field and value, apart from the difference that one of them has an
2977 extra field as part of the opcode, but such a field is used for operand
2978 encoding in other opcode(s) ('immh' in the case of the example). */
2979 while (opcode != NULL)
2980 {
2981 /* But only one opcode can be decoded successfully, as the
2982 decoding routine will check the constraints carefully.  */
2983 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
2984 return ERR_OK;
2985 opcode = aarch64_find_next_opcode (opcode);
2986 }
2987
2988 return ERR_UND;
2989 }
2990
2991 /* Print operands. */
2992
2993 static void
2994 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2995 const aarch64_opnd_info *opnds, struct disassemble_info *info,
2996 bfd_boolean *has_notes)
2997 {
2998 char *notes = NULL;
2999 int i, pcrel_p, num_printed;
3000 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3001 {
3002 char str[128];
3003 /* We rely mainly on the opcode operand info; however, we also look into
3004 the inst->operands to support the disassembling of the optional
3005 operand.
3006 The two operand codes should be the same in all cases, apart from
3007 when the operand can be optional.  */
3008 if (opcode->operands[i] == AARCH64_OPND_NIL
3009 || opnds[i].type == AARCH64_OPND_NIL)
3010 break;
3011
3012 /* Generate the operand string in STR. */
3013 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
3014 &info->target, &notes);
3015
3016 /* Print the delimiter (taking account of omitted operand(s)). */
3017 if (str[0] != '\0')
3018 (*info->fprintf_func) (info->stream, "%s",
3019 num_printed++ == 0 ? "\t" : ", ");
3020
3021 /* Print the operand. */
3022 if (pcrel_p)
3023 (*info->print_address_func) (info->target, info);
3024 else
3025 (*info->fprintf_func) (info->stream, "%s", str);
3026 }
3027
3028 if (notes && !no_notes)
3029 {
3030 *has_notes = TRUE;
3031 (*info->fprintf_func) (info->stream, " // note: %s", notes);
3032 }
3033 }
3034
3035 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
3036
3037 static void
3038 remove_dot_suffix (char *name, const aarch64_inst *inst)
3039 {
3040 char *ptr;
3041 size_t len;
3042
3043 ptr = strchr (inst->opcode->name, '.');
3044 assert (ptr && inst->cond);
3045 len = ptr - inst->opcode->name;
3046 assert (len < 8);
3047 strncpy (name, inst->opcode->name, len);
3048 name[len] = '\0';
3049 }
3050
3051 /* Print the instruction mnemonic name. */
3052
3053 static void
3054 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3055 {
3056 if (inst->opcode->flags & F_COND)
3057 {
3058 /* For instructions that are truly conditionally executed, e.g. b.cond,
3059 prepare the full mnemonic name with the corresponding condition
3060 suffix. */
3061 char name[8];
3062
3063 remove_dot_suffix (name, inst);
3064 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
3065 }
3066 else
3067 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
3068 }
3069
3070 /* Decide whether we need to print a comment after the operands of
3071 instruction INST, and print it if so.  */
3072
3073 static void
3074 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
3075 {
3076 if (inst->opcode->flags & F_COND)
3077 {
3078 char name[8];
3079 unsigned int i, num_conds;
3080
3081 remove_dot_suffix (name, inst);
3082 num_conds = ARRAY_SIZE (inst->cond->names);
3083 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
3084 (*info->fprintf_func) (info->stream, "%s %s.%s",
3085 i == 1 ? " //" : ",",
3086 name, inst->cond->names[i]);
3087 }
3088 }
3089
3090 /* Build notes from verifiers into a string for printing. */
3091
3092 static void
3093 print_verifier_notes (aarch64_operand_error *detail,
3094 struct disassemble_info *info)
3095 {
3096 if (no_notes)
3097 return;
3098
3099 /* The output of the verifier cannot be a fatal error, otherwise the assembly
3100 would not have succeeded. We can safely ignore these. */
3101 assert (detail->non_fatal);
3102 assert (detail->error);
3103
3104 /* If there are multiple verifier messages, concat them up to 1k. */
3105 (*info->fprintf_func) (info->stream, " // note: %s", detail->error);
3106 if (detail->index >= 0)
3107 (*info->fprintf_func) (info->stream, " at operand %d", detail->index + 1);
3108 }
3109
3110 /* Print the instruction according to *INST. */
3111
3112 static void
3113 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
3114 const aarch64_insn code,
3115 struct disassemble_info *info,
3116 aarch64_operand_error *mismatch_details)
3117 {
3118 bfd_boolean has_notes = FALSE;
3119
3120 print_mnemonic_name (inst, info);
3121 print_operands (pc, inst->opcode, inst->operands, info, &has_notes);
3122 print_comment (inst, info);
3123
3124 /* If we've already printed a note, there is not enough space to print more,
3125 so exit.  Usually notes shouldn't overlap, so it shouldn't happen that we
3126 have a note from a register and an instruction at the same time.  */
3127 if (has_notes)
3128 return;
3129
3130 /* Always run the constraint verifiers; this is needed because the constraints
3131 need to maintain a global state regardless of whether the instruction has
3132 the flag set or not.  */
3133 enum err_type result = verify_constraints (inst, code, pc, FALSE,
3134 mismatch_details, &insn_sequence);
3135 switch (result)
3136 {
3137 case ERR_UND:
3138 case ERR_UNP:
3139 case ERR_NYI:
3140 assert (0);
3141 case ERR_VFI:
3142 print_verifier_notes (mismatch_details, info);
3143 break;
3144 default:
3145 break;
3146 }
3147 }
3148
3149 /* Entry-point of the instruction disassembler and printer. */
3150
3151 static void
3152 print_insn_aarch64_word (bfd_vma pc,
3153 uint32_t word,
3154 struct disassemble_info *info,
3155 aarch64_operand_error *errors)
3156 {
3157 static const char *err_msg[ERR_NR_ENTRIES+1] =
3158 {
3159 [ERR_OK] = "_",
3160 [ERR_UND] = "undefined",
3161 [ERR_UNP] = "unpredictable",
3162 [ERR_NYI] = "NYI"
3163 };
3164
3165 enum err_type ret;
3166 aarch64_inst inst;
3167
3168 info->insn_info_valid = 1;
3169 info->branch_delay_insns = 0;
3170 info->data_size = 0;
3171 info->target = 0;
3172 info->target2 = 0;
3173
3174 if (info->flags & INSN_HAS_RELOC)
3175 /* If the instruction has a reloc associated with it, then
3176 the offset field in the instruction will actually be the
3177 addend for the reloc. (If we are using REL type relocs).
3178 In such cases, we can ignore the pc when computing
3179 addresses, since the addend is not currently pc-relative. */
3180 pc = 0;
3181
3182 ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
3183
3184 if (((word >> 21) & 0x3ff) == 1)
3185 {
3186 /* RESERVED for ALES. */
3187 assert (ret != ERR_OK);
3188 ret = ERR_NYI;
3189 }
3190
3191 switch (ret)
3192 {
3193 case ERR_UND:
3194 case ERR_UNP:
3195 case ERR_NYI:
3196 /* Handle undefined instructions. */
3197 info->insn_type = dis_noninsn;
3198 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
3199 word, err_msg[ret]);
3200 break;
3201 case ERR_OK:
3202 user_friendly_fixup (&inst);
3203 print_aarch64_insn (pc, &inst, word, info, errors);
3204 break;
3205 default:
3206 abort ();
3207 }
3208 }
3209
3210 /* Disallow mapping symbols ($x, $d etc) from
3211 being displayed in symbol relative addresses. */
3212
3213 bfd_boolean
3214 aarch64_symbol_is_valid (asymbol * sym,
3215 struct disassemble_info * info ATTRIBUTE_UNUSED)
3216 {
3217 const char * name;
3218
3219 if (sym == NULL)
3220 return FALSE;
3221
3222 name = bfd_asymbol_name (sym);
3223
3224 return name
3225 && (name[0] != '$'
3226 || (name[1] != 'x' && name[1] != 'd')
3227 || (name[2] != '\0' && name[2] != '.'));
3228 }
3229
3230 /* Print data bytes on INFO->STREAM. */
3231
3232 static void
3233 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3234 uint32_t word,
3235 struct disassemble_info *info,
3236 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
3237 {
3238 switch (info->bytes_per_chunk)
3239 {
3240 case 1:
3241 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3242 break;
3243 case 2:
3244 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3245 break;
3246 case 4:
3247 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3248 break;
3249 default:
3250 abort ();
3251 }
3252 }
3253
3254 /* Try to infer the code or data type from a symbol.
3255 Returns nonzero if *MAP_TYPE was set. */
3256
3257 static int
3258 get_sym_code_type (struct disassemble_info *info, int n,
3259 enum map_type *map_type)
3260 {
3261 elf_symbol_type *es;
3262 unsigned int type;
3263 const char *name;
3264
3265 /* If the symbol is in a different section, ignore it. */
3266 if (info->section != NULL && info->section != info->symtab[n]->section)
3267 return FALSE;
3268
3269 es = *(elf_symbol_type **)(info->symtab + n);
3270 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3271
3272 /* If the symbol has function type then use that. */
3273 if (type == STT_FUNC)
3274 {
3275 *map_type = MAP_INSN;
3276 return TRUE;
3277 }
3278
3279 /* Check for mapping symbols. */
3280 name = bfd_asymbol_name(info->symtab[n]);
3281 if (name[0] == '$'
3282 && (name[1] == 'x' || name[1] == 'd')
3283 && (name[2] == '\0' || name[2] == '.'))
3284 {
3285 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3286 return TRUE;
3287 }
3288
3289 return FALSE;
3290 }
3291
3292 /* Entry-point of the AArch64 disassembler. */
3293
3294 int
3295 print_insn_aarch64 (bfd_vma pc,
3296 struct disassemble_info *info)
3297 {
3298 bfd_byte buffer[INSNLEN];
3299 int status;
3300 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *,
3301 aarch64_operand_error *);
3302 bfd_boolean found = FALSE;
3303 unsigned int size = 4;
3304 unsigned long data;
3305 aarch64_operand_error errors;
3306
3307 if (info->disassembler_options)
3308 {
3309 set_default_aarch64_dis_options (info);
3310
3311 parse_aarch64_dis_options (info->disassembler_options);
3312
3313 /* To avoid repeated parsing of these options, we remove them here. */
3314 info->disassembler_options = NULL;
3315 }
3316
3317 /* AArch64 instructions are always little-endian.  */
3318 info->endian_code = BFD_ENDIAN_LITTLE;
3319
3320 /* First check the full symtab for a mapping symbol, even if there
3321 are no usable non-mapping symbols for this address. */
3322 if (info->symtab_size != 0
3323 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3324 {
3325 enum map_type type = MAP_INSN;
3326 int last_sym = -1;
3327 bfd_vma addr;
3328 int n;
3329
3330 if (pc <= last_mapping_addr)
3331 last_mapping_sym = -1;
3332
3333 /* Start scanning at the start of the function, or wherever
3334 we finished last time. */
3335 n = info->symtab_pos + 1;
3336 if (n < last_mapping_sym)
3337 n = last_mapping_sym;
3338
3339 /* Scan up to the location being disassembled. */
3340 for (; n < info->symtab_size; n++)
3341 {
3342 addr = bfd_asymbol_value (info->symtab[n]);
3343 if (addr > pc)
3344 break;
3345 if (get_sym_code_type (info, n, &type))
3346 {
3347 last_sym = n;
3348 found = TRUE;
3349 }
3350 }
3351
3352 if (!found)
3353 {
3354 n = info->symtab_pos;
3355 if (n < last_mapping_sym)
3356 n = last_mapping_sym;
3357
3358 /* No mapping symbol found at this address. Look backwards
3359 for a preceding one.  */
3360 for (; n >= 0; n--)
3361 {
3362 if (get_sym_code_type (info, n, &type))
3363 {
3364 last_sym = n;
3365 found = TRUE;
3366 break;
3367 }
3368 }
3369 }
3370
3371 last_mapping_sym = last_sym;
3372 last_type = type;
3373
3374 /* Look a little bit ahead to see if we should print out
3375 less than four bytes of data. If there's a symbol,
3376 mapping or otherwise, after two bytes then don't
3377 print more. */
3378 if (last_type == MAP_DATA)
3379 {
3380 size = 4 - (pc & 3);
3381 for (n = last_sym + 1; n < info->symtab_size; n++)
3382 {
3383 addr = bfd_asymbol_value (info->symtab[n]);
3384 if (addr > pc)
3385 {
3386 if (addr - pc < size)
3387 size = addr - pc;
3388 break;
3389 }
3390 }
3391 /* If the next symbol is after three bytes, we need to
3392 print only part of the data, so that we can use either
3393 .byte or .short. */
3394 if (size == 3)
3395 size = (pc & 1) ? 1 : 2;
3396 }
3397 }
3398
3399 if (last_type == MAP_DATA)
3400 {
3401 /* size was set above. */
3402 info->bytes_per_chunk = size;
3403 info->display_endian = info->endian;
3404 printer = print_insn_data;
3405 }
3406 else
3407 {
3408 info->bytes_per_chunk = size = INSNLEN;
3409 info->display_endian = info->endian_code;
3410 printer = print_insn_aarch64_word;
3411 }
3412
3413 status = (*info->read_memory_func) (pc, buffer, size, info);
3414 if (status != 0)
3415 {
3416 (*info->memory_error_func) (status, pc, info);
3417 return -1;
3418 }
3419
3420 data = bfd_get_bits (buffer, size * 8,
3421 info->display_endian == BFD_ENDIAN_BIG);
3422
3423 (*printer) (pc, data, info, &errors);
3424
3425 return size;
3426 }
3427 \f
3428 void
3429 print_aarch64_disassembler_options (FILE *stream)
3430 {
3431 fprintf (stream, _("\n\
3432 The following AARCH64 specific disassembler options are supported for use\n\
3433 with the -M switch (multiple options should be separated by commas):\n"));
3434
3435 fprintf (stream, _("\n\
3436 no-aliases Don't print instruction aliases.\n"));
3437
3438 fprintf (stream, _("\n\
3439 aliases Do print instruction aliases.\n"));
3440
3441 fprintf (stream, _("\n\
3442 no-notes Don't print instruction notes.\n"));
3443
3444 fprintf (stream, _("\n\
3445 notes Do print instruction notes.\n"));
3446
3447 #ifdef DEBUG_AARCH64
3448 fprintf (stream, _("\n\
3449 debug_dump Temp switch for debug trace.\n"));
3450 #endif /* DEBUG_AARCH64 */
3451
3452 fprintf (stream, _("\n"));
3453 }