1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define INSNLEN 4
30
31 /* Cached mapping symbol state. */
32 enum map_type
33 {
34 MAP_INSN,
35 MAP_DATA
36 };
37
38 static enum map_type last_type;
39 static int last_mapping_sym = -1;
40 static bfd_vma last_stop_offset = 0;
41 static bfd_vma last_mapping_addr = 0;
42
43 /* Other options */
44 static int no_aliases = 0; /* If set disassemble as most general inst. */
45 static int no_notes = 1;	/* If set do not print disassembly notes in the
46 				   output as comments.  */
47
48 /* Currently active instruction sequence. */
49 static aarch64_instr_sequence insn_sequence;
50
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
61 {
62 no_aliases = 1;
63 return;
64 }
65
66 if (CONST_STRNEQ (option, "aliases"))
67 {
68 no_aliases = 0;
69 return;
70 }
71
72 if (CONST_STRNEQ (option, "no-notes"))
73 {
74 no_notes = 1;
75 return;
76 }
77
78 if (CONST_STRNEQ (option, "notes"))
79 {
80 no_notes = 0;
81 return;
82 }
83
84 #ifdef DEBUG_AARCH64
85 if (CONST_STRNEQ (option, "debug_dump"))
86 {
87 debug_dump = 1;
88 return;
89 }
90 #endif /* DEBUG_AARCH64 */
91
92 /* Invalid option. */
93 opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
94 }
95
96 static void
97 parse_aarch64_dis_options (const char *options)
98 {
99 const char *option_end;
100
101 if (options == NULL)
102 return;
103
104 while (*options != '\0')
105 {
106 /* Skip empty options. */
107 if (*options == ',')
108 {
109 options++;
110 continue;
111 }
112
113 /* We know that *options is neither NUL nor a comma. */
114 option_end = options + 1;
115 while (*option_end != ',' && *option_end != '\0')
116 option_end++;
117
118 parse_aarch64_dis_option (options, option_end - options);
119
120 /* Go on to the next one. If option_end points to a comma, it
121 will be skipped above. */
122 options = option_end;
123 }
124 }
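/* Editorial note (not part of the original file): a minimal, hypothetical
   sketch of how an option string reaches this parser, e.g. via
   "objdump -M no-aliases,notes".  Each comma-separated token is matched by
   prefix in parse_aarch64_dis_option above.  */
#if 0
static void
example_parse_options (void)
{
  /* Leaves no_aliases == 1 (first token) and no_notes == 0 (second token).  */
  parse_aarch64_dis_options ("no-aliases,notes");
}
#endif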
125 \f
126 /* Functions doing the instruction disassembling. */
127
128 /* The unnamed (variadic) arguments consist of the number of fields and
129    information about those fields, from which VALUE is extracted out of CODE
130    and returned.  MASK can be zero or the base mask of the opcode.
131 
132    N.B. the fields are required to be in such an order that the most
133    significant field for VALUE comes first, e.g. the <index> in
134    SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
135    is in some cases encoded in H:L:M; the fields should then be passed in
136    the order H, L, M. */
137
138 aarch64_insn
139 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
140 {
141 uint32_t num;
142 const aarch64_field *field;
143 enum aarch64_field_kind kind;
144 va_list va;
145
146 va_start (va, mask);
147 num = va_arg (va, uint32_t);
148 assert (num <= 5);
149 aarch64_insn value = 0x0;
150 while (num--)
151 {
152 kind = va_arg (va, enum aarch64_field_kind);
153 field = &fields[kind];
154 value <<= field->width;
155 value |= extract_field (kind, code, mask);
156 }
157 return value;
158 }
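/* Editorial example (not part of the original file): because each field is
   shifted in most-significant first, a call such as
   extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M) concatenates the bits as
   H:L:M.  A hand-written equivalent, assuming H, L and M are one bit wide:  */
#if 0
static aarch64_insn
example_extract_hlm (aarch64_insn code)
{
  return (extract_field (FLD_H, code, 0) << 2)
	 | (extract_field (FLD_L, code, 0) << 1)
	 | extract_field (FLD_M, code, 0);
}
#endif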
159
160 /* Extract the value of all fields in SELF->fields from instruction CODE.
161 The least significant bit comes from the final field. */
162
163 static aarch64_insn
164 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
165 {
166 aarch64_insn value;
167 unsigned int i;
168 enum aarch64_field_kind kind;
169
170 value = 0;
171 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
172 {
173 kind = self->fields[i];
174 value <<= fields[kind].width;
175 value |= extract_field (kind, code, 0);
176 }
177 return value;
178 }
179
180 /* Sign-extend VALUE from bit I, i.e. treat bit I as the sign bit. */
181 static inline uint64_t
182 sign_extend (aarch64_insn value, unsigned i)
183 {
184 uint64_t ret, sign;
185
186 assert (i < 32);
187 ret = value;
188 sign = (uint64_t) 1 << i;
189 return ((ret & (sign + sign - 1)) ^ sign) - sign;
190 }
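/* Editorial example (not part of the original file): a couple of worked
   values for sign_extend, assuming a test harness links this file.  */
#if 0
static void
example_sign_extend (void)
{
  /* Bit 8 set: 0x1f0 sign-extends from bit 8 to -16.  */
  assert (sign_extend (0x1f0, 8) == (uint64_t) -16);
  /* Bit 8 clear: the value is returned unchanged.  */
  assert (sign_extend (0x0f0, 8) == 0xf0);
}
#endif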
191
192 /* N.B. the following inline helper functions create a dependency on the
193    order of operand qualifier enumerators. */
194
195 /* Given VALUE, return qualifier for a general purpose register. */
196 static inline enum aarch64_opnd_qualifier
197 get_greg_qualifier_from_value (aarch64_insn value)
198 {
199 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
200 assert (value <= 0x1
201 && aarch64_get_qualifier_standard_value (qualifier) == value);
202 return qualifier;
203 }
204
205 /* Given VALUE, return qualifier for a vector register. This does not support
206 decoding instructions that accept the 2H vector type. */
207
208 static inline enum aarch64_opnd_qualifier
209 get_vreg_qualifier_from_value (aarch64_insn value)
210 {
211 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
212
213 /* Instructions using vector type 2H should not call this function. Skip over
214 the 2H qualifier. */
215 if (qualifier >= AARCH64_OPND_QLF_V_2H)
216 qualifier += 1;
217
218 assert (value <= 0x8
219 && aarch64_get_qualifier_standard_value (qualifier) == value);
220 return qualifier;
221 }
222
223 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
224 static inline enum aarch64_opnd_qualifier
225 get_sreg_qualifier_from_value (aarch64_insn value)
226 {
227 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
228
229 assert (value <= 0x4
230 && aarch64_get_qualifier_standard_value (qualifier) == value);
231 return qualifier;
232 }
233
234 /* Given the instruction in *INST, which is probably halfway through
235    decoding, our caller wants to know the expected qualifier for operand
236    I.  Return such a qualifier if we can establish it; otherwise return
237    AARCH64_OPND_QLF_NIL. */
238
239 static aarch64_opnd_qualifier_t
240 get_expected_qualifier (const aarch64_inst *inst, int i)
241 {
242 aarch64_opnd_qualifier_seq_t qualifiers;
243 /* Should not be called if the qualifier is known. */
244 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
245 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
246 i, qualifiers))
247 return qualifiers[i];
248 else
249 return AARCH64_OPND_QLF_NIL;
250 }
251
252 /* Operand extractors. */
253
254 bfd_boolean
255 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
256 const aarch64_insn code,
257 const aarch64_inst *inst ATTRIBUTE_UNUSED,
258 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
259 {
260 info->reg.regno = extract_field (self->fields[0], code, 0);
261 return TRUE;
262 }
263
264 bfd_boolean
265 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
266 const aarch64_insn code ATTRIBUTE_UNUSED,
267 const aarch64_inst *inst ATTRIBUTE_UNUSED,
268 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
269 {
270 assert (info->idx == 1
271 	|| info->idx == 3);
272 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
273 return TRUE;
274 }
275
276 /* e.g. IC <ic_op>{, <Xt>}. */
277 bfd_boolean
278 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
279 const aarch64_insn code,
280 const aarch64_inst *inst ATTRIBUTE_UNUSED,
281 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
282 {
283 info->reg.regno = extract_field (self->fields[0], code, 0);
284 assert (info->idx == 1
285 && (aarch64_get_operand_class (inst->operands[0].type)
286 == AARCH64_OPND_CLASS_SYSTEM));
287 /* This will make the constraint checking happy and more importantly will
288 help the disassembler determine whether this operand is optional or
289 not. */
290 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
291
292 return TRUE;
293 }
294
295 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
296 bfd_boolean
297 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
298 const aarch64_insn code,
299 const aarch64_inst *inst ATTRIBUTE_UNUSED,
300 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
301 {
302 /* regno */
303 info->reglane.regno = extract_field (self->fields[0], code,
304 inst->opcode->mask);
305
306 /* Index and/or type. */
307 if (inst->opcode->iclass == asisdone
308 || inst->opcode->iclass == asimdins)
309 {
310 if (info->type == AARCH64_OPND_En
311 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
312 {
313 unsigned shift;
314 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
315 assert (info->idx == 1); /* Vn */
316 aarch64_insn value = extract_field (FLD_imm4, code, 0);
317 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
318 info->qualifier = get_expected_qualifier (inst, info->idx);
319 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
320 info->reglane.index = value >> shift;
321 }
322 else
323 {
324 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
325 imm5<3:0> <V>
326 0000 RESERVED
327 xxx1 B
328 xx10 H
329 x100 S
330 1000 D */
331 int pos = -1;
332 aarch64_insn value = extract_field (FLD_imm5, code, 0);
333 while (++pos <= 3 && (value & 0x1) == 0)
334 value >>= 1;
335 if (pos > 3)
336 return FALSE;
337 info->qualifier = get_sreg_qualifier_from_value (pos);
338 info->reglane.index = (unsigned) (value >> 1);
339 }
340 }
341 else if (inst->opcode->iclass == dotproduct)
342 {
343 /* Need information in other operand(s) to help decoding. */
344 info->qualifier = get_expected_qualifier (inst, info->idx);
345 switch (info->qualifier)
346 {
347 case AARCH64_OPND_QLF_S_4B:
348 case AARCH64_OPND_QLF_S_2H:
349 /* L:H */
350 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
351 info->reglane.regno &= 0x1f;
352 break;
353 default:
354 return FALSE;
355 }
356 }
357 else if (inst->opcode->iclass == cryptosm3)
358 {
359 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>.S[<imm2>]. */
360 info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
361 }
362 else
363 {
364 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
366
367 /* Need information in other operand(s) to help decoding. */
368 info->qualifier = get_expected_qualifier (inst, info->idx);
369 switch (info->qualifier)
370 {
371 case AARCH64_OPND_QLF_S_H:
372 if (info->type == AARCH64_OPND_Em16)
373 {
374 /* h:l:m */
375 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
376 FLD_M);
377 info->reglane.regno &= 0xf;
378 }
379 else
380 {
381 /* h:l */
382 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
383 }
384 break;
385 case AARCH64_OPND_QLF_S_S:
386 /* h:l */
387 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
388 break;
389 case AARCH64_OPND_QLF_S_D:
390 /* H */
391 info->reglane.index = extract_field (FLD_H, code, 0);
392 break;
393 default:
394 return FALSE;
395 }
396
397 if (inst->opcode->op == OP_FCMLA_ELEM
398 && info->qualifier != AARCH64_OPND_QLF_S_H)
399 {
400 /* Complex operand takes two elements. */
401 if (info->reglane.index & 1)
402 return FALSE;
403 info->reglane.index /= 2;
404 }
405 }
406
407 return TRUE;
408 }
409
410 bfd_boolean
411 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
412 const aarch64_insn code,
413 const aarch64_inst *inst ATTRIBUTE_UNUSED,
414 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
415 {
416 /* R */
417 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
418 /* len */
419 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
420 return TRUE;
421 }
422
423 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
424 bfd_boolean
425 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
426 aarch64_opnd_info *info, const aarch64_insn code,
427 const aarch64_inst *inst,
428 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
429 {
430 aarch64_insn value;
431 /* Number of elements in each structure to be loaded/stored. */
432 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
433
434 struct
435 {
436 unsigned is_reserved;
437 unsigned num_regs;
438 unsigned num_elements;
439 } data [] =
440 { {0, 4, 4},
441 {1, 4, 4},
442 {0, 4, 1},
443 {0, 4, 2},
444 {0, 3, 3},
445 {1, 3, 3},
446 {0, 3, 1},
447 {0, 1, 1},
448 {0, 2, 2},
449 {1, 2, 2},
450 {0, 2, 1},
451 };
452
453 /* Rt */
454 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
455 /* opcode */
456 value = extract_field (FLD_opcode, code, 0);
457 /* PR 21595: Check for a bogus value. */
458 if (value >= ARRAY_SIZE (data))
459 return FALSE;
460 if (expected_num != data[value].num_elements || data[value].is_reserved)
461 return FALSE;
462 info->reglist.num_regs = data[value].num_regs;
463
464 return TRUE;
465 }
466
467 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
468 lanes instructions. */
469 bfd_boolean
470 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
471 aarch64_opnd_info *info, const aarch64_insn code,
472 const aarch64_inst *inst,
473 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
474 {
475 aarch64_insn value;
476
477 /* Rt */
478 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
479 /* S */
480 value = extract_field (FLD_S, code, 0);
481
482 /* Number of registers is equal to the number of elements in
483 each structure to be loaded/stored. */
484 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
485 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
486
487 /* Except when it is LD1R. */
488 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
489 info->reglist.num_regs = 2;
490
491 return TRUE;
492 }
493
494 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
495 load/store single element instructions. */
496 bfd_boolean
497 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
498 aarch64_opnd_info *info, const aarch64_insn code,
499 const aarch64_inst *inst ATTRIBUTE_UNUSED,
500 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
501 {
502 aarch64_field field = {0, 0};
503 aarch64_insn QSsize; /* fields Q:S:size. */
504 aarch64_insn opcodeh2; /* opcode<2:1> */
505
506 /* Rt */
507 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
508
509 /* Decode the index, opcode<2:1> and size. */
510 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
511 opcodeh2 = extract_field_2 (&field, code, 0);
512 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
513 switch (opcodeh2)
514 {
515 case 0x0:
516 info->qualifier = AARCH64_OPND_QLF_S_B;
517 /* Index encoded in "Q:S:size". */
518 info->reglist.index = QSsize;
519 break;
520 case 0x1:
521 if (QSsize & 0x1)
522 /* UND. */
523 return FALSE;
524 info->qualifier = AARCH64_OPND_QLF_S_H;
525 /* Index encoded in "Q:S:size<1>". */
526 info->reglist.index = QSsize >> 1;
527 break;
528 case 0x2:
529 if ((QSsize >> 1) & 0x1)
530 /* UND. */
531 return FALSE;
532 if ((QSsize & 0x1) == 0)
533 {
534 info->qualifier = AARCH64_OPND_QLF_S_S;
535 /* Index encoded in "Q:S". */
536 info->reglist.index = QSsize >> 2;
537 }
538 else
539 {
540 if (extract_field (FLD_S, code, 0))
541 /* UND */
542 return FALSE;
543 info->qualifier = AARCH64_OPND_QLF_S_D;
544 /* Index encoded in "Q". */
545 info->reglist.index = QSsize >> 3;
546 }
547 break;
548 default:
549 return FALSE;
550 }
551
552 info->reglist.has_index = 1;
553 info->reglist.num_regs = 0;
554 /* Number of registers is equal to the number of elements in
555 each structure to be loaded/stored. */
556 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
557 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
558
559 return TRUE;
560 }
561
562 /* Decode fields immh:immb and/or Q for e.g.
563 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
564 or SSHR <V><d>, <V><n>, #<shift>. */
565
566 bfd_boolean
567 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
568 aarch64_opnd_info *info, const aarch64_insn code,
569 const aarch64_inst *inst,
570 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
571 {
572 int pos;
573 aarch64_insn Q, imm, immh;
574 enum aarch64_insn_class iclass = inst->opcode->iclass;
575
576 immh = extract_field (FLD_immh, code, 0);
577 if (immh == 0)
578 return FALSE;
579 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
580 pos = 4;
581 /* Get highest set bit in immh. */
582 while (--pos >= 0 && (immh & 0x8) == 0)
583 immh <<= 1;
584
585 assert ((iclass == asimdshf || iclass == asisdshf)
586 && (info->type == AARCH64_OPND_IMM_VLSR
587 || info->type == AARCH64_OPND_IMM_VLSL));
588
589 if (iclass == asimdshf)
590 {
591 Q = extract_field (FLD_Q, code, 0);
592 /* immh Q <T>
593 0000 x SEE AdvSIMD modified immediate
594 0001 0 8B
595 0001 1 16B
596 001x 0 4H
597 001x 1 8H
598 01xx 0 2S
599 01xx 1 4S
600 1xxx 0 RESERVED
601 1xxx 1 2D */
602 info->qualifier =
603 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
604 }
605 else
606 info->qualifier = get_sreg_qualifier_from_value (pos);
607
608 if (info->type == AARCH64_OPND_IMM_VLSR)
609 /* immh <shift>
610 0000 SEE AdvSIMD modified immediate
611 0001 (16-UInt(immh:immb))
612 001x (32-UInt(immh:immb))
613 01xx (64-UInt(immh:immb))
614 1xxx (128-UInt(immh:immb)) */
615 info->imm.value = (16 << pos) - imm;
616 else
617 /* immh:immb
618 immh <shift>
619 0000 SEE AdvSIMD modified immediate
620 0001 (UInt(immh:immb)-8)
621 001x (UInt(immh:immb)-16)
622 01xx (UInt(immh:immb)-32)
623 1xxx (UInt(immh:immb)-64) */
624 info->imm.value = imm - (8 << pos);
625
626 return TRUE;
627 }
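/* Editorial worked example (not from the original source): for
   SSHR <Vd>.4H, <Vn>.4H, #<shift> with immh = 0b0010 and immb = 0b101,
   imm = UInt(immh:immb) = 21 and the loop above leaves pos = 1, so the
   decoded shift is (16 << 1) - 21 = 11 and (pos << 1) | Q selects the
   4H (Q == 0) or 8H (Q == 1) qualifier.  */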
628
629 /* Decode the shift immediate (the source element width in bits) for e.g.
    SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
630 bfd_boolean
631 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
632 aarch64_opnd_info *info, const aarch64_insn code,
633 const aarch64_inst *inst ATTRIBUTE_UNUSED,
634 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
635 {
636 int64_t imm;
637 aarch64_insn val;
638 val = extract_field (FLD_size, code, 0);
639 switch (val)
640 {
641 case 0: imm = 8; break;
642 case 1: imm = 16; break;
643 case 2: imm = 32; break;
644 default: return FALSE;
645 }
646 info->imm.value = imm;
647 return TRUE;
648 }
649
650 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
651    The value in the field(s) will be extracted as an unsigned immediate. */
652 bfd_boolean
653 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
654 const aarch64_insn code,
655 const aarch64_inst *inst ATTRIBUTE_UNUSED,
656 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
657 {
658 uint64_t imm;
659
660 imm = extract_all_fields (self, code);
661
662 if (operand_need_sign_extension (self))
663 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
664
665 if (operand_need_shift_by_two (self))
666 imm <<= 2;
667 else if (operand_need_shift_by_four (self))
668 imm <<= 4;
669
670 if (info->type == AARCH64_OPND_ADDR_ADRP)
671 imm <<= 12;
672
673 info->imm.value = imm;
674 return TRUE;
675 }
676
677 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
678 bfd_boolean
679 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
680 const aarch64_insn code,
681 const aarch64_inst *inst ATTRIBUTE_UNUSED,
682 aarch64_operand_error *errors)
683 {
684 aarch64_ext_imm (self, info, code, inst, errors);
685 info->shifter.kind = AARCH64_MOD_LSL;
686 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
687 return TRUE;
688 }
689
690 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
691 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
692 bfd_boolean
693 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
694 aarch64_opnd_info *info,
695 const aarch64_insn code,
696 const aarch64_inst *inst ATTRIBUTE_UNUSED,
697 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
698 {
699 uint64_t imm;
700 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
701 aarch64_field field = {0, 0};
702
703 assert (info->idx == 1);
704
705 if (info->type == AARCH64_OPND_SIMD_FPIMM)
706 info->imm.is_fp = 1;
707
708 /* a:b:c:d:e:f:g:h */
709 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
710 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
711 {
712 /* Either MOVI <Dd>, #<imm>
713 or MOVI <Vd>.2D, #<imm>.
714 <imm> is a 64-bit immediate
715 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
716 encoded in "a:b:c:d:e:f:g:h". */
717 int i;
718 unsigned abcdefgh = imm;
719 for (imm = 0ull, i = 0; i < 8; i++)
720 if (((abcdefgh >> i) & 0x1) != 0)
721 imm |= 0xffull << (8 * i);
722 }
723 info->imm.value = imm;
724
725 /* cmode */
726 info->qualifier = get_expected_qualifier (inst, info->idx);
727 switch (info->qualifier)
728 {
729 case AARCH64_OPND_QLF_NIL:
730 /* no shift */
731 info->shifter.kind = AARCH64_MOD_NONE;
732 return 1;
733 case AARCH64_OPND_QLF_LSL:
734 /* shift zeros */
735 info->shifter.kind = AARCH64_MOD_LSL;
736 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
737 {
738 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
739 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
740 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
741 default: assert (0); return FALSE;
742 }
743 /* 00: 0; 01: 8; 10: 16; 11: 24. */
744 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
745 break;
746 case AARCH64_OPND_QLF_MSL:
747 /* shift ones */
748 info->shifter.kind = AARCH64_MOD_MSL;
749 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
750 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
751 break;
752 default:
753 assert (0);
754 return FALSE;
755 }
756
757 return TRUE;
758 }
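/* Editorial worked example (not from the original source): for
   MOVI <Vd>.2D, #<imm> with a:b:c:d:e:f:g:h = 0b10100101, the expansion
   loop above sets one byte of <imm> per set bit ('a' being the most
   significant byte), giving imm == 0xff00ff0000ff00ff.  */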
759
760 /* Decode an 8-bit floating-point immediate. */
761 bfd_boolean
762 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
763 const aarch64_insn code,
764 const aarch64_inst *inst ATTRIBUTE_UNUSED,
765 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
766 {
767 info->imm.value = extract_all_fields (self, code);
768 info->imm.is_fp = 1;
769 return TRUE;
770 }
771
772 /* Decode a 1-bit rotate immediate (#90 or #270). */
773 bfd_boolean
774 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
775 const aarch64_insn code,
776 const aarch64_inst *inst ATTRIBUTE_UNUSED,
777 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
778 {
779 uint64_t rot = extract_field (self->fields[0], code, 0);
780 assert (rot < 2U);
781 info->imm.value = rot * 180 + 90;
782 return TRUE;
783 }
784
785 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
786 bfd_boolean
787 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
788 const aarch64_insn code,
789 const aarch64_inst *inst ATTRIBUTE_UNUSED,
790 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
791 {
792 uint64_t rot = extract_field (self->fields[0], code, 0);
793 assert (rot < 4U);
794 info->imm.value = rot * 90;
795 return TRUE;
796 }
797
798 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
799 bfd_boolean
800 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
801 aarch64_opnd_info *info, const aarch64_insn code,
802 const aarch64_inst *inst ATTRIBUTE_UNUSED,
803 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
804 {
805 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
806 return TRUE;
807 }
808
809 /* Decode arithmetic immediate for e.g.
810 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
811 bfd_boolean
812 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
813 aarch64_opnd_info *info, const aarch64_insn code,
814 const aarch64_inst *inst ATTRIBUTE_UNUSED,
815 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
816 {
817 aarch64_insn value;
818
819 info->shifter.kind = AARCH64_MOD_LSL;
820 /* shift */
821 value = extract_field (FLD_shift, code, 0);
822 if (value >= 2)
823 return FALSE;
824 info->shifter.amount = value ? 12 : 0;
825 /* imm12 (unsigned) */
826 info->imm.value = extract_field (FLD_imm12, code, 0);
827
828 return TRUE;
829 }
830
831 /* Return true if VALUE is a valid logical immediate encoding, storing the
832 decoded value in *RESULT if so. ESIZE is the number of bytes in the
833 decoded immediate. */
834 static bfd_boolean
835 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
836 {
837 uint64_t imm, mask;
838 uint32_t N, R, S;
839 unsigned simd_size;
840
841 /* value is N:immr:imms. */
842 S = value & 0x3f;
843 R = (value >> 6) & 0x3f;
844 N = (value >> 12) & 0x1;
845
846 /* The immediate value is S+1 bits set to 1, left rotated by SIMDsize - R
847    (in other words, right rotated by R), then replicated. */
848 if (N != 0)
849 {
850 simd_size = 64;
851 mask = 0xffffffffffffffffull;
852 }
853 else
854 {
855 switch (S)
856 {
857 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
858 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
859 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
860 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
861 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
862 default: return FALSE;
863 }
864 mask = (1ull << simd_size) - 1;
865 /* Top bits are IGNORED. */
866 R &= simd_size - 1;
867 }
868
869 if (simd_size > esize * 8)
870 return FALSE;
871
872 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
873 if (S == simd_size - 1)
874 return FALSE;
875 /* S+1 consecutive bits to 1. */
876 /* NOTE: S can't be 63 due to detection above. */
877 imm = (1ull << (S + 1)) - 1;
878 /* Rotate to the left by simd_size - R. */
879 if (R != 0)
880 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
881 /* Replicate the value according to SIMD size. */
882 switch (simd_size)
883 {
884 case 2: imm = (imm << 2) | imm;
885 /* Fall through. */
886 case 4: imm = (imm << 4) | imm;
887 /* Fall through. */
888 case 8: imm = (imm << 8) | imm;
889 /* Fall through. */
890 case 16: imm = (imm << 16) | imm;
891 /* Fall through. */
892 case 32: imm = (imm << 32) | imm;
893 /* Fall through. */
894 case 64: break;
895 default: assert (0); return 0;
896 }
897
898 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
899
900 return TRUE;
901 }
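/* Editorial example (not part of the original file): a minimal sketch of
   decode_limm for the encoding N:immr:imms == 0:000000:000111 (value 0x007),
   i.e. eight consecutive ones in a 32-bit element with no rotation.  */
#if 0
static void
example_decode_limm (void)
{
  int64_t result;
  /* With a 4-byte element size this decodes to #0xff,
     e.g. ORR <Wd|WSP>, <Wn>, #0xff.  */
  if (decode_limm (4, 0x007, &result))
    assert (result == 0xff);
}
#endif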
902
903 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
904 bfd_boolean
905 aarch64_ext_limm (const aarch64_operand *self,
906 aarch64_opnd_info *info, const aarch64_insn code,
907 const aarch64_inst *inst,
908 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
909 {
910 uint32_t esize;
911 aarch64_insn value;
912
913 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
914 self->fields[2]);
915 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
916 return decode_limm (esize, value, &info->imm.value);
917 }
918
919 /* Decode a logical immediate for the BIC alias of AND (etc.). */
920 bfd_boolean
921 aarch64_ext_inv_limm (const aarch64_operand *self,
922 aarch64_opnd_info *info, const aarch64_insn code,
923 const aarch64_inst *inst,
924 aarch64_operand_error *errors)
925 {
926 if (!aarch64_ext_limm (self, info, code, inst, errors))
927 return FALSE;
928 info->imm.value = ~info->imm.value;
929 return TRUE;
930 }
931
932 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
933 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
934 bfd_boolean
935 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
936 aarch64_opnd_info *info,
937 const aarch64_insn code, const aarch64_inst *inst,
938 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
939 {
940 aarch64_insn value;
941
942 /* Rt */
943 info->reg.regno = extract_field (FLD_Rt, code, 0);
944
945 /* size */
946 value = extract_field (FLD_ldst_size, code, 0);
947 if (inst->opcode->iclass == ldstpair_indexed
948 || inst->opcode->iclass == ldstnapair_offs
949 || inst->opcode->iclass == ldstpair_off
950 || inst->opcode->iclass == loadlit)
951 {
952 enum aarch64_opnd_qualifier qualifier;
953 switch (value)
954 {
955 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
956 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
957 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
958 default: return FALSE;
959 }
960 info->qualifier = qualifier;
961 }
962 else
963 {
964 /* opc1:size */
965 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
966 if (value > 0x4)
967 return FALSE;
968 info->qualifier = get_sreg_qualifier_from_value (value);
969 }
970
971 return TRUE;
972 }
973
974 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
975 bfd_boolean
976 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
977 aarch64_opnd_info *info,
978 aarch64_insn code,
979 const aarch64_inst *inst ATTRIBUTE_UNUSED,
980 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
981 {
982 /* Rn */
983 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
984 return TRUE;
985 }
986
987 /* Decode the address operand for e.g.
988 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
989 bfd_boolean
990 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
991 aarch64_opnd_info *info,
992 aarch64_insn code, const aarch64_inst *inst,
993 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
994 {
995 info->qualifier = get_expected_qualifier (inst, info->idx);
996
997 /* Rn */
998 info->addr.base_regno = extract_field (self->fields[0], code, 0);
999
1000 /* simm9 */
1001 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1002 info->addr.offset.imm = sign_extend (imm, 8);
1003 if (extract_field (self->fields[2], code, 0) == 1) {
1004 info->addr.writeback = 1;
1005 info->addr.preind = 1;
1006 }
1007 return TRUE;
1008 }
1009
1010 /* Decode the address operand for e.g.
1011 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1012 bfd_boolean
1013 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
1014 aarch64_opnd_info *info,
1015 aarch64_insn code, const aarch64_inst *inst,
1016 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1017 {
1018 aarch64_insn S, value;
1019
1020 /* Rn */
1021 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1022 /* Rm */
1023 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1024 /* option */
1025 value = extract_field (FLD_option, code, 0);
1026 info->shifter.kind =
1027 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1028 /* Fix-up the shifter kind; although the table-driven approach is
1029 efficient, it is slightly inflexible, thus needing this fix-up. */
1030 if (info->shifter.kind == AARCH64_MOD_UXTX)
1031 info->shifter.kind = AARCH64_MOD_LSL;
1032 /* S */
1033 S = extract_field (FLD_S, code, 0);
1034 if (S == 0)
1035 {
1036 info->shifter.amount = 0;
1037 info->shifter.amount_present = 0;
1038 }
1039 else
1040 {
1041 int size;
1042 /* Need information in other operand(s) to help achieve the decoding
1043    from the 'S' field. */
1044 info->qualifier = get_expected_qualifier (inst, info->idx);
1045 /* Get the size of the data element that is accessed, which may be
1046 different from that of the source register size, e.g. in strb/ldrb. */
1047 size = aarch64_get_qualifier_esize (info->qualifier);
1048 info->shifter.amount = get_logsz (size);
1049 info->shifter.amount_present = 1;
1050 }
1051
1052 return TRUE;
1053 }
1054
1055 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
1056 bfd_boolean
1057 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1058 aarch64_insn code, const aarch64_inst *inst,
1059 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1060 {
1061 aarch64_insn imm;
1062 info->qualifier = get_expected_qualifier (inst, info->idx);
1063
1064 /* Rn */
1065 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1066 /* simm (imm9 or imm7) */
1067 imm = extract_field (self->fields[0], code, 0);
1068 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1069 if (self->fields[0] == FLD_imm7
1070 || info->qualifier == AARCH64_OPND_QLF_imm_tag)
1071 /* scaled immediate in ld/st pair instructions. */
1072 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1073 /* qualifier */
1074 if (inst->opcode->iclass == ldst_unscaled
1075 || inst->opcode->iclass == ldstnapair_offs
1076 || inst->opcode->iclass == ldstpair_off
1077 || inst->opcode->iclass == ldst_unpriv)
1078 info->addr.writeback = 0;
1079 else
1080 {
1081 /* pre/post-index */
1082 info->addr.writeback = 1;
1083 if (extract_field (self->fields[1], code, 0) == 1)
1084 info->addr.preind = 1;
1085 else
1086 info->addr.postind = 1;
1087 }
1088
1089 return TRUE;
1090 }
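/* Editorial worked example (not from the original source): for
   LDP <Xt1>, <Xt2>, [<Xn|SP>, #-16] the raw imm7 field is 0x7e, which
   sign-extends above to -2 and, assuming the expected qualifier resolves to
   an 8-byte element size, is scaled to an offset of -16.  */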
1091
1092 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1093 bfd_boolean
1094 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1095 aarch64_insn code,
1096 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1097 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1098 {
1099 int shift;
1100 info->qualifier = get_expected_qualifier (inst, info->idx);
1101 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1102 /* Rn */
1103 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1104 /* uimm12 */
1105 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1106 return TRUE;
1107 }
1108
1109 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1110 bfd_boolean
1111 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1112 aarch64_insn code,
1113 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1114 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1115 {
1116 aarch64_insn imm;
1117
1118 info->qualifier = get_expected_qualifier (inst, info->idx);
1119 /* Rn */
1120 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1121 /* simm10 */
1122 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1123 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1124 if (extract_field (self->fields[3], code, 0) == 1) {
1125 info->addr.writeback = 1;
1126 info->addr.preind = 1;
1127 }
1128 return TRUE;
1129 }
1130
1131 /* Decode the address operand for e.g.
1132 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1133 bfd_boolean
1134 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1135 aarch64_opnd_info *info,
1136 aarch64_insn code, const aarch64_inst *inst,
1137 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1138 {
1139 /* The opcode dependent area stores the number of elements in
1140 each structure to be loaded/stored. */
1141 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1142
1143 /* Rn */
1144 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1145 /* Rm | #<amount> */
1146 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1147 if (info->addr.offset.regno == 31)
1148 {
1149 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1150 /* Special handling of loading single structure to all lanes. */
1151 info->addr.offset.imm = (is_ld1r ? 1
1152 : inst->operands[0].reglist.num_regs)
1153 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1154 else
1155 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1156 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1157 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1158 }
1159 else
1160 info->addr.offset.is_reg = 1;
1161 info->addr.writeback = 1;
1162
1163 return TRUE;
1164 }
1165
1166 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1167 bfd_boolean
1168 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1169 aarch64_opnd_info *info,
1170 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1171 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1172 {
1173 aarch64_insn value;
1174 /* cond */
1175 value = extract_field (FLD_cond, code, 0);
1176 info->cond = get_cond_from_value (value);
1177 return TRUE;
1178 }
1179
1180 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1181 bfd_boolean
1182 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1183 aarch64_opnd_info *info,
1184 aarch64_insn code,
1185 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1186 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1187 {
1188 /* op0:op1:CRn:CRm:op2 */
1189 info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1190 FLD_CRm, FLD_op2);
1191 info->sysreg.flags = 0;
1192
1193 /* If this is a system instruction, check which restrictions apply to the
1194    register value during decoding; they will be enforced then. */
1195 if (inst->opcode->iclass == ic_system)
1196 {
1197 /* Check whether it is read-only, then whether it is write-only; if it is
1198    both or unspecified, we don't care. */
1199 if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1200 info->sysreg.flags = F_REG_READ;
1201 else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1202 == F_SYS_WRITE)
1203 info->sysreg.flags = F_REG_WRITE;
1204 }
1205
1206 return TRUE;
1207 }
1208
1209 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1210 bfd_boolean
1211 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1212 aarch64_opnd_info *info, aarch64_insn code,
1213 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1214 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1215 {
1216 int i;
1217 /* op1:op2 */
1218 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1219 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1220 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1221 return TRUE;
1222 /* Reserved value in <pstatefield>. */
1223 return FALSE;
1224 }
1225
1226 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1227 bfd_boolean
1228 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1229 aarch64_opnd_info *info,
1230 aarch64_insn code,
1231 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1232 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1233 {
1234 int i;
1235 aarch64_insn value;
1236 const aarch64_sys_ins_reg *sysins_ops;
1237 /* op0:op1:CRn:CRm:op2 */
1238 value = extract_fields (code, 0, 5,
1239 FLD_op0, FLD_op1, FLD_CRn,
1240 FLD_CRm, FLD_op2);
1241
1242 switch (info->type)
1243 {
1244 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1245 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1246 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1247 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1248 case AARCH64_OPND_SYSREG_SR:
1249 sysins_ops = aarch64_sys_regs_sr;
1250 /* Let's remove op2 for rctx. Refer to comments in the definition of
1251 aarch64_sys_regs_sr[]. */
1252 value = value & ~(0x7);
1253 break;
1254 default: assert (0); return FALSE;
1255 }
1256
1257 for (i = 0; sysins_ops[i].name != NULL; ++i)
1258 if (sysins_ops[i].value == value)
1259 {
1260 info->sysins_op = sysins_ops + i;
1261 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1262 info->sysins_op->name,
1263 (unsigned)info->sysins_op->value,
1264 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1265 return TRUE;
1266 }
1267
1268 return FALSE;
1269 }
1270
1271 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1272
1273 bfd_boolean
1274 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1275 aarch64_opnd_info *info,
1276 aarch64_insn code,
1277 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1278 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1279 {
1280 /* CRm */
1281 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1282 return TRUE;
1283 }
1284
1285 /* Decode the prefetch operation option operand for e.g.
1286 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1287
1288 bfd_boolean
1289 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1290 aarch64_opnd_info *info,
1291 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1292 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1293 {
1294 /* prfop in Rt */
1295 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1296 return TRUE;
1297 }
1298
1299 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1300 to the matching name/value pair in aarch64_hint_options. */
1301
1302 bfd_boolean
1303 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1304 aarch64_opnd_info *info,
1305 aarch64_insn code,
1306 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1307 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1308 {
1309 /* CRm:op2. */
1310 unsigned hint_number;
1311 int i;
1312
1313 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1314
1315 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1316 {
1317 if (hint_number == HINT_VAL (aarch64_hint_options[i].value))
1318 {
1319 info->hint_option = &(aarch64_hint_options[i]);
1320 return TRUE;
1321 }
1322 }
1323
1324 return FALSE;
1325 }
1326
1327 /* Decode the extended register operand for e.g.
1328 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1329 bfd_boolean
1330 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1331 aarch64_opnd_info *info,
1332 aarch64_insn code,
1333 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1334 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1335 {
1336 aarch64_insn value;
1337
1338 /* Rm */
1339 info->reg.regno = extract_field (FLD_Rm, code, 0);
1340 /* option */
1341 value = extract_field (FLD_option, code, 0);
1342 info->shifter.kind =
1343 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1344 /* imm3 */
1345 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1346
1347 /* This makes the constraint checking happy. */
1348 info->shifter.operator_present = 1;
1349
1350 /* Assume inst->operands[0].qualifier has been resolved. */
1351 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1352 info->qualifier = AARCH64_OPND_QLF_W;
1353 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1354 && (info->shifter.kind == AARCH64_MOD_UXTX
1355 || info->shifter.kind == AARCH64_MOD_SXTX))
1356 info->qualifier = AARCH64_OPND_QLF_X;
1357
1358 return TRUE;
1359 }
1360
1361 /* Decode the shifted register operand for e.g.
1362 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1363 bfd_boolean
1364 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1365 aarch64_opnd_info *info,
1366 aarch64_insn code,
1367 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1368 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1369 {
1370 aarch64_insn value;
1371
1372 /* Rm */
1373 info->reg.regno = extract_field (FLD_Rm, code, 0);
1374 /* shift */
1375 value = extract_field (FLD_shift, code, 0);
1376 info->shifter.kind =
1377 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1378 if (info->shifter.kind == AARCH64_MOD_ROR
1379 && inst->opcode->iclass != log_shift)
1380 /* ROR is not available for the shifted register operand in arithmetic
1381 instructions. */
1382 return FALSE;
1383 /* imm6 */
1384 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1385
1386 /* This makes the constraint checking happy. */
1387 info->shifter.operator_present = 1;
1388
1389 return TRUE;
1390 }
1391
1392 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1393 where <offset> is given by the OFFSET parameter and where <factor> is
1394 1 plus SELF's operand-dependent value. fields[0] specifies the field
1395 that holds <base>. */
1396 static bfd_boolean
1397 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1398 aarch64_opnd_info *info, aarch64_insn code,
1399 int64_t offset)
1400 {
1401 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1402 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1403 info->addr.offset.is_reg = FALSE;
1404 info->addr.writeback = FALSE;
1405 info->addr.preind = TRUE;
1406 if (offset != 0)
1407 info->shifter.kind = AARCH64_MOD_MUL_VL;
1408 info->shifter.amount = 1;
1409 info->shifter.operator_present = (info->addr.offset.imm != 0);
1410 info->shifter.amount_present = FALSE;
1411 return TRUE;
1412 }
1413
1414 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1415 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1416 SELF's operand-dependent value. fields[0] specifies the field that
1417 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1418 bfd_boolean
1419 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1420 aarch64_opnd_info *info, aarch64_insn code,
1421 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1422 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1423 {
1424 int offset;
1425
1426 offset = extract_field (FLD_SVE_imm4, code, 0);
1427 offset = ((offset + 8) & 15) - 8;
1428 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1429 }
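/* Editorial worked example (not from the original source): a raw SVE_imm4
   value of 0b1111 becomes ((15 + 8) & 15) - 8 = -1, so when <factor> is 1
   the operand prints as [<Xn|SP>, #-1, MUL VL].  */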
1430
1431 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1432 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1433 SELF's operand-dependent value. fields[0] specifies the field that
1434 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1435 bfd_boolean
1436 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1437 aarch64_opnd_info *info, aarch64_insn code,
1438 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1439 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1440 {
1441 int offset;
1442
1443 offset = extract_field (FLD_SVE_imm6, code, 0);
1444 offset = (((offset + 32) & 63) - 32);
1445 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1446 }
1447
1448 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1449 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1450 SELF's operand-dependent value. fields[0] specifies the field that
1451 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1452 and imm3 fields, with imm3 being the less-significant part. */
1453 bfd_boolean
1454 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1455 aarch64_opnd_info *info,
1456 aarch64_insn code,
1457 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1458 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1459 {
1460 int offset;
1461
1462 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1463 offset = (((offset + 256) & 511) - 256);
1464 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1465 }
1466
1467 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1468 is given by the OFFSET parameter and where <shift> is SELF's operand-
1469 dependent value. fields[0] specifies the base register field <base>. */
1470 static bfd_boolean
1471 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1472 aarch64_opnd_info *info, aarch64_insn code,
1473 int64_t offset)
1474 {
1475 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1476 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1477 info->addr.offset.is_reg = FALSE;
1478 info->addr.writeback = FALSE;
1479 info->addr.preind = TRUE;
1480 info->shifter.operator_present = FALSE;
1481 info->shifter.amount_present = FALSE;
1482 return TRUE;
1483 }
1484
1485 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1486 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1487 value. fields[0] specifies the base register field. */
1488 bfd_boolean
1489 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1490 aarch64_opnd_info *info, aarch64_insn code,
1491 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1492 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1493 {
1494 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1495 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1496 }
1497
1498 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1499 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1500 value. fields[0] specifies the base register field. */
1501 bfd_boolean
1502 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1503 aarch64_opnd_info *info, aarch64_insn code,
1504 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1505 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1506 {
1507 int offset = extract_field (FLD_SVE_imm6, code, 0);
1508 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1509 }
1510
1511 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1512 is SELF's operand-dependent value. fields[0] specifies the base
1513 register field and fields[1] specifies the offset register field. */
1514 bfd_boolean
1515 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1516 aarch64_opnd_info *info, aarch64_insn code,
1517 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1518 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1519 {
1520 int index_regno;
1521
1522 index_regno = extract_field (self->fields[1], code, 0);
1523 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1524 return FALSE;
1525
1526 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1527 info->addr.offset.regno = index_regno;
1528 info->addr.offset.is_reg = TRUE;
1529 info->addr.writeback = FALSE;
1530 info->addr.preind = TRUE;
1531 info->shifter.kind = AARCH64_MOD_LSL;
1532 info->shifter.amount = get_operand_specific_data (self);
1533 info->shifter.operator_present = (info->shifter.amount != 0);
1534 info->shifter.amount_present = (info->shifter.amount != 0);
1535 return TRUE;
1536 }
1537
1538 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1539 <shift> is SELF's operand-dependent value. fields[0] specifies the
1540 base register field, fields[1] specifies the offset register field and
1541 fields[2] is a single-bit field that selects SXTW over UXTW. */
1542 bfd_boolean
1543 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1544 aarch64_opnd_info *info, aarch64_insn code,
1545 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1546 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1547 {
1548 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1549 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1550 info->addr.offset.is_reg = TRUE;
1551 info->addr.writeback = FALSE;
1552 info->addr.preind = TRUE;
1553 if (extract_field (self->fields[2], code, 0))
1554 info->shifter.kind = AARCH64_MOD_SXTW;
1555 else
1556 info->shifter.kind = AARCH64_MOD_UXTW;
1557 info->shifter.amount = get_operand_specific_data (self);
1558 info->shifter.operator_present = TRUE;
1559 info->shifter.amount_present = (info->shifter.amount != 0);
1560 return TRUE;
1561 }
1562
1563 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1564 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1565 fields[0] specifies the base register field. */
1566 bfd_boolean
1567 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1568 aarch64_opnd_info *info, aarch64_insn code,
1569 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1570 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1571 {
1572 int offset = extract_field (FLD_imm5, code, 0);
1573 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1574 }
1575
1576 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1577 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1578 number. fields[0] specifies the base register field and fields[1]
1579 specifies the offset register field. */
1580 static bfd_boolean
1581 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1582 aarch64_insn code, enum aarch64_modifier_kind kind)
1583 {
1584 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1585 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1586 info->addr.offset.is_reg = TRUE;
1587 info->addr.writeback = FALSE;
1588 info->addr.preind = TRUE;
1589 info->shifter.kind = kind;
1590 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1591 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1592 || info->shifter.amount != 0);
1593 info->shifter.amount_present = (info->shifter.amount != 0);
1594 return TRUE;
1595 }
1596
1597 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1598 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1599 field and fields[1] specifies the offset register field. */
1600 bfd_boolean
1601 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1602 aarch64_opnd_info *info, aarch64_insn code,
1603 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1604 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1605 {
1606 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1607 }
1608
1609 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1610 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1611 field and fields[1] specifies the offset register field. */
1612 bfd_boolean
1613 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1614 aarch64_opnd_info *info, aarch64_insn code,
1615 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1616 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1617 {
1618 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1619 }
1620
1621 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1622 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1623 field and fields[1] specifies the offset register field. */
1624 bfd_boolean
1625 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1626 aarch64_opnd_info *info, aarch64_insn code,
1627 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1628 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1629 {
1630 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1631 }
1632
1633 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1634 has the raw field value and that the low 8 bits decode to VALUE. */
1635 static bfd_boolean
1636 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1637 {
1638 info->shifter.kind = AARCH64_MOD_LSL;
1639 info->shifter.amount = 0;
1640 if (info->imm.value & 0x100)
1641 {
1642 if (value == 0)
1643 /* Decode 0x100 as #0, LSL #8. */
1644 info->shifter.amount = 8;
1645 else
1646 value *= 256;
1647 }
1648 info->shifter.operator_present = (info->shifter.amount != 0);
1649 info->shifter.amount_present = (info->shifter.amount != 0);
1650 info->imm.value = value;
1651 return TRUE;
1652 }
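/* Editorial worked example (not from the original source): a raw 9-bit
   immediate of 0x100 decodes above to #0, LSL #8, while 0x101 is recorded
   as a plain immediate of 256 with no explicit shift.  */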
1653
1654 /* Decode an SVE ADD/SUB immediate. */
1655 bfd_boolean
1656 aarch64_ext_sve_aimm (const aarch64_operand *self,
1657 aarch64_opnd_info *info, const aarch64_insn code,
1658 const aarch64_inst *inst,
1659 aarch64_operand_error *errors)
1660 {
1661 return (aarch64_ext_imm (self, info, code, inst, errors)
1662 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1663 }
1664
1665 /* Decode an SVE CPY/DUP immediate. */
1666 bfd_boolean
1667 aarch64_ext_sve_asimm (const aarch64_operand *self,
1668 aarch64_opnd_info *info, const aarch64_insn code,
1669 const aarch64_inst *inst,
1670 aarch64_operand_error *errors)
1671 {
1672 return (aarch64_ext_imm (self, info, code, inst, errors)
1673 && decode_sve_aimm (info, (int8_t) info->imm.value));
1674 }
1675
1676 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1677 The fields array specifies which field to use. */
1678 bfd_boolean
1679 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1680 aarch64_opnd_info *info, aarch64_insn code,
1681 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1682 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1683 {
1684 if (extract_field (self->fields[0], code, 0))
1685 info->imm.value = 0x3f800000;
1686 else
1687 info->imm.value = 0x3f000000;
1688 info->imm.is_fp = TRUE;
1689 return TRUE;
1690 }
1691
1692 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1693 The fields array specifies which field to use. */
1694 bfd_boolean
1695 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1696 aarch64_opnd_info *info, aarch64_insn code,
1697 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1698 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1699 {
1700 if (extract_field (self->fields[0], code, 0))
1701 info->imm.value = 0x40000000;
1702 else
1703 info->imm.value = 0x3f000000;
1704 info->imm.is_fp = TRUE;
1705 return TRUE;
1706 }
1707
1708 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1709 The fields array specifies which field to use. */
1710 bfd_boolean
1711 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1712 aarch64_opnd_info *info, aarch64_insn code,
1713 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1714 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1715 {
1716 if (extract_field (self->fields[0], code, 0))
1717 info->imm.value = 0x3f800000;
1718 else
1719 info->imm.value = 0x0;
1720 info->imm.is_fp = TRUE;
1721 return TRUE;
1722 }
1723
1724 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1725 array specifies which field to use for Zn. MM is encoded in the
1726 concatenation of imm5 and SVE_tszh, with imm5 being the less
1727 significant part. */
1728 bfd_boolean
1729 aarch64_ext_sve_index (const aarch64_operand *self,
1730 aarch64_opnd_info *info, aarch64_insn code,
1731 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1732 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1733 {
1734 int val;
1735
1736 info->reglane.regno = extract_field (self->fields[0], code, 0);
1737 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1738 if ((val & 31) == 0)
1739 return 0;
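  /* The position of the lowest set bit of tszh:imm5 gives the element size;
     stripping the trailing zeros leaves (index << 1) | 1, so the index is
     simply val / 2.  For example, val == 0b10100 decodes to index 2.  */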
1740 while ((val & 1) == 0)
1741 val /= 2;
1742 info->reglane.index = val / 2;
1743 return TRUE;
1744 }
1745
1746 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1747 bfd_boolean
1748 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1749 aarch64_opnd_info *info, const aarch64_insn code,
1750 const aarch64_inst *inst,
1751 aarch64_operand_error *errors)
1752 {
1753 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1754 return (aarch64_ext_limm (self, info, code, inst, errors)
1755 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1756 }
1757
1758 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1759 and where MM occupies the most-significant part. The operand-dependent
1760 value specifies the number of bits in Zn. */
1761 bfd_boolean
1762 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1763 aarch64_opnd_info *info, aarch64_insn code,
1764 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1765 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1766 {
1767 unsigned int reg_bits = get_operand_specific_data (self);
1768 unsigned int val = extract_all_fields (self, code);
1769 info->reglane.regno = val & ((1 << reg_bits) - 1);
1770 info->reglane.index = val >> reg_bits;
1771 return TRUE;
1772 }
1773
1774 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1775 to use for Zn. The opcode-dependent value specifies the number
1776 of registers in the list. */
1777 bfd_boolean
1778 aarch64_ext_sve_reglist (const aarch64_operand *self,
1779 aarch64_opnd_info *info, aarch64_insn code,
1780 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1781 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1782 {
1783 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1784 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1785 return TRUE;
1786 }
1787
1788 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1789 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1790 field. */
1791 bfd_boolean
1792 aarch64_ext_sve_scale (const aarch64_operand *self,
1793 aarch64_opnd_info *info, aarch64_insn code,
1794 const aarch64_inst *inst, aarch64_operand_error *errors)
1795 {
1796 int val;
1797
1798 if (!aarch64_ext_imm (self, info, code, inst, errors))
1799 return FALSE;
1800 val = extract_field (FLD_SVE_imm4, code, 0);
1801 info->shifter.kind = AARCH64_MOD_MUL;
1802 info->shifter.amount = val + 1;
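  /* MUL #1 is the implicit default, so the multiplier is printed only when
     the encoded value is nonzero.  */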
1803 info->shifter.operator_present = (val != 0);
1804 info->shifter.amount_present = (val != 0);
1805 return TRUE;
1806 }
1807
1808 /* Return the top set bit in VALUE, which is expected to be relatively
1809 small. */
1810 static uint64_t
1811 get_top_bit (uint64_t value)
1812 {
1813 while ((value & -value) != value)
1814 value -= value & -value;
1815 return value;
1816 }
1817
1818 /* Decode an SVE shift-left immediate. */
1819 bfd_boolean
1820 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1821 aarch64_opnd_info *info, const aarch64_insn code,
1822 const aarch64_inst *inst, aarch64_operand_error *errors)
1823 {
1824 if (!aarch64_ext_imm (self, info, code, inst, errors)
1825 || info->imm.value == 0)
1826 return FALSE;
1827
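  /* The highest set bit of the raw value marks the element size; the bits
     below it hold the shift amount, so subtracting the marker yields the
     left-shift amount.  */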
1828 info->imm.value -= get_top_bit (info->imm.value);
1829 return TRUE;
1830 }
1831
1832 /* Decode an SVE shift-right immediate. */
1833 bfd_boolean
1834 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1835 aarch64_opnd_info *info, const aarch64_insn code,
1836 const aarch64_inst *inst, aarch64_operand_error *errors)
1837 {
1838 if (!aarch64_ext_imm (self, info, code, inst, errors)
1839 || info->imm.value == 0)
1840 return FALSE;
1841
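  /* The element-size marker again comes from get_top_bit; the shift amount
     counts down from it, i.e. shift = 2 * marker - value, so e.g. an esize-8
     encoding of 15 decodes to a shift of 1.  */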
1842 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1843 return TRUE;
1844 }
1845 \f
1846 /* Bitfields that are commonly used to encode certain operands' information
1847 may be partially used as part of the base opcode in some instructions.
1848 For example, the bit 1 of the field 'size' in
1849 FCVTXN <Vb><d>, <Va><n>
1850 is actually part of the base opcode, while only size<0> is available
1851 for encoding the register type. Another example is the AdvSIMD
1852 instruction ORR (register), in which the field 'size' is also used for
1853 the base opcode, leaving only the field 'Q' available to encode the
1854 vector register arrangement specifier '8B' or '16B'.
1855
1856 This function tries to deduce the qualifier from the value of partially
1857 constrained field(s). Given the VALUE of such a field or fields, the
1858 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1859 operand encoding), the function returns the matching qualifier or
1860 AARCH64_OPND_QLF_NIL if nothing matches.
1861
1862 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1863 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1864 may end with AARCH64_OPND_QLF_NIL. */
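/* For example, for the AdvSIMD ORR (register) case described above, MASK
   would cover only the Q bit, so the comparison reduces to matching Q against
   each candidate's standard value, selecting between the 8B and 16B
   qualifiers.  */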
1865
1866 static enum aarch64_opnd_qualifier
1867 get_qualifier_from_partial_encoding (aarch64_insn value,
1868 const enum aarch64_opnd_qualifier* \
1869 candidates,
1870 aarch64_insn mask)
1871 {
1872 int i;
1873 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1874 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1875 {
1876 aarch64_insn standard_value;
1877 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1878 break;
1879 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1880 if ((standard_value & mask) == (value & mask))
1881 return candidates[i];
1882 }
1883 return AARCH64_OPND_QLF_NIL;
1884 }
1885
1886 /* Given a list of qualifier sequences, return all possible valid qualifiers
1887 for operand IDX in QUALIFIERS.
1888 Assume QUALIFIERS is an array whose length is large enough. */
1889
1890 static void
1891 get_operand_possible_qualifiers (int idx,
1892 const aarch64_opnd_qualifier_seq_t *list,
1893 enum aarch64_opnd_qualifier *qualifiers)
1894 {
1895 int i;
1896 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1897 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1898 break;
1899 }
1900
1901 /* Decode the size:Q field for e.g. SHADD.
1902 We tag one operand with the qualifier according to the code;
1903 whether that qualifier is valid for this opcode is the duty of
1904 the semantic checking. */
1905
1906 static int
1907 decode_sizeq (aarch64_inst *inst)
1908 {
1909 int idx;
1910 enum aarch64_opnd_qualifier qualifier;
1911 aarch64_insn code;
1912 aarch64_insn value, mask;
1913 enum aarch64_field_kind fld_sz;
1914 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1915
1916 if (inst->opcode->iclass == asisdlse
1917 || inst->opcode->iclass == asisdlsep
1918 || inst->opcode->iclass == asisdlso
1919 || inst->opcode->iclass == asisdlsop)
1920 fld_sz = FLD_vldst_size;
1921 else
1922 fld_sz = FLD_size;
1923
1924 code = inst->value;
1925 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1926 /* Work out which bits of the fields Q and size are actually
1927 available for operand encoding. Opcodes like FMAXNM and FMLA have
1928 size[1] unavailable. */
1929 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1930
1931 /* The index of the operand to tag with a qualifier, and the qualifier
1932 itself, are deduced from the value of the size and Q fields and the
1933 possible valid qualifier lists. */
1934 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1935 DEBUG_TRACE ("key idx: %d", idx);
1936
1937 /* For most of the related instructions, size:Q is fully available for operand
1938 encoding. */
1939 if (mask == 0x7)
1940 {
1941 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1942 return 1;
1943 }
1944
1945 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1946 candidates);
1947 #ifdef DEBUG_AARCH64
1948 if (debug_dump)
1949 {
1950 int i;
1951 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM
1952 && candidates[i] != AARCH64_OPND_QLF_NIL; ++i)
1953 DEBUG_TRACE ("qualifier %d: %s", i,
1954 aarch64_get_qualifier_name(candidates[i]));
1955 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1956 }
1957 #endif /* DEBUG_AARCH64 */
1958
1959 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1960
1961 if (qualifier == AARCH64_OPND_QLF_NIL)
1962 return 0;
1963
1964 inst->operands[idx].qualifier = qualifier;
1965 return 1;
1966 }
1967
1968 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1969 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1970
1971 static int
1972 decode_asimd_fcvt (aarch64_inst *inst)
1973 {
1974 aarch64_field field = {0, 0};
1975 aarch64_insn value;
1976 enum aarch64_opnd_qualifier qualifier;
1977
1978 gen_sub_field (FLD_size, 0, 1, &field);
1979 value = extract_field_2 (&field, inst->value, 0);
1980 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1981 : AARCH64_OPND_QLF_V_2D;
1982 switch (inst->opcode->op)
1983 {
1984 case OP_FCVTN:
1985 case OP_FCVTN2:
1986 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1987 inst->operands[1].qualifier = qualifier;
1988 break;
1989 case OP_FCVTL:
1990 case OP_FCVTL2:
1991 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1992 inst->operands[0].qualifier = qualifier;
1993 break;
1994 default:
1995 assert (0);
1996 return 0;
1997 }
1998
1999 return 1;
2000 }
2001
2002 /* Decode size[0], i.e. bit 22, for
2003 e.g. FCVTXN <Vb><d>, <Va><n>. */
2004
2005 static int
2006 decode_asisd_fcvtxn (aarch64_inst *inst)
2007 {
2008 aarch64_field field = {0, 0};
2009 gen_sub_field (FLD_size, 0, 1, &field);
2010 if (!extract_field_2 (&field, inst->value, 0))
2011 return 0;
2012 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2013 return 1;
2014 }
2015
2016 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
2017 static int
2018 decode_fcvt (aarch64_inst *inst)
2019 {
2020 enum aarch64_opnd_qualifier qualifier;
2021 aarch64_insn value;
2022 const aarch64_field field = {15, 2};
2023
2024 /* opc dstsize */
2025 value = extract_field_2 (&field, inst->value, 0);
2026 switch (value)
2027 {
2028 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2029 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2030 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2031 default: return 0;
2032 }
2033 inst->operands[0].qualifier = qualifier;
2034
2035 return 1;
2036 }
2037
2038 /* Do miscellaneous decodings that are not common enough to be driven by
2039 flags. */
2040
2041 static int
2042 do_misc_decoding (aarch64_inst *inst)
2043 {
2044 unsigned int value;
2045 switch (inst->opcode->op)
2046 {
2047 case OP_FCVT:
2048 return decode_fcvt (inst);
2049
2050 case OP_FCVTN:
2051 case OP_FCVTN2:
2052 case OP_FCVTL:
2053 case OP_FCVTL2:
2054 return decode_asimd_fcvt (inst);
2055
2056 case OP_FCVTXN_S:
2057 return decode_asisd_fcvtxn (inst);
2058
2059 case OP_MOV_P_P:
2060 case OP_MOVS_P_P:
2061 value = extract_field (FLD_SVE_Pn, inst->value, 0);
2062 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
2063 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2064
2065 case OP_MOV_Z_P_Z:
2066 return (extract_field (FLD_SVE_Zd, inst->value, 0)
2067 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2068
2069 case OP_MOV_Z_V:
2070 /* Index must be zero. */
2071 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
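      /* A power-of-two value means only the element-size marker bit is set,
	 i.e. the index bits are all zero.  */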
2072 return value > 0 && value <= 16 && value == (value & -value);
2073
2074 case OP_MOV_Z_Z:
2075 return (extract_field (FLD_SVE_Zn, inst->value, 0)
2076 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2077
2078 case OP_MOV_Z_Zi:
2079 /* Index must be nonzero. */
2080 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2081 return value > 0 && value != (value & -value);
2082
2083 case OP_MOVM_P_P_P:
2084 return (extract_field (FLD_SVE_Pd, inst->value, 0)
2085 == extract_field (FLD_SVE_Pm, inst->value, 0));
2086
2087 case OP_MOVZS_P_P_P:
2088 case OP_MOVZ_P_P_P:
2089 return (extract_field (FLD_SVE_Pn, inst->value, 0)
2090 == extract_field (FLD_SVE_Pm, inst->value, 0));
2091
2092 case OP_NOTS_P_P_P_Z:
2093 case OP_NOT_P_P_P_Z:
2094 return (extract_field (FLD_SVE_Pm, inst->value, 0)
2095 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2096
2097 default:
2098 return 0;
2099 }
2100 }
2101
2102 /* Opcodes that have fields shared by multiple operands are usually flagged
2103 with special flags. In this function, we detect such flags, decode the
2104 related field(s) and store the information in one of the related operands.
2105 That operand is not an arbitrary one, but one that can accommodate all the
2106 information that has been decoded. */
2107
2108 static int
2109 do_special_decoding (aarch64_inst *inst)
2110 {
2111 int idx;
2112 aarch64_insn value;
2113 /* Condition for truly conditionally executed instructions, e.g. b.cond. */
2114 if (inst->opcode->flags & F_COND)
2115 {
2116 value = extract_field (FLD_cond2, inst->value, 0);
2117 inst->cond = get_cond_from_value (value);
2118 }
2119 /* 'sf' field. */
2120 if (inst->opcode->flags & F_SF)
2121 {
2122 idx = select_operand_for_sf_field_coding (inst->opcode);
2123 value = extract_field (FLD_sf, inst->value, 0);
2124 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2125 if ((inst->opcode->flags & F_N)
2126 && extract_field (FLD_N, inst->value, 0) != value)
2127 return 0;
2128 }
2129 /* 'lse_sz' field. */
2130 if (inst->opcode->flags & F_LSE_SZ)
2131 {
2132 idx = select_operand_for_sf_field_coding (inst->opcode);
2133 value = extract_field (FLD_lse_sz, inst->value, 0);
2134 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2135 }
2136 /* size:Q fields. */
2137 if (inst->opcode->flags & F_SIZEQ)
2138 return decode_sizeq (inst);
2139
2140 if (inst->opcode->flags & F_FPTYPE)
2141 {
2142 idx = select_operand_for_fptype_field_coding (inst->opcode);
2143 value = extract_field (FLD_type, inst->value, 0);
2144 switch (value)
2145 {
2146 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2147 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2148 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2149 default: return 0;
2150 }
2151 }
2152
2153 if (inst->opcode->flags & F_SSIZE)
2154 {
2155 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
2156 of the base opcode. */
2157 aarch64_insn mask;
2158 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2159 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2160 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2161 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2162 /* For most of the related instructions, the 'size' field is fully available for
2163 operand encoding. */
2164 if (mask == 0x3)
2165 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2166 else
2167 {
2168 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2169 candidates);
2170 inst->operands[idx].qualifier
2171 = get_qualifier_from_partial_encoding (value, candidates, mask);
2172 }
2173 }
2174
2175 if (inst->opcode->flags & F_T)
2176 {
2177 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2178 int num = 0;
2179 unsigned val, Q;
2180 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2181 == AARCH64_OPND_CLASS_SIMD_REG);
2182 /* imm5<3:0> q <t>
2183 0000 x reserved
2184 xxx1 0 8b
2185 xxx1 1 16b
2186 xx10 0 4h
2187 xx10 1 8h
2188 x100 0 2s
2189 x100 1 4s
2190 1000 0 reserved
2191 1000 1 2d */
2192 val = extract_field (FLD_imm5, inst->value, 0);
2193 while ((val & 0x1) == 0 && ++num <= 3)
2194 val >>= 1;
2195 if (num > 3)
2196 return 0;
2197 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2198 inst->operands[0].qualifier =
2199 get_vreg_qualifier_from_value ((num << 1) | Q);
2200 }
2201
2202 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2203 {
2204 /* Use Rt to encode in the case of e.g.
2205 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2206 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2207 if (idx == -1)
2208 {
2209 /* Otherwise use the result operand, which has to be an integer
2210 register. */
2211 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2212 == AARCH64_OPND_CLASS_INT_REG);
2213 idx = 0;
2214 }
2215 assert (idx == 0 || idx == 1);
2216 value = extract_field (FLD_Q, inst->value, 0);
2217 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2218 }
2219
2220 if (inst->opcode->flags & F_LDS_SIZE)
2221 {
2222 aarch64_field field = {0, 0};
2223 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2224 == AARCH64_OPND_CLASS_INT_REG);
2225 gen_sub_field (FLD_opc, 0, 1, &field);
2226 value = extract_field_2 (&field, inst->value, 0);
2227 inst->operands[0].qualifier
2228 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2229 }
2230
2231 /* Miscellaneous decoding; done as the last step. */
2232 if (inst->opcode->flags & F_MISC)
2233 return do_misc_decoding (inst);
2234
2235 return 1;
2236 }
2237
2238 /* Converters converting a real opcode instruction to its alias form. */
2239
2240 /* ROR <Wd>, <Ws>, #<shift>
2241 is equivalent to:
2242 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2243 static int
2244 convert_extr_to_ror (aarch64_inst *inst)
2245 {
2246 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2247 {
2248 copy_operand_info (inst, 2, 3);
2249 inst->operands[3].type = AARCH64_OPND_NIL;
2250 return 1;
2251 }
2252 return 0;
2253 }
2254
2255 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2256 is equivalent to:
2257 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2258 static int
2259 convert_shll_to_xtl (aarch64_inst *inst)
2260 {
2261 if (inst->operands[2].imm.value == 0)
2262 {
2263 inst->operands[2].type = AARCH64_OPND_NIL;
2264 return 1;
2265 }
2266 return 0;
2267 }
2268
2269 /* Convert
2270 UBFM <Xd>, <Xn>, #<shift>, #63.
2271 to
2272 LSR <Xd>, <Xn>, #<shift>. */
2273 static int
2274 convert_bfm_to_sr (aarch64_inst *inst)
2275 {
2276 int64_t imms, val;
2277
2278 imms = inst->operands[3].imm.value;
2279 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2280 if (imms == val)
2281 {
2282 inst->operands[3].type = AARCH64_OPND_NIL;
2283 return 1;
2284 }
2285
2286 return 0;
2287 }
2288
2289 /* Convert MOV to ORR. */
2290 static int
2291 convert_orr_to_mov (aarch64_inst *inst)
2292 {
2293 /* MOV <Vd>.<T>, <Vn>.<T>
2294 is equivalent to:
2295 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2296 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2297 {
2298 inst->operands[2].type = AARCH64_OPND_NIL;
2299 return 1;
2300 }
2301 return 0;
2302 }
2303
2304 /* When <imms> >= <immr>, the instruction written:
2305 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2306 is equivalent to:
2307 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2308
2309 static int
2310 convert_bfm_to_bfx (aarch64_inst *inst)
2311 {
2312 int64_t immr, imms;
2313
2314 immr = inst->operands[2].imm.value;
2315 imms = inst->operands[3].imm.value;
2316 if (imms >= immr)
2317 {
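      /* Going from the real form back to the alias: lsb = immr and
	 width = imms - immr + 1, inverting the formula in the comment
	 above.  */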
2318 int64_t lsb = immr;
2319 inst->operands[2].imm.value = lsb;
2320 inst->operands[3].imm.value = imms + 1 - lsb;
2321 /* The two opcodes have different qualifiers for
2322 the immediate operands; reset to help the checking. */
2323 reset_operand_qualifier (inst, 2);
2324 reset_operand_qualifier (inst, 3);
2325 return 1;
2326 }
2327
2328 return 0;
2329 }
2330
2331 /* When <imms> < <immr>, the instruction written:
2332 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2333 is equivalent to:
2334 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2335
2336 static int
2337 convert_bfm_to_bfi (aarch64_inst *inst)
2338 {
2339 int64_t immr, imms, val;
2340
2341 immr = inst->operands[2].imm.value;
2342 imms = inst->operands[3].imm.value;
2343 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2344 if (imms < immr)
2345 {
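      /* Inverting the formula above: with VAL being the register size
	 (32 or 64), lsb = (VAL - immr) & (VAL - 1) and width = imms + 1.  */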
2346 inst->operands[2].imm.value = (val - immr) & (val - 1);
2347 inst->operands[3].imm.value = imms + 1;
2348 /* The two opcodes have different qualifiers for
2349 the immediate operands; reset to help the checking. */
2350 reset_operand_qualifier (inst, 2);
2351 reset_operand_qualifier (inst, 3);
2352 return 1;
2353 }
2354
2355 return 0;
2356 }
2357
2358 /* The instruction written:
2359 BFC <Xd>, #<lsb>, #<width>
2360 is equivalent to:
2361 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2362
2363 static int
2364 convert_bfm_to_bfc (aarch64_inst *inst)
2365 {
2366 int64_t immr, imms, val;
2367
2368 /* Should have been assured by the base opcode value. */
2369 assert (inst->operands[1].reg.regno == 0x1f);
2370
2371 immr = inst->operands[2].imm.value;
2372 imms = inst->operands[3].imm.value;
2373 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2374 if (imms < immr)
2375 {
2376 /* Drop XZR from the second operand. */
2377 copy_operand_info (inst, 1, 2);
2378 copy_operand_info (inst, 2, 3);
2379 inst->operands[3].type = AARCH64_OPND_NIL;
2380
2381 /* Recalculate the immediates. */
2382 inst->operands[1].imm.value = (val - immr) & (val - 1);
2383 inst->operands[2].imm.value = imms + 1;
2384
2385 /* The two opcodes have different qualifiers for the operands; reset to
2386 help the checking. */
2387 reset_operand_qualifier (inst, 1);
2388 reset_operand_qualifier (inst, 2);
2389 reset_operand_qualifier (inst, 3);
2390
2391 return 1;
2392 }
2393
2394 return 0;
2395 }
2396
2397 /* The instruction written:
2398 LSL <Xd>, <Xn>, #<shift>
2399 is equivalent to:
2400 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2401
2402 static int
2403 convert_ubfm_to_lsl (aarch64_inst *inst)
2404 {
2405 int64_t immr = inst->operands[2].imm.value;
2406 int64_t imms = inst->operands[3].imm.value;
2407 int64_t val
2408 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2409
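  /* From the formula above, a shift of S is encoded as
     immr = (size - S) & (size - 1) and imms = size - 1 - S = VAL - S, so
     either S == 0 (immr == 0 and imms == VAL) or immr == imms + 1; in both
     cases the shift is recovered as VAL - imms.  */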
2410 if ((immr == 0 && imms == val) || immr == imms + 1)
2411 {
2412 inst->operands[3].type = AARCH64_OPND_NIL;
2413 inst->operands[2].imm.value = val - imms;
2414 return 1;
2415 }
2416
2417 return 0;
2418 }
2419
2420 /* CINC <Wd>, <Wn>, <cond>
2421 is equivalent to:
2422 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2423 where <cond> is not AL or NV. */
2424
2425 static int
2426 convert_from_csel (aarch64_inst *inst)
2427 {
2428 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2429 && (inst->operands[3].cond->value & 0xe) != 0xe)
2430 {
2431 copy_operand_info (inst, 2, 3);
2432 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2433 inst->operands[3].type = AARCH64_OPND_NIL;
2434 return 1;
2435 }
2436 return 0;
2437 }
2438
2439 /* CSET <Wd>, <cond>
2440 is equivalent to:
2441 CSINC <Wd>, WZR, WZR, invert(<cond>)
2442 where <cond> is not AL or NV. */
2443
2444 static int
2445 convert_csinc_to_cset (aarch64_inst *inst)
2446 {
2447 if (inst->operands[1].reg.regno == 0x1f
2448 && inst->operands[2].reg.regno == 0x1f
2449 && (inst->operands[3].cond->value & 0xe) != 0xe)
2450 {
2451 copy_operand_info (inst, 1, 3);
2452 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2453 inst->operands[3].type = AARCH64_OPND_NIL;
2454 inst->operands[2].type = AARCH64_OPND_NIL;
2455 return 1;
2456 }
2457 return 0;
2458 }
2459
2460 /* MOV <Wd>, #<imm>
2461 is equivalent to:
2462 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2463
2464 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2465 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2466 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2467 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2468 machine-instruction mnemonic must be used. */
2469
2470 static int
2471 convert_movewide_to_mov (aarch64_inst *inst)
2472 {
2473 uint64_t value = inst->operands[1].imm.value;
2474 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2475 if (value == 0 && inst->operands[1].shifter.amount != 0)
2476 return 0;
2477 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2478 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2479 value <<= inst->operands[1].shifter.amount;
2480 /* As an alias converter, note that INST->OPCODE is the opcode of
2481 the real instruction. */
2482 if (inst->opcode->op == OP_MOVN)
2483 {
2484 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2485 value = ~value;
2486 /* A MOVN has an immediate that could be encoded by MOVZ. */
2487 if (aarch64_wide_constant_p (value, is32, NULL))
2488 return 0;
2489 }
2490 inst->operands[1].imm.value = value;
2491 inst->operands[1].shifter.amount = 0;
2492 return 1;
2493 }
2494
2495 /* MOV <Wd>, #<imm>
2496 is equivalent to:
2497 ORR <Wd>, WZR, #<imm>.
2498
2499 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2500 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2501 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2502 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2503 machine-instruction mnemonic must be used. */
2504
2505 static int
2506 convert_movebitmask_to_mov (aarch64_inst *inst)
2507 {
2508 int is32;
2509 uint64_t value;
2510
2511 /* Should have been assured by the base opcode value. */
2512 assert (inst->operands[1].reg.regno == 0x1f);
2513 copy_operand_info (inst, 1, 2);
2514 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2515 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2516 value = inst->operands[1].imm.value;
2517 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2518 instruction. */
2519 if (inst->operands[0].reg.regno != 0x1f
2520 && (aarch64_wide_constant_p (value, is32, NULL)
2521 || aarch64_wide_constant_p (~value, is32, NULL)))
2522 return 0;
2523
2524 inst->operands[2].type = AARCH64_OPND_NIL;
2525 return 1;
2526 }
2527
2528 /* Some alias opcodes are disassembled by being converted from their real-form.
2529 N.B. INST->OPCODE is the real opcode rather than the alias. */
2530
2531 static int
2532 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2533 {
2534 switch (alias->op)
2535 {
2536 case OP_ASR_IMM:
2537 case OP_LSR_IMM:
2538 return convert_bfm_to_sr (inst);
2539 case OP_LSL_IMM:
2540 return convert_ubfm_to_lsl (inst);
2541 case OP_CINC:
2542 case OP_CINV:
2543 case OP_CNEG:
2544 return convert_from_csel (inst);
2545 case OP_CSET:
2546 case OP_CSETM:
2547 return convert_csinc_to_cset (inst);
2548 case OP_UBFX:
2549 case OP_BFXIL:
2550 case OP_SBFX:
2551 return convert_bfm_to_bfx (inst);
2552 case OP_SBFIZ:
2553 case OP_BFI:
2554 case OP_UBFIZ:
2555 return convert_bfm_to_bfi (inst);
2556 case OP_BFC:
2557 return convert_bfm_to_bfc (inst);
2558 case OP_MOV_V:
2559 return convert_orr_to_mov (inst);
2560 case OP_MOV_IMM_WIDE:
2561 case OP_MOV_IMM_WIDEN:
2562 return convert_movewide_to_mov (inst);
2563 case OP_MOV_IMM_LOG:
2564 return convert_movebitmask_to_mov (inst);
2565 case OP_ROR_IMM:
2566 return convert_extr_to_ror (inst);
2567 case OP_SXTL:
2568 case OP_SXTL2:
2569 case OP_UXTL:
2570 case OP_UXTL2:
2571 return convert_shll_to_xtl (inst);
2572 default:
2573 return 0;
2574 }
2575 }
2576
2577 static bfd_boolean
2578 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2579 aarch64_inst *, int, aarch64_operand_error *errors);
2580
2581 /* Given the instruction information in *INST, check if the instruction has
2582 any alias form that can be used to represent *INST. If the answer is yes,
2583 update *INST to be in the form of the determined alias. */
2584
2585 /* In the opcode description table, the following flags are used in opcode
2586 entries to help establish the relations between the real and alias opcodes:
2587
2588 F_ALIAS: opcode is an alias
2589 F_HAS_ALIAS: opcode has alias(es)
2590 F_P1
2591 F_P2
2592 F_P3: Disassembly preference priority 1-3 (the larger the number,
2593 the higher the priority). If nothing is specified, the priority
2594 defaults to 0, i.e. the lowest priority.
2595
2596 Although the relation between the machine and the alias instructions is not
2597 explicitly described, it can be easily determined from the base opcode
2598 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2599 description entries:
2600
2601 The mask of an alias opcode must be equal to or a super-set (i.e. more
2602 constrained) of that of the aliased opcode; so is the base opcode value.
2603
2604 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2605 && (opcode->mask & real->mask) == real->mask
2606 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2607 then OPCODE is an alias of, and only of, the REAL instruction
2608
2609 The alias relationship is kept flat to keep the related algorithm
2610 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2611
2612 During the disassembling, the decoding decision tree (in
2613 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2614 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2615 not specified), the disassembler will check whether any alias instruction
2616 exists for this real instruction. If there is, the disassembler will try to
2617 disassemble the 32-bit binary again using the alias's rule, or try to
2618 convert the IR to the form of the alias. If there are multiple aliases,
2619 they are tried one by one from the highest priority (currently the flag
2620 F_P3) to the lowest priority (no priority flag), and the first one that
2621 succeeds is adopted.
2622
2623 You may ask why there is a need for the conversion of IR from one form to
2624 another in handling certain aliases. This is because on one hand it avoids
2625 adding more operand code to handle unusual encoding/decoding; on the other
2626 hand, during the disassembling, the conversion is an effective approach to
2627 check the condition of an alias (as an alias may be adopted only if certain
2628 conditions are met).
2629
2630 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2631 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2632 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
2633
2634 static void
2635 determine_disassembling_preference (struct aarch64_inst *inst,
2636 aarch64_operand_error *errors)
2637 {
2638 const aarch64_opcode *opcode;
2639 const aarch64_opcode *alias;
2640
2641 opcode = inst->opcode;
2642
2643 /* This opcode does not have an alias, so use itself. */
2644 if (!opcode_has_alias (opcode))
2645 return;
2646
2647 alias = aarch64_find_alias_opcode (opcode);
2648 assert (alias);
2649
2650 #ifdef DEBUG_AARCH64
2651 if (debug_dump)
2652 {
2653 const aarch64_opcode *tmp = alias;
2654 printf ("#### LIST orderd: ");
2655 while (tmp)
2656 {
2657 printf ("%s, ", tmp->name);
2658 tmp = aarch64_find_next_alias_opcode (tmp);
2659 }
2660 printf ("\n");
2661 }
2662 #endif /* DEBUG_AARCH64 */
2663
2664 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2665 {
2666 DEBUG_TRACE ("try %s", alias->name);
2667 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2668
2669 /* An alias can be a pseudo opcode which will never be used in the
2670 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2671 aliasing AND. */
2672 if (pseudo_opcode_p (alias))
2673 {
2674 DEBUG_TRACE ("skip pseudo %s", alias->name);
2675 continue;
2676 }
2677
2678 if ((inst->value & alias->mask) != alias->opcode)
2679 {
2680 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2681 continue;
2682 }
2683 /* No need to do any complicated transformation on operands, if the alias
2684 opcode does not have any operand. */
2685 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2686 {
2687 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2688 aarch64_replace_opcode (inst, alias);
2689 return;
2690 }
2691 if (alias->flags & F_CONV)
2692 {
2693 aarch64_inst copy;
2694 memcpy (&copy, inst, sizeof (aarch64_inst));
2695 /* ALIAS is the preference as long as the instruction can be
2696 successfully converted to the form of ALIAS. */
2697 if (convert_to_alias (&copy, alias) == 1)
2698 {
2699 aarch64_replace_opcode (&copy, alias);
2700 assert (aarch64_match_operands_constraint (&copy, NULL));
2701 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2702 memcpy (inst, &copy, sizeof (aarch64_inst));
2703 return;
2704 }
2705 }
2706 else
2707 {
2708 /* Directly decode the alias opcode. */
2709 aarch64_inst temp;
2710 memset (&temp, '\0', sizeof (aarch64_inst));
2711 if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
2712 {
2713 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2714 memcpy (inst, &temp, sizeof (aarch64_inst));
2715 return;
2716 }
2717 }
2718 }
2719 }
2720
2721 /* Some instructions (including all SVE ones) use the instruction class
2722 to describe how a qualifiers_list index is represented in the instruction
2723 encoding. If INST is such an instruction, decode the appropriate fields
2724 and fill in the operand qualifiers accordingly. Return true if no
2725 problems are found. */
2726
2727 static bfd_boolean
2728 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2729 {
2730 int i, variant;
2731
2732 variant = 0;
2733 switch (inst->opcode->iclass)
2734 {
2735 case sve_cpy:
2736 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2737 break;
2738
2739 case sve_index:
2740 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2741 if ((i & 31) == 0)
2742 return FALSE;
2743 while ((i & 1) == 0)
2744 {
2745 i >>= 1;
2746 variant += 1;
2747 }
2748 break;
2749
2750 case sve_limm:
2751 /* Pick the smallest applicable element size. */
2752 if ((inst->value & 0x20600) == 0x600)
2753 variant = 0;
2754 else if ((inst->value & 0x20400) == 0x400)
2755 variant = 1;
2756 else if ((inst->value & 0x20000) == 0)
2757 variant = 2;
2758 else
2759 variant = 3;
2760 break;
2761
2762 case sve_misc:
2763 /* sve_misc instructions have only a single variant. */
2764 break;
2765
2766 case sve_movprfx:
2767 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2768 break;
2769
2770 case sve_pred_zm:
2771 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2772 break;
2773
2774 case sve_shift_pred:
2775 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2776 sve_shift:
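    /* Shared by sve_shift_pred (fall-through) and sve_shift_unpred (goto):
       the variant is the bit position of the most significant set bit of the
       tsz value, i.e. floor (log2 (i)), selecting the corresponding entry in
       the opcode's qualifiers_list.  */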
2777 if (i == 0)
2778 return FALSE;
2779 while (i != 1)
2780 {
2781 i >>= 1;
2782 variant += 1;
2783 }
2784 break;
2785
2786 case sve_shift_unpred:
2787 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2788 goto sve_shift;
2789
2790 case sve_size_bhs:
2791 variant = extract_field (FLD_size, inst->value, 0);
2792 if (variant >= 3)
2793 return FALSE;
2794 break;
2795
2796 case sve_size_bhsd:
2797 variant = extract_field (FLD_size, inst->value, 0);
2798 break;
2799
2800 case sve_size_hsd:
2801 i = extract_field (FLD_size, inst->value, 0);
2802 if (i < 1)
2803 return FALSE;
2804 variant = i - 1;
2805 break;
2806
2807 case sve_size_bh:
2808 case sve_size_sd:
2809 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2810 break;
2811
2812 case sve_size_sd2:
2813 variant = extract_field (FLD_SVE_sz2, inst->value, 0);
2814 break;
2815
2816 case sve_size_hsd2:
2817 i = extract_field (FLD_SVE_size, inst->value, 0);
2818 if (i < 1)
2819 return FALSE;
2820 variant = i - 1;
2821 break;
2822
2823 case sve_size_13:
2824 /* Ignore low bit of this field since that is set in the opcode for
2825 instructions of this iclass. */
2826 i = (extract_field (FLD_size, inst->value, 0) & 2);
2827 variant = (i >> 1);
2828 break;
2829
2830 case sve_shift_tsz_bhsd:
2831 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2832 if (i == 0)
2833 return FALSE;
2834 while (i != 1)
2835 {
2836 i >>= 1;
2837 variant += 1;
2838 }
2839 break;
2840
2841 case sve_size_tsz_bhs:
2842 i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
2843 if (i == 0)
2844 return FALSE;
2845 while (i != 1)
2846 {
2847 if (i & 1)
2848 return FALSE;
2849 i >>= 1;
2850 variant += 1;
2851 }
2852 break;
2853
2854 case sve_shift_tsz_hsd:
2855 i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
2856 if (i == 0)
2857 return FALSE;
2858 while (i != 1)
2859 {
2860 i >>= 1;
2861 variant += 1;
2862 }
2863 break;
2864
2865 default:
2866 /* No mapping between instruction class and qualifiers. */
2867 return TRUE;
2868 }
2869
2870 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2871 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2872 return TRUE;
2873 }
2874 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2875 fails, which means that CODE is not an instruction of OPCODE; otherwise
2876 return 1.
2877
2878 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2879 determined and used to disassemble CODE; this is done just before the
2880 return. */
2881
2882 static bfd_boolean
2883 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2884 aarch64_inst *inst, int noaliases_p,
2885 aarch64_operand_error *errors)
2886 {
2887 int i;
2888
2889 DEBUG_TRACE ("enter with %s", opcode->name);
2890
2891 assert (opcode && inst);
2892
2893 /* Clear inst. */
2894 memset (inst, '\0', sizeof (aarch64_inst));
2895
2896 /* Check the base opcode. */
2897 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2898 {
2899 DEBUG_TRACE ("base opcode match FAIL");
2900 goto decode_fail;
2901 }
2902
2903 inst->opcode = opcode;
2904 inst->value = code;
2905
2906 /* Assign operand codes and indexes. */
2907 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2908 {
2909 if (opcode->operands[i] == AARCH64_OPND_NIL)
2910 break;
2911 inst->operands[i].type = opcode->operands[i];
2912 inst->operands[i].idx = i;
2913 }
2914
2915 /* Call the opcode decoder indicated by flags. */
2916 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2917 {
2918 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2919 goto decode_fail;
2920 }
2921
2922 /* Possibly use the instruction class to determine the correct
2923 qualifier. */
2924 if (!aarch64_decode_variant_using_iclass (inst))
2925 {
2926 DEBUG_TRACE ("iclass-based decoder FAIL");
2927 goto decode_fail;
2928 }
2929
2930 /* Call operand decoders. */
2931 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2932 {
2933 const aarch64_operand *opnd;
2934 enum aarch64_opnd type;
2935
2936 type = opcode->operands[i];
2937 if (type == AARCH64_OPND_NIL)
2938 break;
2939 opnd = &aarch64_operands[type];
2940 if (operand_has_extractor (opnd)
2941 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
2942 errors)))
2943 {
2944 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2945 goto decode_fail;
2946 }
2947 }
2948
2949 /* If the opcode has a verifier, then check it now. */
2950 if (opcode->verifier
2951 && opcode->verifier (inst, code, 0, FALSE, errors, NULL) != ERR_OK)
2952 {
2953 DEBUG_TRACE ("operand verifier FAIL");
2954 goto decode_fail;
2955 }
2956
2957 /* Match the qualifiers. */
2958 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2959 {
2960 /* Arriving here, the CODE has been determined as a valid instruction
2961 of OPCODE and *INST has been filled with information of this OPCODE
2962 instruction. Before the return, check if the instruction has any
2963 alias and should be disassembled in the form of its alias instead.
2964 If the answer is yes, *INST will be updated. */
2965 if (!noaliases_p)
2966 determine_disassembling_preference (inst, errors);
2967 DEBUG_TRACE ("SUCCESS");
2968 return TRUE;
2969 }
2970 else
2971 {
2972 DEBUG_TRACE ("constraint matching FAIL");
2973 }
2974
2975 decode_fail:
2976 return FALSE;
2977 }
2978 \f
2979 /* This does some user-friendly fix-up to *INST. It currently focuses on
2980 adjusting qualifiers so that the printed instruction is easier to
2981 recognize and understand. */
2982
2983 static void
2984 user_friendly_fixup (aarch64_inst *inst)
2985 {
2986 switch (inst->opcode->iclass)
2987 {
2988 case testbranch:
2989 /* TBNZ Xn|Wn, #uimm6, label
2990 Test and Branch Not Zero: conditionally jumps to label if bit number
2991 uimm6 in register Xn is not zero. The bit number implies the width of
2992 the register, which may be written and should be disassembled as Wn if
2993 uimm6 is less than 32. Limited to a branch offset range of +/- 32KiB.
2994 */
2995 if (inst->operands[1].imm.value < 32)
2996 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2997 break;
2998 default: break;
2999 }
3000 }
3001
3002 /* Decode INSN and fill in the instruction information in *INST. An alias
3003 opcode may be filled in *INST if NOALIASES_P is FALSE. Return ERR_OK on
3004 success. */
3005
3006 enum err_type
3007 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
3008 bfd_boolean noaliases_p,
3009 aarch64_operand_error *errors)
3010 {
3011 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
3012
3013 #ifdef DEBUG_AARCH64
3014 if (debug_dump)
3015 {
3016 const aarch64_opcode *tmp = opcode;
3017 printf ("\n");
3018 DEBUG_TRACE ("opcode lookup:");
3019 while (tmp != NULL)
3020 {
3021 aarch64_verbose (" %s", tmp->name);
3022 tmp = aarch64_find_next_opcode (tmp);
3023 }
3024 }
3025 #endif /* DEBUG_AARCH64 */
3026
3027 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
3028 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
3029 opcode field and value, apart from the difference that one of them has an
3030 extra field as part of the opcode, but such a field is used for operand
3031 encoding in other opcode(s) ('immh' in the case of the example). */
3032 while (opcode != NULL)
3033 {
3034 /* But only one opcode can be decoded successfully, as the
3035 decoding routine will check the constraints carefully. */
3036 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
3037 return ERR_OK;
3038 opcode = aarch64_find_next_opcode (opcode);
3039 }
3040
3041 return ERR_UND;
3042 }
3043
3044 /* Print operands. */
3045
3046 static void
3047 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
3048 const aarch64_opnd_info *opnds, struct disassemble_info *info,
3049 bfd_boolean *has_notes)
3050 {
3051 char *notes = NULL;
3052 int i, pcrel_p, num_printed;
3053 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3054 {
3055 char str[128];
3056 /* We rely mainly on the operand info from the opcode, but we also look
3057 into inst->operands to support the disassembling of optional
3058 operands.
3059 The two operand codes should be the same in all cases, apart from
3060 when the operand can be optional. */
3061 if (opcode->operands[i] == AARCH64_OPND_NIL
3062 || opnds[i].type == AARCH64_OPND_NIL)
3063 break;
3064
3065 /* Generate the operand string in STR. */
3066 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
3067 &info->target, &notes);
3068
3069 /* Print the delimiter (taking account of omitted operand(s)). */
3070 if (str[0] != '\0')
3071 (*info->fprintf_func) (info->stream, "%s",
3072 num_printed++ == 0 ? "\t" : ", ");
3073
3074 /* Print the operand. */
3075 if (pcrel_p)
3076 (*info->print_address_func) (info->target, info);
3077 else
3078 (*info->fprintf_func) (info->stream, "%s", str);
3079 }
3080
3081 if (notes && !no_notes)
3082 {
3083 *has_notes = TRUE;
3084 (*info->fprintf_func) (info->stream, " // note: %s", notes);
3085 }
3086 }
3087
3088 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
3089
3090 static void
3091 remove_dot_suffix (char *name, const aarch64_inst *inst)
3092 {
3093 char *ptr;
3094 size_t len;
3095
3096 ptr = strchr (inst->opcode->name, '.');
3097 assert (ptr && inst->cond);
3098 len = ptr - inst->opcode->name;
3099 assert (len < 8);
3100 strncpy (name, inst->opcode->name, len);
3101 name[len] = '\0';
3102 }
3103
3104 /* Print the instruction mnemonic name. */
3105
3106 static void
3107 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3108 {
3109 if (inst->opcode->flags & F_COND)
3110 {
3111 /* For instructions that are truly conditionally executed, e.g. b.cond,
3112 prepare the full mnemonic name with the corresponding condition
3113 suffix. */
3114 char name[8];
3115
3116 remove_dot_suffix (name, inst);
3117 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
3118 }
3119 else
3120 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
3121 }
3122
3123 /* Decide whether we need to print a comment after the operands of
3124 instruction INST. */
3125
3126 static void
3127 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
3128 {
3129 if (inst->opcode->flags & F_COND)
3130 {
3131 char name[8];
3132 unsigned int i, num_conds;
3133
3134 remove_dot_suffix (name, inst);
3135 num_conds = ARRAY_SIZE (inst->cond->names);
3136 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
3137 (*info->fprintf_func) (info->stream, "%s %s.%s",
3138 i == 1 ? " //" : ",",
3139 name, inst->cond->names[i]);
3140 }
3141 }
3142
3143 /* Build notes from verifiers into a string for printing. */
3144
3145 static void
3146 print_verifier_notes (aarch64_operand_error *detail,
3147 struct disassemble_info *info)
3148 {
3149 if (no_notes)
3150 return;
3151
3152 /* The output of the verifier cannot be a fatal error, otherwise the assembly
3153 would not have succeeded. We can safely ignore these. */
3154 assert (detail->non_fatal);
3155 assert (detail->error);
3156
3157 /* If there are multiple verifier messages, concat them up to 1k. */
3158 (*info->fprintf_func) (info->stream, " // note: %s", detail->error);
3159 if (detail->index >= 0)
3160 (*info->fprintf_func) (info->stream, " at operand %d", detail->index + 1);
3161 }
3162
3163 /* Print the instruction according to *INST. */
3164
3165 static void
3166 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
3167 const aarch64_insn code,
3168 struct disassemble_info *info,
3169 aarch64_operand_error *mismatch_details)
3170 {
3171 bfd_boolean has_notes = FALSE;
3172
3173 print_mnemonic_name (inst, info);
3174 print_operands (pc, inst->opcode, inst->operands, info, &has_notes);
3175 print_comment (inst, info);
3176
3177 /* If we've already printed a note, there is not enough space to print
3178 more, so exit. Notes usually shouldn't overlap, so it shouldn't happen
3179 that we have a note from a register and an instruction at the same time. */
3180 if (has_notes)
3181 return;
3182
3183 /* Always run the constraint verifiers; this is needed because constraints
3184 need to maintain a global state regardless of whether the instruction has
3185 the flag set or not. */
3186 enum err_type result = verify_constraints (inst, code, pc, FALSE,
3187 mismatch_details, &insn_sequence);
3188 switch (result)
3189 {
3190 case ERR_UND:
3191 case ERR_UNP:
3192 case ERR_NYI:
3193 assert (0);
3194 case ERR_VFI:
3195 print_verifier_notes (mismatch_details, info);
3196 break;
3197 default:
3198 break;
3199 }
3200 }
3201
3202 /* Entry-point of the instruction disassembler and printer. */
3203
3204 static void
3205 print_insn_aarch64_word (bfd_vma pc,
3206 uint32_t word,
3207 struct disassemble_info *info,
3208 aarch64_operand_error *errors)
3209 {
3210 static const char *err_msg[ERR_NR_ENTRIES+1] =
3211 {
3212 [ERR_OK] = "_",
3213 [ERR_UND] = "undefined",
3214 [ERR_UNP] = "unpredictable",
3215 [ERR_NYI] = "NYI"
3216 };
3217
3218 enum err_type ret;
3219 aarch64_inst inst;
3220
3221 info->insn_info_valid = 1;
3222 info->branch_delay_insns = 0;
3223 info->data_size = 0;
3224 info->target = 0;
3225 info->target2 = 0;
3226
3227 if (info->flags & INSN_HAS_RELOC)
3228 /* If the instruction has a reloc associated with it, then
3229 the offset field in the instruction will actually be the
3230 addend for the reloc. (If we are using REL type relocs).
3231 In such cases, we can ignore the pc when computing
3232 addresses, since the addend is not currently pc-relative. */
3233 pc = 0;
3234
3235 ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
3236
3237 if (((word >> 21) & 0x3ff) == 1)
3238 {
3239 /* RESERVED for ALES. */
3240 assert (ret != ERR_OK);
3241 ret = ERR_NYI;
3242 }
3243
3244 switch (ret)
3245 {
3246 case ERR_UND:
3247 case ERR_UNP:
3248 case ERR_NYI:
3249 /* Handle undefined instructions. */
3250 info->insn_type = dis_noninsn;
3251 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
3252 word, err_msg[ret]);
3253 break;
3254 case ERR_OK:
3255 user_friendly_fixup (&inst);
3256 print_aarch64_insn (pc, &inst, word, info, errors);
3257 break;
3258 default:
3259 abort ();
3260 }
3261 }
3262
3263 /* Disallow mapping symbols ($x, $d, etc.) from
3264 being displayed in symbol-relative addresses. */
3265
3266 bfd_boolean
3267 aarch64_symbol_is_valid (asymbol * sym,
3268 struct disassemble_info * info ATTRIBUTE_UNUSED)
3269 {
3270 const char * name;
3271
3272 if (sym == NULL)
3273 return FALSE;
3274
3275 name = bfd_asymbol_name (sym);
3276
3277 return name
3278 && (name[0] != '$'
3279 || (name[1] != 'x' && name[1] != 'd')
3280 || (name[2] != '\0' && name[2] != '.'));
3281 }
3282
3283 /* Print data bytes on INFO->STREAM. */
3284
3285 static void
3286 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3287 uint32_t word,
3288 struct disassemble_info *info,
3289 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
3290 {
3291 switch (info->bytes_per_chunk)
3292 {
3293 case 1:
3294 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3295 break;
3296 case 2:
3297 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3298 break;
3299 case 4:
3300 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3301 break;
3302 default:
3303 abort ();
3304 }
3305 }
3306
3307 /* Try to infer the code or data type from a symbol.
3308 Returns nonzero if *MAP_TYPE was set. */
3309
3310 static int
3311 get_sym_code_type (struct disassemble_info *info, int n,
3312 enum map_type *map_type)
3313 {
3314 elf_symbol_type *es;
3315 unsigned int type;
3316 const char *name;
3317
3318 /* If the symbol is in a different section, ignore it. */
3319 if (info->section != NULL && info->section != info->symtab[n]->section)
3320 return FALSE;
3321
3322 es = *(elf_symbol_type **)(info->symtab + n);
3323 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3324
3325 /* If the symbol has function type then use that. */
3326 if (type == STT_FUNC)
3327 {
3328 *map_type = MAP_INSN;
3329 return TRUE;
3330 }
3331
3332 /* Check for mapping symbols. */
3333 name = bfd_asymbol_name(info->symtab[n]);
3334 if (name[0] == '$'
3335 && (name[1] == 'x' || name[1] == 'd')
3336 && (name[2] == '\0' || name[2] == '.'))
3337 {
3338 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3339 return TRUE;
3340 }
3341
3342 return FALSE;
3343 }
3344
3345 /* Entry-point of the AArch64 disassembler. */
3346
3347 int
3348 print_insn_aarch64 (bfd_vma pc,
3349 struct disassemble_info *info)
3350 {
3351 bfd_byte buffer[INSNLEN];
3352 int status;
3353 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *,
3354 aarch64_operand_error *);
3355 bfd_boolean found = FALSE;
3356 unsigned int size = 4;
3357 unsigned long data;
3358 aarch64_operand_error errors;
3359
3360 if (info->disassembler_options)
3361 {
3362 set_default_aarch64_dis_options (info);
3363
3364 parse_aarch64_dis_options (info->disassembler_options);
3365
3366 /* To avoid repeated parsing of these options, we remove them here. */
3367 info->disassembler_options = NULL;
3368 }
3369
3370 /* AArch64 instructions are always little-endian. */
3371 info->endian_code = BFD_ENDIAN_LITTLE;
3372
3373 /* Default to DATA. A text section is required by the ABI to contain an
3374 INSN mapping symbol at the start. A data section has no such
3375 requirement, hence if no mapping symbol is found the section must
3376 contain only data. This however isn't very useful if the user has
3377 fully stripped the binaries. If this is the case use the section
3378 attributes to determine the default. If we have no section default to
3379 INSN as well, as we may be disassembling some raw bytes from a bare-metal
3380 HEX file or similar. */
3381 enum map_type type = MAP_DATA;
3382 if ((info->section && info->section->flags & SEC_CODE) || !info->section)
3383 type = MAP_INSN;
3384
3385 /* First check the full symtab for a mapping symbol, even if there
3386 are no usable non-mapping symbols for this address. */
3387 if (info->symtab_size != 0
3388 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3389 {
3390 int last_sym = -1;
3391 bfd_vma addr, section_vma = 0;
3392 bfd_boolean can_use_search_opt_p;
3393 int n;
3394
3395 if (pc <= last_mapping_addr)
3396 last_mapping_sym = -1;
3397
3398 /* Start scanning at the start of the function, or wherever
3399 we finished last time. */
3400 n = info->symtab_pos + 1;
3401
3402 /* If the last stop offset is different from the current one it means we
3403 are disassembling a different glob of bytes. As such the optimization
3404 would not be safe and we should start over. */
3405 can_use_search_opt_p = last_mapping_sym >= 0
3406 && info->stop_offset == last_stop_offset;
3407
3408 if (n >= last_mapping_sym && can_use_search_opt_p)
3409 n = last_mapping_sym;
3410
3411 /* Look down while we haven't passed the location being disassembled.
3412 The reason for this is that there's no defined order between a symbol
3413 and a mapping symbol that may be at the same address. We may have to
3414 look at least one position ahead. */
3415 for (; n < info->symtab_size; n++)
3416 {
3417 addr = bfd_asymbol_value (info->symtab[n]);
3418 if (addr > pc)
3419 break;
3420 if (get_sym_code_type (info, n, &type))
3421 {
3422 last_sym = n;
3423 found = TRUE;
3424 }
3425 }
3426
3427 if (!found)
3428 {
3429 n = info->symtab_pos;
3430 if (n >= last_mapping_sym && can_use_search_opt_p)
3431 n = last_mapping_sym;
3432
3433 /* No mapping symbol found at this address. Look backwards
3434 for a preceding one, but don't go past the section start,
3435 otherwise a data section with no mapping symbol can pick up
3436 a text mapping symbol of a preceding section. The documentation
3437 says the section can be NULL, in which case we will search all the
3438 way to the top. */
3439 if (info->section)
3440 section_vma = info->section->vma;
3441
3442 for (; n >= 0; n--)
3443 {
3444 addr = bfd_asymbol_value (info->symtab[n]);
3445 if (addr < section_vma)
3446 break;
3447
3448 if (get_sym_code_type (info, n, &type))
3449 {
3450 last_sym = n;
3451 found = TRUE;
3452 break;
3453 }
3454 }
3455 }
3456
3457 last_mapping_sym = last_sym;
3458 last_type = type;
3459 last_stop_offset = info->stop_offset;
3460
3461 /* Look a little bit ahead to see if we should print out
3462 less than four bytes of data. If there's a symbol,
3463 mapping or otherwise, after two bytes then don't
3464 print more. */
3465 if (last_type == MAP_DATA)
3466 {
3467 size = 4 - (pc & 3);
3468 for (n = last_sym + 1; n < info->symtab_size; n++)
3469 {
3470 addr = bfd_asymbol_value (info->symtab[n]);
3471 if (addr > pc)
3472 {
3473 if (addr - pc < size)
3474 size = addr - pc;
3475 break;
3476 }
3477 }
3478 /* If the next symbol is after three bytes, we need to
3479 print only part of the data, so that we can use either
3480 .byte or .short. */
3481 if (size == 3)
3482 size = (pc & 1) ? 1 : 2;
3483 }
3484 }
3485 else
3486 last_type = type;
3487
3488 /* PR 10263: Disassemble data if requested to do so by the user. */
3489 if (last_type == MAP_DATA && ((info->flags & DISASSEMBLE_DATA) == 0))
3490 {
3491 /* size was set above. */
3492 info->bytes_per_chunk = size;
3493 info->display_endian = info->endian;
3494 printer = print_insn_data;
3495 }
3496 else
3497 {
3498 info->bytes_per_chunk = size = INSNLEN;
3499 info->display_endian = info->endian_code;
3500 printer = print_insn_aarch64_word;
3501 }
3502
3503 status = (*info->read_memory_func) (pc, buffer, size, info);
3504 if (status != 0)
3505 {
3506 (*info->memory_error_func) (status, pc, info);
3507 return -1;
3508 }
3509
3510 data = bfd_get_bits (buffer, size * 8,
3511 info->display_endian == BFD_ENDIAN_BIG);
3512
3513 (*printer) (pc, data, info, &errors);
3514
3515 return size;
3516 }
3517 \f
3518 void
3519 print_aarch64_disassembler_options (FILE *stream)
3520 {
3521 fprintf (stream, _("\n\
3522 The following AARCH64 specific disassembler options are supported for use\n\
3523 with the -M switch (multiple options should be separated by commas):\n"));
3524
3525 fprintf (stream, _("\n\
3526 no-aliases Don't print instruction aliases.\n"));
3527
3528 fprintf (stream, _("\n\
3529 aliases Do print instruction aliases.\n"));
3530
3531 fprintf (stream, _("\n\
3532 no-notes Don't print instruction notes.\n"));
3533
3534 fprintf (stream, _("\n\
3535 notes Do print instruction notes.\n"));
3536
3537 #ifdef DEBUG_AARCH64
3538 fprintf (stream, _("\n\
3539 debug_dump Temp switch for debug trace.\n"));
3540 #endif /* DEBUG_AARCH64 */
3541
3542 fprintf (stream, _("\n"));
3543 }