Automatic date update in version.in
[deliverable/binutils-gdb.git] / opcodes / aarch64-asm.c
1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2021 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
25 #include "opintl.h"
26
27 /* Utilities. */
28
29 /* The unnamed arguments consist of the number of fields and information about
30 these fields where the VALUE will be inserted into CODE. MASK can be zero or
31 the base mask of the opcode.
32
33 N.B. the fields are required to be in such an order than the least signficant
34 field for VALUE comes the first, e.g. the <index> in
35 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37 the order of M, L, H. */
38
39 static inline void
40 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
41 {
42 uint32_t num;
43 const aarch64_field *field;
44 enum aarch64_field_kind kind;
45 va_list va;
46
47 va_start (va, mask);
48 num = va_arg (va, uint32_t);
49 assert (num <= 5);
50 while (num--)
51 {
52 kind = va_arg (va, enum aarch64_field_kind);
53 field = &fields[kind];
54 insert_field (kind, code, value, mask);
55 value >>= field->width;
56 }
57 va_end (va);
58 }
59
60 /* Insert a raw field value VALUE into all fields in SELF->fields.
61 The least significant bit goes in the final field. */
62
63 static void
64 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
65 aarch64_insn value)
66 {
67 unsigned int i;
68 enum aarch64_field_kind kind;
69
70 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
71 if (self->fields[i] != FLD_NIL)
72 {
73 kind = self->fields[i];
74 insert_field (kind, code, value, 0);
75 value >>= fields[kind].width;
76 }
77 }
78
79 /* Operand inserters. */
80
81 /* Insert nothing. */
82 bool
83 aarch64_ins_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
84 const aarch64_opnd_info *info ATTRIBUTE_UNUSED,
85 aarch64_insn *code ATTRIBUTE_UNUSED,
86 const aarch64_inst *inst ATTRIBUTE_UNUSED,
87 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
88 {
89 return true;
90 }
91
92 /* Insert register number. */
93 bool
94 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
95 aarch64_insn *code,
96 const aarch64_inst *inst ATTRIBUTE_UNUSED,
97 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
98 {
99 insert_field (self->fields[0], code, info->reg.regno, 0);
100 return true;
101 }
102
/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].

   How the lane index is encoded depends on the instruction class
   (inst->opcode->iclass) and on the element-size qualifier.  */
bool
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      /* POS is the bit position selected by the element size: the
	 qualifier enum is laid out so S_B..S_D are consecutive.  */
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D  */
	  /* The low set bit marks the element size; the index occupies
	     the bits above it.  */
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      unsigned reglane_index = info->reglane.index;
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_4B:
	case AARCH64_OPND_QLF_S_2H:
	  /* L:H */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	default:
	  assert (0);
	}
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>].  */
      unsigned reglane_index = info->reglane.index;
      assert (reglane_index < 4);
      insert_field (FLD_SM3_imm2, code, reglane_index, 0);
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
	 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
	/* Complex operand takes two elements.  */
	reglane_index *= 2;

      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* H:L:M */
	  assert (reglane_index < 8);
	  insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* H:L */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  assert (reglane_index < 2);
	  insert_field (FLD_H, code, reglane_index, 0);
	  break;
	default:
	  assert (0);
	}
    }
  return true;
}
193
194 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
195 bool
196 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
197 aarch64_insn *code,
198 const aarch64_inst *inst ATTRIBUTE_UNUSED,
199 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
200 {
201 /* R */
202 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
203 /* len */
204 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
205 return true;
206 }
207
208 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
209 in AdvSIMD load/store instructions. */
210 bool
211 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
212 const aarch64_opnd_info *info, aarch64_insn *code,
213 const aarch64_inst *inst,
214 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
215 {
216 aarch64_insn value = 0;
217 /* Number of elements in each structure to be loaded/stored. */
218 unsigned num = get_opcode_dependent_value (inst->opcode);
219
220 /* Rt */
221 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
222 /* opcode */
223 switch (num)
224 {
225 case 1:
226 switch (info->reglist.num_regs)
227 {
228 case 1: value = 0x7; break;
229 case 2: value = 0xa; break;
230 case 3: value = 0x6; break;
231 case 4: value = 0x2; break;
232 default: assert (0);
233 }
234 break;
235 case 2:
236 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
237 break;
238 case 3:
239 value = 0x4;
240 break;
241 case 4:
242 value = 0x0;
243 break;
244 default:
245 assert (0);
246 }
247 insert_field (FLD_opcode, code, value, 0);
248
249 return true;
250 }
251
252 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
253 single structure to all lanes instructions. */
254 bool
255 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
256 const aarch64_opnd_info *info, aarch64_insn *code,
257 const aarch64_inst *inst,
258 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
259 {
260 aarch64_insn value;
261 /* The opcode dependent area stores the number of elements in
262 each structure to be loaded/stored. */
263 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
264
265 /* Rt */
266 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
267 /* S */
268 value = (aarch64_insn) 0;
269 if (is_ld1r && info->reglist.num_regs == 2)
270 /* OP_LD1R does not have alternating variant, but have "two consecutive"
271 instead. */
272 value = (aarch64_insn) 1;
273 insert_field (FLD_S, code, value, 0);
274
275 return true;
276 }
277
/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand e.g. Vt in AdvSIMD load/store single element instructions.

   The lane index is distributed across the Q:S:size bits; which of those
   bits carry the index depends on the element size, with any remaining
   low bits fixed by the qualifier.  */
bool
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q"; size<0> is set to mark the D variant.  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      assert (0);
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  /* opcode<2:1> is a sub-field of the asisdlso opcode field.  */
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return true;
}
326
/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
   SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.

   For the vector (asimdshf) forms the qualifier's standard value also
   supplies the Q bit; the remaining bits size the immh selector.  */
bool
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      /* Bit 0 of the standard value is the Q bit; strip it before using
	 VAL to size the immediate below.  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return true;
}
382
383 /* Insert fields for e.g. the immediate operands in
384 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
385 bool
386 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
387 aarch64_insn *code,
388 const aarch64_inst *inst ATTRIBUTE_UNUSED,
389 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
390 {
391 int64_t imm;
392
393 imm = info->imm.value;
394 if (operand_need_shift_by_two (self))
395 imm >>= 2;
396 if (operand_need_shift_by_four (self))
397 imm >>= 4;
398 insert_all_fields (self, code, imm);
399 return true;
400 }
401
402 /* Insert immediate and its shift amount for e.g. the last operand in
403 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
404 bool
405 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
406 aarch64_insn *code, const aarch64_inst *inst,
407 aarch64_operand_error *errors)
408 {
409 /* imm16 */
410 aarch64_ins_imm (self, info, code, inst, errors);
411 /* hw */
412 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
413 return true;
414 }
415
416 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
417 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
418 bool
419 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
420 const aarch64_opnd_info *info,
421 aarch64_insn *code,
422 const aarch64_inst *inst ATTRIBUTE_UNUSED,
423 aarch64_operand_error *errors
424 ATTRIBUTE_UNUSED)
425 {
426 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
427 uint64_t imm = info->imm.value;
428 enum aarch64_modifier_kind kind = info->shifter.kind;
429 int amount = info->shifter.amount;
430 aarch64_field field = {0, 0};
431
432 /* a:b:c:d:e:f:g:h */
433 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
434 {
435 /* Either MOVI <Dd>, #<imm>
436 or MOVI <Vd>.2D, #<imm>.
437 <imm> is a 64-bit immediate
438 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
439 encoded in "a:b:c:d:e:f:g:h". */
440 imm = aarch64_shrink_expanded_imm8 (imm);
441 assert ((int)imm >= 0);
442 }
443 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
444
445 if (kind == AARCH64_MOD_NONE)
446 return true;
447
448 /* shift amount partially in cmode */
449 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
450 if (kind == AARCH64_MOD_LSL)
451 {
452 /* AARCH64_MOD_LSL: shift zeros. */
453 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
454 assert (esize == 4 || esize == 2 || esize == 1);
455 /* For 8-bit move immediate, the optional LSL #0 does not require
456 encoding. */
457 if (esize == 1)
458 return true;
459 amount >>= 3;
460 if (esize == 4)
461 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
462 else
463 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
464 }
465 else
466 {
467 /* AARCH64_MOD_MSL: shift ones. */
468 amount >>= 4;
469 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
470 }
471 insert_field_2 (&field, code, amount, 0);
472
473 return true;
474 }
475
476 /* Insert fields for an 8-bit floating-point immediate. */
477 bool
478 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
479 aarch64_insn *code,
480 const aarch64_inst *inst ATTRIBUTE_UNUSED,
481 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
482 {
483 insert_all_fields (self, code, info->imm.value);
484 return true;
485 }
486
487 /* Insert 1-bit rotation immediate (#90 or #270). */
488 bool
489 aarch64_ins_imm_rotate1 (const aarch64_operand *self,
490 const aarch64_opnd_info *info,
491 aarch64_insn *code, const aarch64_inst *inst,
492 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
493 {
494 uint64_t rot = (info->imm.value - 90) / 180;
495 assert (rot < 2U);
496 insert_field (self->fields[0], code, rot, inst->opcode->mask);
497 return true;
498 }
499
500 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
501 bool
502 aarch64_ins_imm_rotate2 (const aarch64_operand *self,
503 const aarch64_opnd_info *info,
504 aarch64_insn *code, const aarch64_inst *inst,
505 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
506 {
507 uint64_t rot = info->imm.value / 90;
508 assert (rot < 4U);
509 insert_field (self->fields[0], code, rot, inst->opcode->mask);
510 return true;
511 }
512
513 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
514 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
515 bool
516 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
517 aarch64_insn *code,
518 const aarch64_inst *inst ATTRIBUTE_UNUSED,
519 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
520 {
521 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
522 return true;
523 }
524
525 /* Insert arithmetic immediate for e.g. the last operand in
526 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
527 bool
528 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
529 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
530 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
531 {
532 /* shift */
533 aarch64_insn value = info->shifter.amount ? 1 : 0;
534 insert_field (self->fields[0], code, value, 0);
535 /* imm12 (unsigned) */
536 insert_field (self->fields[1], code, info->imm.value, 0);
537 return true;
538 }
539
540 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
541 the operand should be inverted before encoding. */
542 static bool
543 aarch64_ins_limm_1 (const aarch64_operand *self,
544 const aarch64_opnd_info *info, aarch64_insn *code,
545 const aarch64_inst *inst, bool invert_p,
546 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
547 {
548 bool res;
549 aarch64_insn value;
550 uint64_t imm = info->imm.value;
551 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
552
553 if (invert_p)
554 imm = ~imm;
555 /* The constraint check should guarantee that this will work. */
556 res = aarch64_logical_immediate_p (imm, esize, &value);
557 if (res)
558 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
559 self->fields[0]);
560 return res;
561 }
562
563 /* Insert logical/bitmask immediate for e.g. the last operand in
564 ORR <Wd|WSP>, <Wn>, #<imm>. */
565 bool
566 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
567 aarch64_insn *code, const aarch64_inst *inst,
568 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
569 {
570 return aarch64_ins_limm_1 (self, info, code, inst,
571 inst->opcode->op == OP_BIC, errors);
572 }
573
574 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
575 bool
576 aarch64_ins_inv_limm (const aarch64_operand *self,
577 const aarch64_opnd_info *info, aarch64_insn *code,
578 const aarch64_inst *inst,
579 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
580 {
581 return aarch64_ins_limm_1 (self, info, code, inst, true, errors);
582 }
583
584 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
585 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
586 bool
587 aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
588 aarch64_insn *code, const aarch64_inst *inst,
589 aarch64_operand_error *errors)
590 {
591 aarch64_insn value = 0;
592
593 assert (info->idx == 0);
594
595 /* Rt */
596 aarch64_ins_regno (self, info, code, inst, errors);
597 if (inst->opcode->iclass == ldstpair_indexed
598 || inst->opcode->iclass == ldstnapair_offs
599 || inst->opcode->iclass == ldstpair_off
600 || inst->opcode->iclass == loadlit)
601 {
602 /* size */
603 switch (info->qualifier)
604 {
605 case AARCH64_OPND_QLF_S_S: value = 0; break;
606 case AARCH64_OPND_QLF_S_D: value = 1; break;
607 case AARCH64_OPND_QLF_S_Q: value = 2; break;
608 default: assert (0);
609 }
610 insert_field (FLD_ldst_size, code, value, 0);
611 }
612 else
613 {
614 /* opc[1]:size */
615 value = aarch64_get_qualifier_standard_value (info->qualifier);
616 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
617 }
618
619 return true;
620 }
621
622 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
623 bool
624 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
625 const aarch64_opnd_info *info, aarch64_insn *code,
626 const aarch64_inst *inst ATTRIBUTE_UNUSED,
627 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
628 {
629 /* Rn */
630 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
631 return true;
632 }
633
634 /* Encode the address operand for e.g.
635 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
636 bool
637 aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
638 const aarch64_opnd_info *info, aarch64_insn *code,
639 const aarch64_inst *inst ATTRIBUTE_UNUSED,
640 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
641 {
642 aarch64_insn S;
643 enum aarch64_modifier_kind kind = info->shifter.kind;
644
645 /* Rn */
646 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
647 /* Rm */
648 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
649 /* option */
650 if (kind == AARCH64_MOD_LSL)
651 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven. */
652 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
653 /* S */
654 if (info->qualifier != AARCH64_OPND_QLF_S_B)
655 S = info->shifter.amount != 0;
656 else
657 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
658 S <amount>
659 0 [absent]
660 1 #0
661 Must be #0 if <extend> is explicitly LSL. */
662 S = info->shifter.operator_present && info->shifter.amount_present;
663 insert_field (FLD_S, code, S, 0);
664
665 return true;
666 }
667
668 /* Encode the address operand for e.g.
669 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
670 bool
671 aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
672 const aarch64_opnd_info *info, aarch64_insn *code,
673 const aarch64_inst *inst ATTRIBUTE_UNUSED,
674 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
675 {
676 /* Rn */
677 insert_field (self->fields[0], code, info->addr.base_regno, 0);
678
679 /* simm9 */
680 int imm = info->addr.offset.imm;
681 insert_field (self->fields[1], code, imm, 0);
682
683 /* writeback */
684 if (info->addr.writeback)
685 {
686 assert (info->addr.preind == 1 && info->addr.postind == 0);
687 insert_field (self->fields[2], code, 1, 0);
688 }
689 return true;
690 }
691
/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.
   Note: INST is only read by the asserts below, so ATTRIBUTE_UNUSED is
   kept to silence -Wunused-parameter in NDEBUG builds.  */
bool
aarch64_ins_addr_simm (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7
      || info->qualifier == AARCH64_OPND_QLF_imm_tag)
    /* scaled immediate in ld/st pair instructions: the field stores the
       byte offset divided by the transfer size.  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post- index */
  if (info->addr.writeback)
    {
      /* These classes never take writeback.  */
      assert (inst->opcode->iclass != ldst_unscaled
	      && inst->opcode->iclass != ldstnapair_offs
	      && inst->opcode->iclass != ldstpair_off
	      && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
	insert_field (self->fields[1], code, 1, 0);
    }

  return true;
}
725
726 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
727 bool
728 aarch64_ins_addr_simm10 (const aarch64_operand *self,
729 const aarch64_opnd_info *info,
730 aarch64_insn *code,
731 const aarch64_inst *inst ATTRIBUTE_UNUSED,
732 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
733 {
734 int imm;
735
736 /* Rn */
737 insert_field (self->fields[0], code, info->addr.base_regno, 0);
738 /* simm10 */
739 imm = info->addr.offset.imm >> 3;
740 insert_field (self->fields[1], code, imm >> 9, 0);
741 insert_field (self->fields[2], code, imm, 0);
742 /* writeback */
743 if (info->addr.writeback)
744 {
745 assert (info->addr.preind == 1 && info->addr.postind == 0);
746 insert_field (self->fields[3], code, 1, 0);
747 }
748 return true;
749 }
750
751 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
752 bool
753 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
754 const aarch64_opnd_info *info,
755 aarch64_insn *code,
756 const aarch64_inst *inst ATTRIBUTE_UNUSED,
757 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
758 {
759 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
760
761 /* Rn */
762 insert_field (self->fields[0], code, info->addr.base_regno, 0);
763 /* uimm12 */
764 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
765 return true;
766 }
767
768 /* Encode the address operand for e.g.
769 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
770 bool
771 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
772 const aarch64_opnd_info *info, aarch64_insn *code,
773 const aarch64_inst *inst ATTRIBUTE_UNUSED,
774 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
775 {
776 /* Rn */
777 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
778 /* Rm | #<amount> */
779 if (info->addr.offset.is_reg)
780 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
781 else
782 insert_field (FLD_Rm, code, 0x1f, 0);
783 return true;
784 }
785
786 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
787 bool
788 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
789 const aarch64_opnd_info *info, aarch64_insn *code,
790 const aarch64_inst *inst ATTRIBUTE_UNUSED,
791 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
792 {
793 /* cond */
794 insert_field (FLD_cond, code, info->cond->value, 0);
795 return true;
796 }
797
798 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
799 bool
800 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
801 const aarch64_opnd_info *info, aarch64_insn *code,
802 const aarch64_inst *inst,
803 aarch64_operand_error *detail ATTRIBUTE_UNUSED)
804 {
805 /* If a system instruction check if we have any restrictions on which
806 registers it can use. */
807 if (inst->opcode->iclass == ic_system)
808 {
809 uint64_t opcode_flags
810 = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
811 uint32_t sysreg_flags
812 = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);
813
814 /* Check to see if it's read-only, else check if it's write only.
815 if it's both or unspecified don't care. */
816 if (opcode_flags == F_SYS_READ
817 && sysreg_flags
818 && sysreg_flags != F_REG_READ)
819 {
820 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
821 detail->error = _("specified register cannot be read from");
822 detail->index = info->idx;
823 detail->non_fatal = true;
824 }
825 else if (opcode_flags == F_SYS_WRITE
826 && sysreg_flags
827 && sysreg_flags != F_REG_WRITE)
828 {
829 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
830 detail->error = _("specified register cannot be written to");
831 detail->index = info->idx;
832 detail->non_fatal = true;
833 }
834 }
835 /* op0:op1:CRn:CRm:op2 */
836 insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
837 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
838 return true;
839 }
840
841 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
842 bool
843 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
844 const aarch64_opnd_info *info, aarch64_insn *code,
845 const aarch64_inst *inst ATTRIBUTE_UNUSED,
846 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
847 {
848 /* op1:op2 */
849 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
850 FLD_op2, FLD_op1);
851 return true;
852 }
853
854 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
855 bool
856 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
857 const aarch64_opnd_info *info, aarch64_insn *code,
858 const aarch64_inst *inst ATTRIBUTE_UNUSED,
859 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
860 {
861 /* op1:CRn:CRm:op2 */
862 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
863 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
864 return true;
865 }
866
867 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
868
869 bool
870 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
871 const aarch64_opnd_info *info, aarch64_insn *code,
872 const aarch64_inst *inst ATTRIBUTE_UNUSED,
873 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
874 {
875 /* CRm */
876 insert_field (FLD_CRm, code, info->barrier->value, 0);
877 return true;
878 }
879
880 /* Encode the memory barrier option operand for DSB <option>nXS|#<imm>. */
881
882 bool
883 aarch64_ins_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
884 const aarch64_opnd_info *info, aarch64_insn *code,
885 const aarch64_inst *inst ATTRIBUTE_UNUSED,
886 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
887 {
888 /* For the DSB nXS barrier variant: is a 5-bit unsigned immediate,
889 encoded in CRm<3:2>. */
890 aarch64_insn value = (info->barrier->value >> 2) - 4;
891 insert_field (FLD_CRm_dsb_nxs, code, value, 0);
892 return true;
893 }
894
895 /* Encode the prefetch operation option operand for e.g.
896 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
897
898 bool
899 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
900 const aarch64_opnd_info *info, aarch64_insn *code,
901 const aarch64_inst *inst ATTRIBUTE_UNUSED,
902 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
903 {
904 /* prfop in Rt */
905 insert_field (FLD_Rt, code, info->prfop->value, 0);
906 return true;
907 }
908
909 /* Encode the hint number for instructions that alias HINT but take an
910 operand. */
911
912 bool
913 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
914 const aarch64_opnd_info *info, aarch64_insn *code,
915 const aarch64_inst *inst ATTRIBUTE_UNUSED,
916 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
917 {
918 /* CRm:op2. */
919 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
920 return true;
921 }
922
923 /* Encode the extended register operand for e.g.
924 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
925 bool
926 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
927 const aarch64_opnd_info *info, aarch64_insn *code,
928 const aarch64_inst *inst ATTRIBUTE_UNUSED,
929 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
930 {
931 enum aarch64_modifier_kind kind;
932
933 /* Rm */
934 insert_field (FLD_Rm, code, info->reg.regno, 0);
935 /* option */
936 kind = info->shifter.kind;
937 if (kind == AARCH64_MOD_LSL)
938 kind = info->qualifier == AARCH64_OPND_QLF_W
939 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
940 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
941 /* imm3 */
942 insert_field (FLD_imm3, code, info->shifter.amount, 0);
943
944 return true;
945 }
946
947 /* Encode the shifted register operand for e.g.
948 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
949 bool
950 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
951 const aarch64_opnd_info *info, aarch64_insn *code,
952 const aarch64_inst *inst ATTRIBUTE_UNUSED,
953 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
954 {
955 /* Rm */
956 insert_field (FLD_Rm, code, info->reg.regno, 0);
957 /* shift */
958 insert_field (FLD_shift, code,
959 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
960 /* imm6 */
961 insert_field (FLD_imm6, code, info->shifter.amount, 0);
962
963 return true;
964 }
965
966 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
967 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
968 SELF's operand-dependent value. fields[0] specifies the field that
969 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
970 bool
971 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
972 const aarch64_opnd_info *info,
973 aarch64_insn *code,
974 const aarch64_inst *inst ATTRIBUTE_UNUSED,
975 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
976 {
977 int factor = 1 + get_operand_specific_data (self);
978 insert_field (self->fields[0], code, info->addr.base_regno, 0);
979 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
980 return true;
981 }
982
983 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
984 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
985 SELF's operand-dependent value. fields[0] specifies the field that
986 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
987 bool
988 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
989 const aarch64_opnd_info *info,
990 aarch64_insn *code,
991 const aarch64_inst *inst ATTRIBUTE_UNUSED,
992 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
993 {
994 int factor = 1 + get_operand_specific_data (self);
995 insert_field (self->fields[0], code, info->addr.base_regno, 0);
996 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
997 return true;
998 }
999
1000 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1001 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1002 SELF's operand-dependent value. fields[0] specifies the field that
1003 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1004 and imm3 fields, with imm3 being the less-significant part. */
1005 bool
1006 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
1007 const aarch64_opnd_info *info,
1008 aarch64_insn *code,
1009 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1010 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1011 {
1012 int factor = 1 + get_operand_specific_data (self);
1013 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1014 insert_fields (code, info->addr.offset.imm / factor, 0,
1015 2, FLD_imm3, FLD_SVE_imm6);
1016 return true;
1017 }
1018
1019 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1020 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1021 value. fields[0] specifies the base register field. */
1022 bool
1023 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
1024 const aarch64_opnd_info *info, aarch64_insn *code,
1025 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1026 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1027 {
1028 int factor = 1 << get_operand_specific_data (self);
1029 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1030 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
1031 return true;
1032 }
1033
1034 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1035 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1036 value. fields[0] specifies the base register field. */
1037 bool
1038 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
1039 const aarch64_opnd_info *info, aarch64_insn *code,
1040 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1041 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1042 {
1043 int factor = 1 << get_operand_specific_data (self);
1044 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1045 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
1046 return true;
1047 }
1048
1049 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1050 is SELF's operand-dependent value. fields[0] specifies the base
1051 register field and fields[1] specifies the offset register field. */
1052 bool
1053 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
1054 const aarch64_opnd_info *info, aarch64_insn *code,
1055 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1056 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1057 {
1058 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1059 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1060 return true;
1061 }
1062
1063 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1064 <shift> is SELF's operand-dependent value. fields[0] specifies the
1065 base register field, fields[1] specifies the offset register field and
1066 fields[2] is a single-bit field that selects SXTW over UXTW. */
1067 bool
1068 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
1069 const aarch64_opnd_info *info, aarch64_insn *code,
1070 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1071 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1072 {
1073 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1074 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1075 if (info->shifter.kind == AARCH64_MOD_UXTW)
1076 insert_field (self->fields[2], code, 0, 0);
1077 else
1078 insert_field (self->fields[2], code, 1, 0);
1079 return true;
1080 }
1081
1082 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1083 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1084 fields[0] specifies the base register field. */
1085 bool
1086 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
1087 const aarch64_opnd_info *info, aarch64_insn *code,
1088 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1089 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1090 {
1091 int factor = 1 << get_operand_specific_data (self);
1092 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1093 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
1094 return true;
1095 }
1096
1097 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1098 where <modifier> is fixed by the instruction and where <msz> is a
1099 2-bit unsigned number. fields[0] specifies the base register field
1100 and fields[1] specifies the offset register field. */
1101 static bool
1102 aarch64_ext_sve_addr_zz (const aarch64_operand *self,
1103 const aarch64_opnd_info *info, aarch64_insn *code,
1104 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1105 {
1106 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1107 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1108 insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
1109 return true;
1110 }
1111
1112 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1113 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1114 field and fields[1] specifies the offset register field. */
1115 bool
1116 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
1117 const aarch64_opnd_info *info, aarch64_insn *code,
1118 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1119 aarch64_operand_error *errors)
1120 {
1121 return aarch64_ext_sve_addr_zz (self, info, code, errors);
1122 }
1123
1124 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1125 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1126 field and fields[1] specifies the offset register field. */
1127 bool
1128 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
1129 const aarch64_opnd_info *info,
1130 aarch64_insn *code,
1131 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1132 aarch64_operand_error *errors)
1133 {
1134 return aarch64_ext_sve_addr_zz (self, info, code, errors);
1135 }
1136
1137 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1138 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1139 field and fields[1] specifies the offset register field. */
1140 bool
1141 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
1142 const aarch64_opnd_info *info,
1143 aarch64_insn *code,
1144 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1145 aarch64_operand_error *errors)
1146 {
1147 return aarch64_ext_sve_addr_zz (self, info, code, errors);
1148 }
1149
/* Encode an SVE ADD/SUB immediate.

   The encoded value is an 8-bit payload plus a flag bit (0x100 here)
   that selects "LSL #8" of the payload.  */
bool
aarch64_ins_sve_aimm (const aarch64_operand *self,
		      const aarch64_opnd_info *info, aarch64_insn *code,
		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->shifter.amount == 8)
    /* Explicitly written "#imm, LSL #8": keep the payload as-is and set
       the shift bit.  */
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    /* A non-zero multiple of 256 written without a shift: represent it
       as (imm / 256) with the shift bit set.  */
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    /* Plain unshifted 8-bit immediate.  */
    insert_all_fields (self, code, info->imm.value & 0xff);
  return true;
}
1165
1166 /* Encode an SVE CPY/DUP immediate. */
1167 bool
1168 aarch64_ins_sve_asimm (const aarch64_operand *self,
1169 const aarch64_opnd_info *info, aarch64_insn *code,
1170 const aarch64_inst *inst,
1171 aarch64_operand_error *errors)
1172 {
1173 return aarch64_ins_sve_aimm (self, info, code, inst, errors);
1174 }
1175
1176 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1177 array specifies which field to use for Zn. MM is encoded in the
1178 concatenation of imm5 and SVE_tszh, with imm5 being the less
1179 significant part. */
1180 bool
1181 aarch64_ins_sve_index (const aarch64_operand *self,
1182 const aarch64_opnd_info *info, aarch64_insn *code,
1183 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1184 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1185 {
1186 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1187 insert_field (self->fields[0], code, info->reglane.regno, 0);
1188 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1189 2, FLD_imm5, FLD_SVE_tszh);
1190 return true;
1191 }
1192
1193 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1194 bool
1195 aarch64_ins_sve_limm_mov (const aarch64_operand *self,
1196 const aarch64_opnd_info *info, aarch64_insn *code,
1197 const aarch64_inst *inst,
1198 aarch64_operand_error *errors)
1199 {
1200 return aarch64_ins_limm (self, info, code, inst, errors);
1201 }
1202
1203 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1204 and where MM occupies the most-significant part. The operand-dependent
1205 value specifies the number of bits in Zn. */
1206 bool
1207 aarch64_ins_sve_quad_index (const aarch64_operand *self,
1208 const aarch64_opnd_info *info, aarch64_insn *code,
1209 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1210 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1211 {
1212 unsigned int reg_bits = get_operand_specific_data (self);
1213 assert (info->reglane.regno < (1U << reg_bits));
1214 unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1215 insert_all_fields (self, code, val);
1216 return true;
1217 }
1218
1219 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1220 to use for Zn. */
1221 bool
1222 aarch64_ins_sve_reglist (const aarch64_operand *self,
1223 const aarch64_opnd_info *info, aarch64_insn *code,
1224 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1225 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1226 {
1227 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
1228 return true;
1229 }
1230
1231 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1232 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1233 field. */
1234 bool
1235 aarch64_ins_sve_scale (const aarch64_operand *self,
1236 const aarch64_opnd_info *info, aarch64_insn *code,
1237 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1238 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1239 {
1240 insert_all_fields (self, code, info->imm.value);
1241 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
1242 return true;
1243 }
1244
1245 /* Encode an SVE shift left immediate. */
1246 bool
1247 aarch64_ins_sve_shlimm (const aarch64_operand *self,
1248 const aarch64_opnd_info *info, aarch64_insn *code,
1249 const aarch64_inst *inst,
1250 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1251 {
1252 const aarch64_opnd_info *prev_operand;
1253 unsigned int esize;
1254
1255 assert (info->idx > 0);
1256 prev_operand = &inst->operands[info->idx - 1];
1257 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1258 insert_all_fields (self, code, 8 * esize + info->imm.value);
1259 return true;
1260 }
1261
1262 /* Encode an SVE shift right immediate. */
1263 bool
1264 aarch64_ins_sve_shrimm (const aarch64_operand *self,
1265 const aarch64_opnd_info *info, aarch64_insn *code,
1266 const aarch64_inst *inst,
1267 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1268 {
1269 const aarch64_opnd_info *prev_operand;
1270 unsigned int esize;
1271
1272 unsigned int opnd_backshift = get_operand_specific_data (self);
1273 assert (info->idx >= (int)opnd_backshift);
1274 prev_operand = &inst->operands[info->idx - opnd_backshift];
1275 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1276 insert_all_fields (self, code, 16 * esize - info->imm.value);
1277 return true;
1278 }
1279
1280 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1281 The fields array specifies which field to use. */
1282 bool
1283 aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1284 const aarch64_opnd_info *info,
1285 aarch64_insn *code,
1286 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1287 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1288 {
1289 if (info->imm.value == 0x3f000000)
1290 insert_field (self->fields[0], code, 0, 0);
1291 else
1292 insert_field (self->fields[0], code, 1, 0);
1293 return true;
1294 }
1295
1296 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1297 The fields array specifies which field to use. */
1298 bool
1299 aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1300 const aarch64_opnd_info *info,
1301 aarch64_insn *code,
1302 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1303 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1304 {
1305 if (info->imm.value == 0x3f000000)
1306 insert_field (self->fields[0], code, 0, 0);
1307 else
1308 insert_field (self->fields[0], code, 1, 0);
1309 return true;
1310 }
1311
1312 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1313 The fields array specifies which field to use. */
1314 bool
1315 aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1316 const aarch64_opnd_info *info,
1317 aarch64_insn *code,
1318 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1319 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1320 {
1321 if (info->imm.value == 0)
1322 insert_field (self->fields[0], code, 0, 0);
1323 else
1324 insert_field (self->fields[0], code, 1, 0);
1325 return true;
1326 }
1327
1328 /* Miscellaneous encoding functions. */
1329
/* Encode size[0], i.e. bit 22, for
   e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.

   The bit is derived from the wider of the two vector qualifiers:
   0 for 4S (single precision), 1 for 2D (double precision).  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_NIL;

  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>: the source holds the wide type.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>: the destination holds the wide
	 type.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      assert (0);
    }
  assert (qualifier == AARCH64_OPND_QLF_V_4S
	  || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  /* Write only bit 0 of the size field, i.e. instruction bit 22.  */
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}
1361
1362 /* Encode size[0], i.e. bit 22, for
1363 e.g. FCVTXN <Vb><d>, <Va><n>. */
1364
1365 static void
1366 encode_asisd_fcvtxn (aarch64_inst *inst)
1367 {
1368 aarch64_insn val = 1;
1369 aarch64_field field = {0, 0};
1370 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1371 gen_sub_field (FLD_size, 0, 1, &field);
1372 insert_field_2 (&field, &inst->value, val, 0);
1373 }
1374
1375 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1376 static void
1377 encode_fcvt (aarch64_inst *inst)
1378 {
1379 aarch64_insn val;
1380 const aarch64_field field = {15, 2};
1381
1382 /* opc dstsize */
1383 switch (inst->operands[0].qualifier)
1384 {
1385 case AARCH64_OPND_QLF_S_S: val = 0; break;
1386 case AARCH64_OPND_QLF_S_D: val = 1; break;
1387 case AARCH64_OPND_QLF_S_H: val = 3; break;
1388 default: abort ();
1389 }
1390 insert_field_2 (&field, &inst->value, val, 0);
1391
1392 return;
1393 }
1394
1395 /* Return the index in qualifiers_list that INST is using. Should only
1396 be called once the qualifiers are known to be valid. */
1397
1398 static int
1399 aarch64_get_variant (struct aarch64_inst *inst)
1400 {
1401 int i, nops, variant;
1402
1403 nops = aarch64_num_of_operands (inst->opcode);
1404 for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
1405 {
1406 for (i = 0; i < nops; ++i)
1407 if (inst->opcode->qualifiers_list[variant][i]
1408 != inst->operands[i].qualifier)
1409 break;
1410 if (i == nops)
1411 return variant;
1412 }
1413 abort ();
1414 }
1415
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.

   Most cases here handle aliases whose real form repeats a register in
   more than one field (e.g. SVE MOV aliases): the register number is
   extracted from the field the assembler has already filled in and
   copied into the remaining field(s).  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
		     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      /* Nothing extra to encode for this alias.  */
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}
1482
/* Encode the 'size' and 'Q' field for e.g. SHADD.

   The standard qualifier value packs Q in bit 0 and size in bits [2:1];
   they are split here into their respective instruction fields.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size: the load/store classes keep their size bits in a different
     position from the rest.  */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
1510
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has the enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  /* sf: 64-bit (X/SP) vs 32-bit (W) general-register operation.  */
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  /* LSE atomics encode the same W/X distinction in their own size bit.  */
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  /* type: scalar FP precision (0 = S, 1 = D, 3 = H).  */
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: assert (0);
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  /* size: AdvSIMD scalar element size.  */
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  /* Q and imm5: vector arrangement encoded as a one-hot bit in imm5.  */
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be a integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      /* opc<0> is the inverse of the standard qualifier value here.  */
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
1636
/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  int variant = 0;
  switch (inst->opcode->iclass)
    {
    case sve_cpy:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
    case sve_shift_tsz_hsd:
    case sve_shift_tsz_bhsd:
      /* For indices and shift amounts, the variant is encoded as
	 part of the immediate.  */
      break;

    case sve_limm:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
	 and depend on the immediate.  They don't have a separate
	 encoding.  */
      break;

    case sve_misc:
      /* sve_misc instructions have only a single variant.  */
      break;

    case sve_movprfx:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      /* Variant 0 is .H, encoded as size 1; size 0 (.B) is invalid here.  */
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_bh:
    case sve_size_sd:
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_sd2:
      insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd2:
      insert_field (FLD_SVE_size, &inst->value,
		    aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_tsz_bhs:
      /* One-hot encoding of the element size in tszl:sz.  */
      insert_fields (&inst->value,
		     (1 << aarch64_get_variant (inst)),
		     0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
      break;

    case sve_size_13:
      /* For this iclass, the size field is 1 for .H, 3 for .D (2 is
	 taken by another encoding).  */
      variant = aarch64_get_variant (inst) + 1;
      if (variant == 2)
	  variant = 3;
      insert_field (FLD_size, &inst->value, variant, 0);
      break;

    default:
      break;
    }
}
1721
1722 /* Converters converting an alias opcode instruction to its real form. */
1723
/* ROR <Wd>, <Ws>, #<shift>
   is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  /* Shift the operands up to make room for the duplicated <Ws>.  The
     copies run from the highest index down so that no operand is
     overwritten before it has been copied.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}
1733
1734 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1735 is equivalent to:
1736 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1737 static void
1738 convert_xtl_to_shll (aarch64_inst *inst)
1739 {
1740 inst->operands[2].qualifier = inst->operands[1].qualifier;
1741 inst->operands[2].imm.value = 0;
1742 }
1743
1744 /* Convert
1745 LSR <Xd>, <Xn>, #<shift>
1746 to
1747 UBFM <Xd>, <Xn>, #<shift>, #63. */
1748 static void
1749 convert_sr_to_bfm (aarch64_inst *inst)
1750 {
1751 inst->operands[3].imm.value =
1752 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1753 }
1754
1755 /* Convert MOV to ORR. */
1756 static void
1757 convert_mov_to_orr (aarch64_inst *inst)
1758 {
1759 /* MOV <Vd>.<T>, <Vn>.<T>
1760 is equivalent to:
1761 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1762 copy_operand_info (inst, 2, 1);
1763 }
1764
1765 /* When <imms> >= <immr>, the instruction written:
1766 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1767 is equivalent to:
1768 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1769
1770 static void
1771 convert_bfx_to_bfm (aarch64_inst *inst)
1772 {
1773 int64_t lsb, width;
1774
1775 /* Convert the operand. */
1776 lsb = inst->operands[2].imm.value;
1777 width = inst->operands[3].imm.value;
1778 inst->operands[2].imm.value = lsb;
1779 inst->operands[3].imm.value = lsb + width - 1;
1780 }
1781
1782 /* When <imms> < <immr>, the instruction written:
1783 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1784 is equivalent to:
1785 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1786
1787 static void
1788 convert_bfi_to_bfm (aarch64_inst *inst)
1789 {
1790 int64_t lsb, width;
1791
1792 /* Convert the operand. */
1793 lsb = inst->operands[2].imm.value;
1794 width = inst->operands[3].imm.value;
1795 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1796 {
1797 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1798 inst->operands[3].imm.value = width - 1;
1799 }
1800 else
1801 {
1802 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1803 inst->operands[3].imm.value = width - 1;
1804 }
1805 }
1806
1807 /* The instruction written:
1808 BFC <Xd>, #<lsb>, #<width>
1809 is equivalent to:
1810 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1811
1812 static void
1813 convert_bfc_to_bfm (aarch64_inst *inst)
1814 {
1815 int64_t lsb, width;
1816
1817 /* Insert XZR. */
1818 copy_operand_info (inst, 3, 2);
1819 copy_operand_info (inst, 2, 1);
1820 copy_operand_info (inst, 1, 0);
1821 inst->operands[1].reg.regno = 0x1f;
1822
1823 /* Convert the immediate operand. */
1824 lsb = inst->operands[2].imm.value;
1825 width = inst->operands[3].imm.value;
1826 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1827 {
1828 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1829 inst->operands[3].imm.value = width - 1;
1830 }
1831 else
1832 {
1833 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1834 inst->operands[3].imm.value = width - 1;
1835 }
1836 }
1837
1838 /* The instruction written:
1839 LSL <Xd>, <Xn>, #<shift>
1840 is equivalent to:
1841 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1842
1843 static void
1844 convert_lsl_to_ubfm (aarch64_inst *inst)
1845 {
1846 int64_t shift = inst->operands[2].imm.value;
1847
1848 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1849 {
1850 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1851 inst->operands[3].imm.value = 31 - shift;
1852 }
1853 else
1854 {
1855 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1856 inst->operands[3].imm.value = 63 - shift;
1857 }
1858 }
1859
/* CINC <Wd>, <Wn>, <cond>
   is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */

static void
convert_to_csel (aarch64_inst *inst)
{
  /* Duplicate <Wn> into the second source slot; copies run from the
     highest index down so nothing is overwritten before being copied.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  /* The real form uses the inverted condition.  */
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1871
/* CSET <Wd>, <cond>
   is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  /* Move the condition to slot 3 and seed slots 1 and 2 from <Wd>;
     copies run from the highest index down so nothing is overwritten
     before being copied.  */
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  /* Both sources are the zero register.  */
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  /* The real form uses the inverted condition.  */
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1886
/* MOV <Wd>, #<imm>
   is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>.

   For the MOVN alias (OP_MOV_IMM_WIDEN) the immediate is bitwise
   inverted first, since MOVN materialises the complement.  */

static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value = ~(uint64_t)0;

  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      value = ~inst->operands[1].imm.value;
      break;
    default:
      assert (0);
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);
  /* Reduce the value to the 16-bit payload at the chosen shift.  */
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
}
1920
1921 /* MOV <Wd>, #<imm>
1922 is equivalent to:
1923 ORR <Wd>, WZR, #<imm>. */
1924
1925 static void
1926 convert_mov_to_movebitmask (aarch64_inst *inst)
1927 {
1928 copy_operand_info (inst, 2, 1);
1929 inst->operands[1].reg.regno = 0x1f;
1930 inst->operands[1].skip = 0;
1931 }
1932
1933 /* Some alias opcodes are assembled by being converted to their real-form. */
1934
1935 static void
1936 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1937 {
1938 const aarch64_opcode *alias = inst->opcode;
1939
1940 if ((alias->flags & F_CONV) == 0)
1941 goto convert_to_real_return;
1942
1943 switch (alias->op)
1944 {
1945 case OP_ASR_IMM:
1946 case OP_LSR_IMM:
1947 convert_sr_to_bfm (inst);
1948 break;
1949 case OP_LSL_IMM:
1950 convert_lsl_to_ubfm (inst);
1951 break;
1952 case OP_CINC:
1953 case OP_CINV:
1954 case OP_CNEG:
1955 convert_to_csel (inst);
1956 break;
1957 case OP_CSET:
1958 case OP_CSETM:
1959 convert_cset_to_csinc (inst);
1960 break;
1961 case OP_UBFX:
1962 case OP_BFXIL:
1963 case OP_SBFX:
1964 convert_bfx_to_bfm (inst);
1965 break;
1966 case OP_SBFIZ:
1967 case OP_BFI:
1968 case OP_UBFIZ:
1969 convert_bfi_to_bfm (inst);
1970 break;
1971 case OP_BFC:
1972 convert_bfc_to_bfm (inst);
1973 break;
1974 case OP_MOV_V:
1975 convert_mov_to_orr (inst);
1976 break;
1977 case OP_MOV_IMM_WIDE:
1978 case OP_MOV_IMM_WIDEN:
1979 convert_mov_to_movewide (inst);
1980 break;
1981 case OP_MOV_IMM_LOG:
1982 convert_mov_to_movebitmask (inst);
1983 break;
1984 case OP_ROR_IMM:
1985 convert_ror_to_extr (inst);
1986 break;
1987 case OP_SXTL:
1988 case OP_SXTL2:
1989 case OP_UXTL:
1990 case OP_UXTL2:
1991 convert_xtl_to_shll (inst);
1992 break;
1993 default:
1994 break;
1995 }
1996
1997 convert_to_real_return:
1998 aarch64_replace_opcode (inst, real);
1999 }
2000
/* Encode *INST_ORI of the opcode code OPCODE.
   Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.

   MISMATCH_DETAIL, if non-NULL, receives diagnostics on failure;
   INSN_SEQUENCE carries cross-instruction constraint state.
   Returns true on success, false (with *CODE unwritten) on failure.  */

bool
aarch64_opcode_encode (const aarch64_opcode *opcode,
		       const aarch64_inst *inst_ori, aarch64_insn *code,
		       aarch64_opnd_qualifier_t *qlf_seq,
		       aarch64_operand_error *mismatch_detail,
		       aarch64_instr_sequence* insn_sequence)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return 0;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.
     NOTE(review): QLF_SEQ is never advanced in this loop, so each
     iteration overwrites the same slot and only the last operand's
     qualifier survives — confirm whether callers expect a full sequence
     (the comment above suggests so) or only pass NULL here.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
	*qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of real opcode
     and the encoding will be carried out using the rules for the aliased
     opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
		   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
      /* From here on, encode using the real opcode's rules.  */
      opcode = aliased;
    }

  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  Operands stop at the first
     AARCH64_OPND_NIL; operands flagged 'skip' were absorbed into another
     operand during alias conversion and carry no encoding of their own.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (info->skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd)
	  && !aarch64_insert_operand (opnd, info, &inst->value, inst,
				      mismatch_detail))
	return false;
    }

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

  /* Possibly use the instruction class to encode the chosen qualifier
     variant.  */
  aarch64_encode_variant_using_iclass (inst);

  /* Run a verifier if the instruction has one set.
     NOTE(review): *CODE has not been written yet at this point (that only
     happens at encoding_exit); with the encoding flag set to true the
     verifier presumably inspects INST rather than the CODE argument —
     confirm against the verifier implementations.  */
  if (opcode->verifier)
    {
      enum err_type result = opcode->verifier (inst, *code, 0, true,
					       mismatch_detail, insn_sequence);
      switch (result)
	{
	case ERR_UND:
	case ERR_UNP:
	case ERR_NYI:
	  return false;
	default:
	  break;
	}
    }

  /* Always run constrain verifiers, this is needed because constrains need to
     maintain a global state.  Regardless if the instruction has the flag set
     or not.  */
  enum err_type result = verify_constraints (inst, *code, 0, true,
					     mismatch_detail, insn_sequence);
  switch (result)
    {
    case ERR_UND:
    case ERR_UNP:
    case ERR_NYI:
      return false;
    default:
      break;
    }


 encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  /* Publish the finished encoding only on the success path.  */
  *code = inst->value;

  return true;
}
This page took 0.078817 seconds and 4 git commands to generate.