/* aarch64-asm.c -- AArch64 assembler support.
   Copyright (C) 2012-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */

#include "sysdep.h"
#include <stdarg.h>
#include "libiberty.h"
#include "aarch64-asm.h"
#include "opintl.h"

/* Utilities.  */

/* The unnamed arguments consist of the number of fields and information about
   these fields where the VALUE will be inserted into CODE.  MASK can be zero
   or the base mask of the opcode.

   N.B. the fields are required to be in such an order that the least
   significant field for VALUE comes first, e.g. the <index> in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
   is encoded in H:L:M in some cases; the fields H:L:M should be passed in
   the order of M, L, H.  */

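/* For example, insert_fields (code, index, 0, 3, FLD_M, FLD_L, FLD_H) puts
   bit 0 of INDEX into the M field, bit 1 into L and bit 2 into H, because
   VALUE is shifted right by each field's width once that field has been
   inserted.  */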
static inline void
insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
{
  uint32_t num;
  const aarch64_field *field;
  enum aarch64_field_kind kind;
  va_list va;

  va_start (va, mask);
  num = va_arg (va, uint32_t);
  assert (num <= 5);
  while (num--)
    {
      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      insert_field (kind, code, value, mask);
      value >>= field->width;
    }
  va_end (va);
}

/* Insert a raw field value VALUE into all fields in SELF->fields.
   The least significant bit goes in the final field.  */

static void
insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
                   aarch64_insn value)
{
  unsigned int i;
  enum aarch64_field_kind kind;

  for (i = ARRAY_SIZE (self->fields); i-- > 0; )
    if (self->fields[i] != FLD_NIL)
      {
        kind = self->fields[i];
        insert_field (kind, code, value, 0);
        value >>= fields[kind].width;
      }
}

/* Operand inserters.  */

/* Insert nothing.  */
bfd_boolean
aarch64_ins_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info ATTRIBUTE_UNUSED,
                  aarch64_insn *code ATTRIBUTE_UNUSED,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return TRUE;
}

/* Insert register number.  */
bfd_boolean
aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reg.regno, 0);
  return TRUE;
}

/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
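/* For the H-variant of such instructions the index is encoded in H:L:M, so
   an index of 5 (0b101) below yields H = 1, L = 0, M = 1.  */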
bfd_boolean
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code, const aarch64_inst *inst,
                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
          && inst->opcode->operands[0] == AARCH64_OPND_Ed)
        {
          /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
          assert (info->idx == 1);      /* Vn */
          aarch64_insn value = info->reglane.index << pos;
          insert_field (FLD_imm4, code, value, 0);
        }
      else
        {
          /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
             imm5<3:0>  <V>
             0000       RESERVED
             xxx1       B
             xx10       H
             x100       S
             1000       D  */
          aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
          insert_field (FLD_imm5, code, value, 0);
        }
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      unsigned reglane_index = info->reglane.index;
      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_4B:
        case AARCH64_OPND_QLF_S_2H:
          /* L:H */
          assert (reglane_index < 4);
          insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
          break;
        default:
          assert (0);
        }
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>].  */
      unsigned reglane_index = info->reglane.index;
      assert (reglane_index < 4);
      insert_field (FLD_SM3_imm2, code, reglane_index, 0);
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
         or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
        /* Complex operand takes two elements.  */
        reglane_index *= 2;

      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_H:
          /* H:L:M */
          assert (reglane_index < 8);
          insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
          break;
        case AARCH64_OPND_QLF_S_S:
          /* H:L */
          assert (reglane_index < 4);
          insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
          break;
        case AARCH64_OPND_QLF_S_D:
          /* H */
          assert (reglane_index < 2);
          insert_field (FLD_H, code, reglane_index, 0);
          break;
        default:
          assert (0);
        }
    }
  return TRUE;
}

/* Insert regno and len field of a register list operand, e.g. Vn in TBL.  */
bfd_boolean
aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED,
                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* R */
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  /* len */
  insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
  return TRUE;
}

/* Insert Rt and opcode fields for a register list operand, e.g. Vt
   in AdvSIMD load/store instructions.  */
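/* The opcode values below follow the AdvSIMD load/store multiple structures
   encoding; e.g. for the single-element-structure forms (LD1/ST1) a list of
   one register uses opcode 0b0111, two 0b1010, three 0b0110 and four
   0b0010.  */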
bfd_boolean
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst,
                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value = 0;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* opcode */
  switch (num)
    {
    case 1:
      switch (info->reglist.num_regs)
        {
        case 1: value = 0x7; break;
        case 2: value = 0xa; break;
        case 3: value = 0x6; break;
        case 4: value = 0x2; break;
        default: assert (0);
        }
      break;
    case 2:
      value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
      break;
    case 3:
      value = 0x4;
      break;
    case 4:
      value = 0x0;
      break;
    default:
      assert (0);
    }
  insert_field (FLD_opcode, code, value, 0);

  return TRUE;
}

/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
   single structure to all lanes instructions.  */
bfd_boolean
aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;
  /* The opcode dependent area stores the number of elements in
     each structure to be loaded/stored.  */
  int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* S */
  value = (aarch64_insn) 0;
  if (is_ld1r && info->reglist.num_regs == 2)
    /* OP_LD1R does not have an alternating variant, but has a
       "two consecutive" variant instead.  */
    value = (aarch64_insn) 1;
  insert_field (FLD_S, code, value, 0);

  return TRUE;
}

/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand, e.g. Vt in AdvSIMD load/store single element instructions.  */
bfd_boolean
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                           const aarch64_opnd_info *info, aarch64_insn *code,
                           const aarch64_inst *inst ATTRIBUTE_UNUSED,
                           aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;      /* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;    /* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q".  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      assert (0);
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return TRUE;
}

/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
     SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.  */
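/* For instance, a right shift by 8 on S-sized elements (SSHR <Vd>.4S,
   <Vn>.4S, #8) is encoded below as immh:immb = 64 - 8 = 56, so immh
   matches the 01xx pattern for the S arrangement in the table.  */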
bfd_boolean
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code, const aarch64_inst *inst,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
         immh   Q       <T>
         0000   x       SEE AdvSIMD modified immediate
         0001   0       8B
         0001   1       16B
         001x   0       4H
         001x   1       8H
         01xx   0       2S
         01xx   1       4S
         1xxx   0       RESERVED
         1xxx   1       2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
          || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh     <shift>
       0000     SEE AdvSIMD modified immediate
       0001     (16-UInt(immh:immb))
       001x     (32-UInt(immh:immb))
       01xx     (64-UInt(immh:immb))
       1xxx     (128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh:immb
       immh     <shift>
       0000     SEE AdvSIMD modified immediate
       0001     (UInt(immh:immb)-8)
       001x     (UInt(immh:immb)-16)
       01xx     (UInt(immh:immb)-32)
       1xxx     (UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return TRUE;
}

/* Insert fields for e.g. the immediate operands in
     BFM <Wd>, <Wn>, #<immr>, #<imms>.  */
bfd_boolean
aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
                 aarch64_insn *code,
                 const aarch64_inst *inst ATTRIBUTE_UNUSED,
                 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int64_t imm;

  imm = info->imm.value;
  if (operand_need_shift_by_two (self))
    imm >>= 2;
  if (operand_need_shift_by_four (self))
    imm >>= 4;
  insert_all_fields (self, code, imm);
  return TRUE;
}

/* Insert immediate and its shift amount for e.g. the last operand in
     MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
bfd_boolean
aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
                      aarch64_insn *code, const aarch64_inst *inst,
                      aarch64_operand_error *errors)
{
  /* imm16 */
  aarch64_ins_imm (self, info, code, inst, errors);
  /* hw */
  insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
  return TRUE;
}

/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
     MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
bfd_boolean
aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
                                  const aarch64_opnd_info *info,
                                  aarch64_insn *code,
                                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  uint64_t imm = info->imm.value;
  enum aarch64_modifier_kind kind = info->shifter.kind;
  int amount = info->shifter.amount;
  aarch64_field field = {0, 0};

  /* a:b:c:d:e:f:g:h */
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    {
      /* Either MOVI <Dd>, #<imm>
         or     MOVI <Vd>.2D, #<imm>.
         <imm> is a 64-bit immediate
         "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
         encoded in "a:b:c:d:e:f:g:h".  */
      imm = aarch64_shrink_expanded_imm8 (imm);
      assert ((int)imm >= 0);
    }
  insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);

  if (kind == AARCH64_MOD_NONE)
    return TRUE;

  /* shift amount partially in cmode */
  assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
  if (kind == AARCH64_MOD_LSL)
    {
      /* AARCH64_MOD_LSL: shift zeros.  */
      int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
      assert (esize == 4 || esize == 2 || esize == 1);
      /* For 8-bit move immediate, the optional LSL #0 does not require
         encoding.  */
      if (esize == 1)
        return TRUE;
      amount >>= 3;
      if (esize == 4)
        gen_sub_field (FLD_cmode, 1, 2, &field);        /* per word */
      else
        gen_sub_field (FLD_cmode, 1, 1, &field);        /* per halfword */
    }
  else
    {
      /* AARCH64_MOD_MSL: shift ones.  */
      amount >>= 4;
      gen_sub_field (FLD_cmode, 0, 1, &field);          /* per word */
    }
  insert_field_2 (&field, code, amount, 0);

  return TRUE;
}

/* Insert fields for an 8-bit floating-point immediate.  */
bfd_boolean
aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  return TRUE;
}

/* Insert 1-bit rotation immediate (#90 or #270).  */
bfd_boolean
aarch64_ins_imm_rotate1 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code, const aarch64_inst *inst,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t rot = (info->imm.value - 90) / 180;
  assert (rot < 2U);
  insert_field (self->fields[0], code, rot, inst->opcode->mask);
  return TRUE;
}

/* Insert 2-bit rotation immediate (#0, #90, #180 or #270).  */
bfd_boolean
aarch64_ins_imm_rotate2 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code, const aarch64_inst *inst,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t rot = info->imm.value / 90;
  assert (rot < 4U);
  insert_field (self->fields[0], code, rot, inst->opcode->mask);
  return TRUE;
}

/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
   e.g. SCVTF <Dd>, <Wn>, #<fbits>.  */
bfd_boolean
aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, 64 - info->imm.value, 0);
  return TRUE;
}

/* Insert arithmetic immediate for e.g. the last operand in
     SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
bfd_boolean
aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* shift */
  aarch64_insn value = info->shifter.amount ? 1 : 0;
  insert_field (self->fields[0], code, value, 0);
  /* imm12 (unsigned) */
  insert_field (self->fields[1], code, info->imm.value, 0);
  return TRUE;
}

/* Common routine shared by aarch64_ins{,_inv}_limm.  INVERT_P says whether
   the operand should be inverted before encoding.  */
static bfd_boolean
aarch64_ins_limm_1 (const aarch64_operand *self,
                    const aarch64_opnd_info *info, aarch64_insn *code,
                    const aarch64_inst *inst, bfd_boolean invert_p,
                    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;
  uint64_t imm = info->imm.value;
  int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);

  if (invert_p)
    imm = ~imm;
  /* The constraint check should have guaranteed this wouldn't happen.  */
  assert (aarch64_logical_immediate_p (imm, esize, &value));

  insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
                 self->fields[0]);
  return TRUE;
}

/* Insert logical/bitmask immediate for e.g. the last operand in
     ORR <Wd|WSP>, <Wn>, #<imm>.  */
bfd_boolean
aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return aarch64_ins_limm_1 (self, info, code, inst,
                             inst->opcode->op == OP_BIC, errors);
}

/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.).  */
bfd_boolean
aarch64_ins_inv_limm (const aarch64_operand *self,
                      const aarch64_opnd_info *info, aarch64_insn *code,
                      const aarch64_inst *inst,
                      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return aarch64_ins_limm_1 (self, info, code, inst, TRUE, errors);
}

/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
bfd_boolean
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
                aarch64_insn *code, const aarch64_inst *inst,
                aarch64_operand_error *errors)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst, errors);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size */
      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_Q: value = 2; break;
        default: assert (0);
        }
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }

  return TRUE;
}

/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
bfd_boolean
aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  return TRUE;
}

/* Encode the address operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
bfd_boolean
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;    /* Trick to enable the table-driven.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}],
         S      <amount>
         0      [absent]
         1      #0
       Must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return TRUE;
}

/* Encode the address operand for e.g.
     stlur <Xt>, [<Xn|SP>{, <amount>}].  */
bfd_boolean
aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);

  /* simm9 */
  int imm = info->addr.offset.imm;
  insert_field (self->fields[1], code, imm, 0);

  /* writeback */
  if (info->addr.writeback)
    {
      assert (info->addr.preind == 1 && info->addr.postind == 0);
      insert_field (self->fields[2], code, 1, 0);
    }
  return TRUE;
}

/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.  */
bfd_boolean
aarch64_ins_addr_simm (const aarch64_operand *self,
                       const aarch64_opnd_info *info,
                       aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7
      || info->qualifier == AARCH64_OPND_QLF_imm_tag)
    /* scaled immediate in ld/st pair instructions.  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post- index */
  if (info->addr.writeback)
    {
      assert (inst->opcode->iclass != ldst_unscaled
              && inst->opcode->iclass != ldstnapair_offs
              && inst->opcode->iclass != ldstpair_off
              && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
        insert_field (self->fields[1], code, 1, 0);
    }

  return TRUE;
}

/* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}].  */
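/* The offset is scaled down by 8; the low nine bits of the scaled value go
   in fields[2] and the tenth (sign) bit in fields[1].  */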
bfd_boolean
aarch64_ins_addr_simm10 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* simm10 */
  imm = info->addr.offset.imm >> 3;
  insert_field (self->fields[1], code, imm >> 9, 0);
  insert_field (self->fields[2], code, imm, 0);
  /* writeback */
  if (info->addr.writeback)
    {
      assert (info->addr.preind == 1 && info->addr.postind == 0);
      insert_field (self->fields[3], code, 1, 0);
    }
  return TRUE;
}

/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}].  */
bfd_boolean
aarch64_ins_addr_uimm12 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* uimm12 */
  insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
  return TRUE;
}

/* Encode the address operand for e.g.
     LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
bfd_boolean
aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm | #<amount> */
  if (info->addr.offset.is_reg)
    insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  else
    insert_field (FLD_Rm, code, 0x1f, 0);
  return TRUE;
}

/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
bfd_boolean
aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info, aarch64_insn *code,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* cond */
  insert_field (FLD_cond, code, info->cond->value, 0);
  return TRUE;
}

/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
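/* The sysreg value packs op0:op1:CRn:CRm:op2 with op2 in the least
   significant bits, which matches the field order passed to insert_fields
   below (least significant field first).  */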
bfd_boolean
aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
                    const aarch64_opnd_info *info, aarch64_insn *code,
                    const aarch64_inst *inst,
                    aarch64_operand_error *detail ATTRIBUTE_UNUSED)
{
  /* If a system instruction, check if we have any restrictions on which
     registers it can use.  */
  if (inst->opcode->iclass == ic_system)
    {
      uint64_t opcode_flags
        = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
      uint32_t sysreg_flags
        = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);

      /* Check to see if it's read-only, else check if it's write-only.
         If it's both or unspecified, don't care.  */
      if (opcode_flags == F_SYS_READ
          && sysreg_flags
          && sysreg_flags != F_REG_READ)
        {
          detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
          detail->error = _("specified register cannot be read from");
          detail->index = info->idx;
          detail->non_fatal = TRUE;
        }
      else if (opcode_flags == F_SYS_WRITE
               && sysreg_flags
               && sysreg_flags != F_REG_WRITE)
        {
          detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
          detail->error = _("specified register cannot be written to");
          detail->index = info->idx;
          detail->non_fatal = TRUE;
        }
    }
  /* op0:op1:CRn:CRm:op2 */
  insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
  return TRUE;
}

/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
bfd_boolean
aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* op1:op2 */
  insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
                 FLD_op2, FLD_op1);
  return TRUE;
}

/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
bfd_boolean
aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* op1:CRn:CRm:op2 */
  insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
  return TRUE;
}

/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */

bfd_boolean
aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
                     const aarch64_opnd_info *info, aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED,
                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* CRm */
  insert_field (FLD_CRm, code, info->barrier->value, 0);
  return TRUE;
}

/* Encode the prefetch operation option operand for e.g.
     PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */

bfd_boolean
aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
                   const aarch64_opnd_info *info, aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* prfop in Rt */
  insert_field (FLD_Rt, code, info->prfop->value, 0);
  return TRUE;
}

/* Encode the hint number for instructions that alias HINT but take an
   operand.  */

bfd_boolean
aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info, aarch64_insn *code,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* CRm:op2.  */
  insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
  return TRUE;
}

/* Encode the extended register operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
bfd_boolean
aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst ATTRIBUTE_UNUSED,
                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  enum aarch64_modifier_kind kind;

  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* option */
  kind = info->shifter.kind;
  if (kind == AARCH64_MOD_LSL)
    kind = info->qualifier == AARCH64_OPND_QLF_W
      ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* imm3 */
  insert_field (FLD_imm3, code, info->shifter.amount, 0);

  return TRUE;
}

/* Encode the shifted register operand for e.g.
     SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
bfd_boolean
aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* shift */
  insert_field (FLD_shift, code,
                aarch64_get_operand_modifier_value (info->shifter.kind), 0);
  /* imm6 */
  insert_field (FLD_imm6, code, info->shifter.amount, 0);

  return TRUE;
}

/* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
   where <simm4> is a 4-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm4> is encoded in the SVE_imm4 field.  */
bfd_boolean
aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
  return TRUE;
}

/* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
   where <simm6> is a 6-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm6> is encoded in the SVE_imm6 field.  */
bfd_boolean
aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return TRUE;
}

/* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
   where <simm9> is a 9-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm9> is encoded in the concatenation of the SVE_imm6
   and imm3 fields, with imm3 being the less-significant part.  */
bfd_boolean
aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_fields (code, info->addr.offset.imm / factor, 0,
                 2, FLD_imm3, FLD_SVE_imm6);
  return TRUE;
}

/* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
   is a 4-bit signed number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
bfd_boolean
aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
  return TRUE;
}

/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
   is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
bfd_boolean
aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return TRUE;
}

/* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
   is SELF's operand-dependent value.  fields[0] specifies the base
   register field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  return TRUE;
}

/* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
   <shift> is SELF's operand-dependent value.  fields[0] specifies the
   base register field, fields[1] specifies the offset register field and
   fields[2] is a single-bit field that selects SXTW over UXTW.  */
bfd_boolean
aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  if (info->shifter.kind == AARCH64_MOD_UXTW)
    insert_field (self->fields[2], code, 0, 0);
  else
    insert_field (self->fields[2], code, 1, 0);
  return TRUE;
}

/* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
   5-bit unsigned number and where <shift> is SELF's operand-dependent value.
   fields[0] specifies the base register field.  */
bfd_boolean
aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
  return TRUE;
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is fixed by the instruction and where <msz> is a
   2-bit unsigned number.  fields[0] specifies the base register field
   and fields[1] specifies the offset register field.  */
static bfd_boolean
aarch64_ext_sve_addr_zz (const aarch64_operand *self,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
  return TRUE;
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
                              const aarch64_opnd_info *info,
                              aarch64_insn *code,
                              const aarch64_inst *inst ATTRIBUTE_UNUSED,
                              aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
                              const aarch64_opnd_info *info,
                              aarch64_insn *code,
                              const aarch64_inst *inst ATTRIBUTE_UNUSED,
                              aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}

/* Encode an SVE ADD/SUB immediate.  */
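/* Bit 8 of the encoded value acts as the shift flag: an immediate written
   with LSL #8, or a plain non-zero multiple of 256, is encoded as imm / 256
   with bit 8 set; otherwise the low eight bits are used directly.  */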
bfd_boolean
aarch64_ins_sve_aimm (const aarch64_operand *self,
                      const aarch64_opnd_info *info, aarch64_insn *code,
                      const aarch64_inst *inst ATTRIBUTE_UNUSED,
                      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->shifter.amount == 8)
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    insert_all_fields (self, code, info->imm.value & 0xff);
  return TRUE;
}

/* Encode an SVE CPY/DUP immediate.  */
bfd_boolean
aarch64_ins_sve_asimm (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst,
                       aarch64_operand_error *errors)
{
  return aarch64_ins_sve_aimm (self, info, code, inst, errors);
}

/* Encode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
   array specifies which field to use for Zn.  MM is encoded in the
   concatenation of imm5 and SVE_tszh, with imm5 being the less
   significant part.  */
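/* For an element size of ESIZE bytes and index MM the inserted value is
   (MM * 2 + 1) * ESIZE: the index, followed by a set bit, followed by
   log2(ESIZE) zero bits.  */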
bfd_boolean
aarch64_ins_sve_index (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
  insert_field (self->fields[0], code, info->reglane.regno, 0);
  insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
                 2, FLD_imm5, FLD_SVE_tszh);
  return TRUE;
}

/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
bfd_boolean
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst,
                          aarch64_operand_error *errors)
{
  return aarch64_ins_limm (self, info, code, inst, errors);
}

/* Encode Zn[MM], where Zn occupies the least-significant part of the field
   and where MM occupies the most-significant part.  The operand-dependent
   value specifies the number of bits in Zn.  */
bfd_boolean
aarch64_ins_sve_quad_index (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int reg_bits = get_operand_specific_data (self);
  assert (info->reglane.regno < (1U << reg_bits));
  unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
  insert_all_fields (self, code, val);
  return TRUE;
}

/* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
   to use for Zn.  */
bfd_boolean
aarch64_ins_sve_reglist (const aarch64_operand *self,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  return TRUE;
}

/* Encode <pattern>{, MUL #<amount>}.  The fields array specifies which
   fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
   field.  */
bfd_boolean
aarch64_ins_sve_scale (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
  return TRUE;
}

/* Encode an SVE shift left immediate.  */
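/* The encoded value is the element size in bits plus the shift amount; the
   element size is taken from the qualifier of the preceding operand.  */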
bfd_boolean
aarch64_ins_sve_shlimm (const aarch64_operand *self,
                        const aarch64_opnd_info *info, aarch64_insn *code,
                        const aarch64_inst *inst,
                        aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  assert (info->idx > 0);
  prev_operand = &inst->operands[info->idx - 1];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  insert_all_fields (self, code, 8 * esize + info->imm.value);
  return TRUE;
}

/* Encode an SVE shift right immediate.  */
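/* The encoded value is twice the element size in bits minus the shift
   amount; the operand-specific data gives how many operands back to look
   for the qualifier that supplies the element size.  */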
bfd_boolean
aarch64_ins_sve_shrimm (const aarch64_operand *self,
                        const aarch64_opnd_info *info, aarch64_insn *code,
                        const aarch64_inst *inst,
                        aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  unsigned int opnd_backshift = get_operand_specific_data (self);
  assert (info->idx >= (int)opnd_backshift);
  prev_operand = &inst->operands[info->idx - opnd_backshift];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  insert_all_fields (self, code, 16 * esize - info->imm.value);
  return TRUE;
}

/* Encode a single-bit immediate that selects between #0.5 and #1.0.
   The fields array specifies which field to use.  */
bfd_boolean
aarch64_ins_sve_float_half_one (const aarch64_operand *self,
                                const aarch64_opnd_info *info,
                                aarch64_insn *code,
                                const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->imm.value == 0x3f000000)
    insert_field (self->fields[0], code, 0, 0);
  else
    insert_field (self->fields[0], code, 1, 0);
  return TRUE;
}

/* Encode a single-bit immediate that selects between #0.5 and #2.0.
   The fields array specifies which field to use.  */
bfd_boolean
aarch64_ins_sve_float_half_two (const aarch64_operand *self,
                                const aarch64_opnd_info *info,
                                aarch64_insn *code,
                                const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->imm.value == 0x3f000000)
    insert_field (self->fields[0], code, 0, 0);
  else
    insert_field (self->fields[0], code, 1, 0);
  return TRUE;
}

/* Encode a single-bit immediate that selects between #0.0 and #1.0.
   The fields array specifies which field to use.  */
bfd_boolean
aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
                                const aarch64_opnd_info *info,
                                aarch64_insn *code,
                                const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->imm.value == 0)
    insert_field (self->fields[0], code, 0, 0);
  else
    insert_field (self->fields[0], code, 1, 0);
  return TRUE;
}

/* Miscellaneous encoding functions.  */

/* Encode size[0], i.e. bit 22, for
     e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier;

  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      assert (0);
    }
  assert (qualifier == AARCH64_OPND_QLF_V_4S
          || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}

/* Encode size[0], i.e. bit 22, for
     e.g. FCVTXN <Vb><d>, <Va><n>.  */

static void
encode_asisd_fcvtxn (aarch64_inst *inst)
{
  aarch64_insn val = 1;
  aarch64_field field = {0, 0};
  assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, val, 0);
}

/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
static void
encode_fcvt (aarch64_inst *inst)
{
  aarch64_insn val;
  const aarch64_field field = {15, 2};

  /* opc dstsize */
  switch (inst->operands[0].qualifier)
    {
    case AARCH64_OPND_QLF_S_S: val = 0; break;
    case AARCH64_OPND_QLF_S_D: val = 1; break;
    case AARCH64_OPND_QLF_S_H: val = 3; break;
    default: abort ();
    }
  insert_field_2 (&field, &inst->value, val, 0);

  return;
}

/* Return the index in qualifiers_list that INST is using.  Should only
   be called once the qualifiers are known to be valid.  */

static int
aarch64_get_variant (struct aarch64_inst *inst)
{
  int i, nops, variant;

  nops = aarch64_num_of_operands (inst->opcode);
  for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
    {
      for (i = 0; i < nops; ++i)
        if (inst->opcode->qualifiers_list[variant][i]
            != inst->operands[i].qualifier)
          break;
      if (i == nops)
        return variant;
    }
  abort ();
}

/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
                     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}

/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
               aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}

1495/* Opcodes that have fields shared by multiple operands are usually flagged
1496 with flags. In this function, we detect such flags and use the
1497 information in one of the related operands to do the encoding. The 'one'
1498 operand is not any operand but one of the operands that has the enough
1499 information for such an encoding. */
1500
1501static void
1502do_special_encoding (struct aarch64_inst *inst)
1503{
1504 int idx;
4ad3b7ef 1505 aarch64_insn value = 0;
a06ea964
NC
1506
1507 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
1508
1509 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1510 if (inst->opcode->flags & F_COND)
1511 {
1512 insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
1513 }
1514 if (inst->opcode->flags & F_SF)
1515 {
1516 idx = select_operand_for_sf_field_coding (inst->opcode);
1517 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
1518 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
1519 ? 1 : 0;
1520 insert_field (FLD_sf, &inst->value, value, 0);
1521 if (inst->opcode->flags & F_N)
1522 insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
1523 }
1524 if (inst->opcode->flags & F_LSE_SZ)
1525 {
1526 idx = select_operand_for_sf_field_coding (inst->opcode);
1527 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
1528 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
1529 ? 1 : 0;
1530 insert_field (FLD_lse_sz, &inst->value, value, 0);
1531 }
1532 if (inst->opcode->flags & F_SIZEQ)
1533 encode_sizeq (inst);
1534 if (inst->opcode->flags & F_FPTYPE)
1535 {
1536 idx = select_operand_for_fptype_field_coding (inst->opcode);
1537 switch (inst->operands[idx].qualifier)
1538 {
1539 case AARCH64_OPND_QLF_S_S: value = 0; break;
1540 case AARCH64_OPND_QLF_S_D: value = 1; break;
1541 case AARCH64_OPND_QLF_S_H: value = 3; break;
1542 default: assert (0);
1543 }
1544 insert_field (FLD_type, &inst->value, value, 0);
1545 }
1546 if (inst->opcode->flags & F_SSIZE)
1547 {
1548 enum aarch64_opnd_qualifier qualifier;
1549 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1550 qualifier = inst->operands[idx].qualifier;
1551 assert (qualifier >= AARCH64_OPND_QLF_S_B
1552 && qualifier <= AARCH64_OPND_QLF_S_Q);
1553 value = aarch64_get_qualifier_standard_value (qualifier);
1554 insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
1555 }
1556 if (inst->opcode->flags & F_T)
1557 {
1558 int num; /* num of consecutive '0's on the right side of imm5<3:0>. */
1559 aarch64_field field = {0, 0};
1560 enum aarch64_opnd_qualifier qualifier;
1561
1562 idx = 0;
1563 qualifier = inst->operands[idx].qualifier;
1564 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1565 == AARCH64_OPND_CLASS_SIMD_REG
1566 && qualifier >= AARCH64_OPND_QLF_V_8B
1567 && qualifier <= AARCH64_OPND_QLF_V_2D);
1568 /* imm5<3:0> q <t>
1569 0000 x reserved
1570 xxx1 0 8b
1571 xxx1 1 16b
1572 xx10 0 4h
1573 xx10 1 8h
1574 x100 0 2s
1575 x100 1 4s
1576 1000 0 reserved
1577 1000 1 2d */
1578 value = aarch64_get_qualifier_standard_value (qualifier);
1579 insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
1580 num = (int) value >> 1;
1581 assert (num >= 0 && num <= 3);
1582 gen_sub_field (FLD_imm5, 0, num + 1, &field);
1583 insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
1584 }
1585 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1586 {
1587 /* Use Rt to encode in the case of e.g.
1588 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1589 enum aarch64_opnd_qualifier qualifier;
1590 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1591 if (idx == -1)
1592 /* Otherwise use the result operand, which has to be an integer
1593 register. */
1594 idx = 0;
1595 assert (idx == 0 || idx == 1);
1596 assert (aarch64_get_operand_class (inst->opcode->operands[idx])
1597 == AARCH64_OPND_CLASS_INT_REG);
1598 qualifier = inst->operands[idx].qualifier;
1599 insert_field (FLD_Q, &inst->value,
1600 aarch64_get_qualifier_standard_value (qualifier), 0);
1601 }
1602 if (inst->opcode->flags & F_LDS_SIZE)
1603 {
1604 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1605 enum aarch64_opnd_qualifier qualifier;
1606 aarch64_field field = {0, 0};
1607 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1608 == AARCH64_OPND_CLASS_INT_REG);
1609 gen_sub_field (FLD_opc, 0, 1, &field);
1610 qualifier = inst->operands[0].qualifier;
1611 insert_field_2 (&field, &inst->value,
1612 1 - aarch64_get_qualifier_standard_value (qualifier), 0);
1613 }
1614 /* Miscellaneous encoding as the last step. */
1615 if (inst->opcode->flags & F_MISC)
1616 do_misc_encoding (inst);
1617
1618 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
1619}
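/* Editorial illustration (not part of the original file): two worked examples
   of the special encodings above.  For an F_SF opcode such as ADD <Xd>, <Xn>,
   <Xm>, the X qualifier selects sf = 1, while the W form selects sf = 0.  For
   an F_T opcode, a .4H operand whose standard value is 0b010 (an assumption
   consistent with the imm5 table above) gives Q = 0 and num = 1, so 0b10 is
   written into imm5<1:0>, matching the "xx10 0 4h" row.  */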
1620
1621/* Some instructions (including all SVE ones) use the instruction class
1622 to describe how a qualifiers_list index is represented in the instruction
1623 encoding. If INST is such an instruction, encode the chosen qualifier
1624 variant. */
1625
1626static void
1627aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
1628{
1629 int variant = 0;
1630 switch (inst->opcode->iclass)
1631 {
1632 case sve_cpy:
1633 insert_fields (&inst->value, aarch64_get_variant (inst),
1634 0, 2, FLD_SVE_M_14, FLD_size);
1635 break;
1636
1637 case sve_index:
1638 case sve_shift_pred:
1639 case sve_shift_unpred:
1640 case sve_shift_tsz_hsd:
1641 case sve_shift_tsz_bhsd:
1642 /* For indices and shift amounts, the variant is encoded as
1643 part of the immediate. */
1644 break;
1645
1646 case sve_limm:
1647 /* For sve_limm, the .B, .H, and .S forms are just a convenience
1648 and depend on the immediate. They don't have a separate
1649 encoding. */
1650 break;
1651
1652 case sve_misc:
1653 /* sve_misc instructions have only a single variant. */
1654 break;
1655
1656 case sve_movprfx:
1657 insert_fields (&inst->value, aarch64_get_variant (inst),
1658 0, 2, FLD_SVE_M_16, FLD_size);
1659 break;
1660
1661 case sve_pred_zm:
1662 insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
1663 break;
1664
1665 case sve_size_bhs:
1666 case sve_size_bhsd:
1667 insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
1668 break;
1669
1670 case sve_size_hsd:
1671 insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
1672 break;
1673
1674 case sve_size_bh:
1675 case sve_size_sd:
1676 insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
1677 break;
1678
1679 case sve_size_sd2:
1680 insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
1681 break;
1682
1683 case sve_size_hsd2:
1684 insert_field (FLD_SVE_size, &inst->value,
1685 aarch64_get_variant (inst) + 1, 0);
1686 break;
1687
1688 case sve_size_tsz_bhs:
1689 insert_fields (&inst->value,
1690 (1 << aarch64_get_variant (inst)),
1691 0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
1692 break;
1693
1694 case sve_size_13:
1695 variant = aarch64_get_variant (inst) + 1;
1696 if (variant == 2)
1697 variant = 3;
1698 insert_field (FLD_size, &inst->value, variant, 0);
1699 break;
1700
1701 default:
1702 break;
1703 }
1704}
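/* Editorial illustration (not part of the original file): for the
   sve_size_hsd class the variant index is written as FLD_size plus one, so,
   assuming the qualifier list orders the variants as .H, .S, .D, they would
   be encoded as size = 1, 2 and 3 respectively; sve_size_13 skips the value
   2 and maps its second variant straight to size = 3.  */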
1705
1706/* Converters that transform an alias opcode instruction into its real form. */
1707
1708/* ROR <Wd>, <Ws>, #<shift>
1709 is equivalent to:
1710 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1711static void
1712convert_ror_to_extr (aarch64_inst *inst)
1713{
1714 copy_operand_info (inst, 3, 2);
1715 copy_operand_info (inst, 2, 1);
1716}
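/* Editorial illustration (not part of the original file): the conversion
   above rewrites, for example, ROR W0, W1, #7 as EXTR W0, W1, W1, #7 by
   duplicating the source register into the third operand slot.  */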
1717
1718/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1719 is equivalent to:
1720 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1721static void
1722convert_xtl_to_shll (aarch64_inst *inst)
1723{
1724 inst->operands[2].qualifier = inst->operands[1].qualifier;
1725 inst->operands[2].imm.value = 0;
1726}
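/* Editorial illustration (not part of the original file): for example,
   UXTL V0.8H, V1.8B becomes USHLL V0.8H, V1.8B, #0; the added immediate
   operand reuses the source qualifier and a zero shift amount.  */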
1727
1728/* Convert
1729 LSR <Xd>, <Xn>, #<shift>
1730 to
1731 UBFM <Xd>, <Xn>, #<shift>, #63. */
1732static void
1733convert_sr_to_bfm (aarch64_inst *inst)
1734{
1735 inst->operands[3].imm.value =
1736 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1737}
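/* Editorial illustration (not part of the original file): for example,
   LSR X0, X1, #4 becomes UBFM X0, X1, #4, #63, and the 32-bit form
   LSR W0, W1, #4 becomes UBFM W0, W1, #4, #31.  */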
1738
1739/* Convert MOV to ORR. */
1740static void
1741convert_mov_to_orr (aarch64_inst *inst)
1742{
1743 /* MOV <Vd>.<T>, <Vn>.<T>
1744 is equivalent to:
1745 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1746 copy_operand_info (inst, 2, 1);
1747}
1748
1749/* When <imms> >= <immr>, the instruction written:
1750 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1751 is equivalent to:
1752 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1753
1754static void
1755convert_bfx_to_bfm (aarch64_inst *inst)
1756{
1757 int64_t lsb, width;
1758
1759 /* Convert the operand. */
1760 lsb = inst->operands[2].imm.value;
1761 width = inst->operands[3].imm.value;
1762 inst->operands[2].imm.value = lsb;
1763 inst->operands[3].imm.value = lsb + width - 1;
1764}
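/* Editorial illustration (not part of the original file): for example,
   SBFX X0, X1, #8, #16 becomes SBFM X0, X1, #8, #23, since
   <lsb> + <width> - 1 = 8 + 16 - 1 = 23.  */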
1765
1766/* When <imms> < <immr>, the instruction written:
1767 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1768 is equivalent to:
1769 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1770
1771static void
1772convert_bfi_to_bfm (aarch64_inst *inst)
1773{
1774 int64_t lsb, width;
1775
1776 /* Convert the operand. */
1777 lsb = inst->operands[2].imm.value;
1778 width = inst->operands[3].imm.value;
1779 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1780 {
1781 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1782 inst->operands[3].imm.value = width - 1;
1783 }
1784 else
1785 {
1786 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1787 inst->operands[3].imm.value = width - 1;
1788 }
1789}
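/* Editorial illustration (not part of the original file): for example,
   BFI X0, X1, #8, #16 becomes BFM X0, X1, #56, #15, because
   (64 - 8) & 0x3f = 56 and 16 - 1 = 15; the 32-bit form
   BFI W0, W1, #8, #16 becomes BFM W0, W1, #24, #15.  */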
1790
1791/* The instruction written:
1792 BFC <Xd>, #<lsb>, #<width>
1793 is equivalent to:
1794 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1795
1796static void
1797convert_bfc_to_bfm (aarch64_inst *inst)
1798{
1799 int64_t lsb, width;
1800
1801 /* Insert XZR. */
1802 copy_operand_info (inst, 3, 2);
1803 copy_operand_info (inst, 2, 1);
1804 copy_operand_info (inst, 1, 0);
1805 inst->operands[1].reg.regno = 0x1f;
1806
1807 /* Convert the immediate operand. */
1808 lsb = inst->operands[2].imm.value;
1809 width = inst->operands[3].imm.value;
1810 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1811 {
1812 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1813 inst->operands[3].imm.value = width - 1;
1814 }
1815 else
1816 {
1817 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1818 inst->operands[3].imm.value = width - 1;
1819 }
1820}
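/* Editorial illustration (not part of the original file): for example,
   BFC X0, #8, #16 becomes BFM X0, XZR, #56, #15; the conversion is the same
   as for BFI except that XZR (register number 0x1f) is inserted as the
   source operand.  */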
1821
1822/* The instruction written:
1823 LSL <Xd>, <Xn>, #<shift>
1824 is equivalent to:
1825 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1826
1827static void
1828convert_lsl_to_ubfm (aarch64_inst *inst)
1829{
1830 int64_t shift = inst->operands[2].imm.value;
1831
1832 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1833 {
1834 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1835 inst->operands[3].imm.value = 31 - shift;
1836 }
1837 else
1838 {
1839 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1840 inst->operands[3].imm.value = 63 - shift;
1841 }
1842}
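/* Editorial sketch (not part of the original file): a minimal, self-contained
   helper, kept out of compilation, showing the immr/imms arithmetic used
   above.  The helper name is hypothetical.  For example, LSL X0, X1, #3
   becomes UBFM X0, X1, #61, #60, and LSL W0, W1, #3 becomes
   UBFM W0, W1, #29, #28.  */
#if 0
static void
lsl_to_ubfm_immediates (int is32, int shift, int *immr, int *imms)
{
  int regsize = is32 ? 32 : 64;
  *immr = (regsize - shift) & (regsize - 1);  /* e.g. (64 - 3) & 0x3f = 61.  */
  *imms = regsize - 1 - shift;                /* e.g. 63 - 3 = 60.  */
}
#endif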
1843
1844/* CINC <Wd>, <Wn>, <cond>
1845 is equivalent to:
1846 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1847
1848static void
1849convert_to_csel (aarch64_inst *inst)
1850{
1851 copy_operand_info (inst, 3, 2);
1852 copy_operand_info (inst, 2, 1);
1853 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1854}
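/* Editorial illustration (not part of the original file): for example,
   CINC W0, W1, EQ becomes CSINC W0, W1, W1, NE; the source register is
   duplicated and the condition is inverted.  */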
1855
1856/* CSET <Wd>, <cond>
1857 is equivalent to:
1858 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1859
1860static void
1861convert_cset_to_csinc (aarch64_inst *inst)
1862{
1863 copy_operand_info (inst, 3, 1);
1864 copy_operand_info (inst, 2, 0);
1865 copy_operand_info (inst, 1, 0);
1866 inst->operands[1].reg.regno = 0x1f;
1867 inst->operands[2].reg.regno = 0x1f;
1868 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1869}
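/* Editorial illustration (not part of the original file): for example,
   CSET W0, EQ becomes CSINC W0, WZR, WZR, NE, with both sources set to
   register number 0x1f (WZR) and the condition inverted.  */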
1870
1871/* MOV <Wd>, #<imm>
1872 is equivalent to:
1873 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1874
1875static void
1876convert_mov_to_movewide (aarch64_inst *inst)
1877{
1878 int is32;
1879 uint32_t shift_amount;
1880 uint64_t value;
1881
1882 switch (inst->opcode->op)
1883 {
1884 case OP_MOV_IMM_WIDE:
1885 value = inst->operands[1].imm.value;
1886 break;
1887 case OP_MOV_IMM_WIDEN:
1888 value = ~inst->operands[1].imm.value;
1889 break;
1890 default:
1891 assert (0);
1892 }
1893 inst->operands[1].type = AARCH64_OPND_HALF;
1894 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1895 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1896 /* The constraint check should have guaranteed this wouldn't happen. */
1897 assert (0);
1898 value >>= shift_amount;
1899 value &= 0xffff;
1900 inst->operands[1].imm.value = value;
1901 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1902 inst->operands[1].shifter.amount = shift_amount;
1903}
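/* Editorial sketch (not part of the original file): a simplified,
   compile-disabled stand-in for the wide-constant test, assuming the same
   contract as aarch64_wide_constant_p (exactly one 16-bit-aligned chunk of
   VALUE may be non-zero; its position is returned as the LSL amount).  The
   helper name is hypothetical.  For example, MOV X0, #0x20000 becomes
   MOVZ X0, #0x2, LSL #16.  */
#if 0
static int
wide_constant_shift (uint64_t value, int is32, uint32_t *shift)
{
  int amount;
  if (is32)
    value &= 0xffffffffull;
  for (amount = 0; amount < (is32 ? 32 : 64); amount += 16)
    if ((value & ~((uint64_t) 0xffff << amount)) == 0)
      {
	/* Every bit outside this 16-bit chunk is zero.  */
	*shift = (uint32_t) amount;
	return 1;
      }
  return 0;
}
#endif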
1904
1905/* MOV <Wd>, #<imm>
1906 is equivalent to:
1907 ORR <Wd>, WZR, #<imm>. */
1908
1909static void
1910convert_mov_to_movebitmask (aarch64_inst *inst)
1911{
1912 copy_operand_info (inst, 2, 1);
1913 inst->operands[1].reg.regno = 0x1f;
1914 inst->operands[1].skip = 0;
1915}
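/* Editorial illustration (not part of the original file): for example,
   MOV X0, #0xff00ff00ff00ff00 becomes ORR X0, XZR, #0xff00ff00ff00ff00,
   with the zero register (number 0x1f) substituted as the first source.  */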
1916
1917/* Some alias opcodes are assembled by being converted to their real form. */
1918
1919static void
1920convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1921{
1922 const aarch64_opcode *alias = inst->opcode;
1923
1924 if ((alias->flags & F_CONV) == 0)
1925 goto convert_to_real_return;
1926
1927 switch (alias->op)
1928 {
1929 case OP_ASR_IMM:
1930 case OP_LSR_IMM:
1931 convert_sr_to_bfm (inst);
1932 break;
1933 case OP_LSL_IMM:
1934 convert_lsl_to_ubfm (inst);
1935 break;
1936 case OP_CINC:
1937 case OP_CINV:
1938 case OP_CNEG:
1939 convert_to_csel (inst);
1940 break;
1941 case OP_CSET:
1942 case OP_CSETM:
1943 convert_cset_to_csinc (inst);
1944 break;
1945 case OP_UBFX:
1946 case OP_BFXIL:
1947 case OP_SBFX:
1948 convert_bfx_to_bfm (inst);
1949 break;
1950 case OP_SBFIZ:
1951 case OP_BFI:
1952 case OP_UBFIZ:
1953 convert_bfi_to_bfm (inst);
1954 break;
1955 case OP_BFC:
1956 convert_bfc_to_bfm (inst);
1957 break;
1958 case OP_MOV_V:
1959 convert_mov_to_orr (inst);
1960 break;
1961 case OP_MOV_IMM_WIDE:
1962 case OP_MOV_IMM_WIDEN:
1963 convert_mov_to_movewide (inst);
1964 break;
1965 case OP_MOV_IMM_LOG:
1966 convert_mov_to_movebitmask (inst);
1967 break;
1968 case OP_ROR_IMM:
1969 convert_ror_to_extr (inst);
1970 break;
1971 case OP_SXTL:
1972 case OP_SXTL2:
1973 case OP_UXTL:
1974 case OP_UXTL2:
1975 convert_xtl_to_shll (inst);
1976 break;
1977 default:
1978 break;
1979 }
1980
1981 convert_to_real_return:
1982 aarch64_replace_opcode (inst, real);
1983}
1984
1985/* Encode *INST_ORI using the opcode OPCODE.
1986 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1987 matched operand qualifier sequence in *QLF_SEQ. */
1988
1989bfd_boolean
1990aarch64_opcode_encode (const aarch64_opcode *opcode,
1991 const aarch64_inst *inst_ori, aarch64_insn *code,
1992 aarch64_opnd_qualifier_t *qlf_seq,
1993 aarch64_operand_error *mismatch_detail,
1994 aarch64_instr_sequence* insn_sequence)
1995{
1996 int i;
1997 const aarch64_opcode *aliased;
1998 aarch64_inst copy, *inst;
1999
2000 DEBUG_TRACE ("enter with %s", opcode->name);
2001
2002 /* Create a copy of *INST_ORI, so that we can do any change we want. */
2003 copy = *inst_ori;
2004 inst = &copy;
2005
2006 assert (inst->opcode == NULL || inst->opcode == opcode);
2007 if (inst->opcode == NULL)
2008 inst->opcode = opcode;
2009
2010 /* Constrain the operands.
2011 After passing this, the encoding is guaranteed to succeed. */
2012 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
2013 {
2014 DEBUG_TRACE ("FAIL since operand constraint not met");
2015 return 0;
2016 }
2017
2018 /* Get the base value.
2019 Note: this has to be before the aliasing handling below in order to
2020 get the base value from the alias opcode before we move on to the
2021 aliased opcode for encoding. */
2022 inst->value = opcode->opcode;
2023
2024 /* No need to do anything else if the opcode does not have any operand. */
2025 if (aarch64_num_of_operands (opcode) == 0)
2026 goto encoding_exit;
2027
2028 /* Assign operand indexes and check types. Also put the matched
2029 operand qualifiers in *QLF_SEQ to return. */
2030 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2031 {
2032 assert (opcode->operands[i] == inst->operands[i].type);
2033 inst->operands[i].idx = i;
2034 if (qlf_seq != NULL)
2035 *qlf_seq = inst->operands[i].qualifier;
2036 }
2037
2038 aliased = aarch64_find_real_opcode (opcode);
2039 /* If the opcode is an alias and it does not ask for direct encoding by
2040 itself, the instruction will be transformed to the form of the real opcode
2041 and the encoding will be carried out using the rules for the aliased
2042 opcode. */
2043 if (aliased != NULL && (opcode->flags & F_CONV))
2044 {
2045 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
2046 aliased->name, opcode->name);
2047 /* Convert the operands to the form of the real opcode. */
2048 convert_to_real (inst, aliased);
2049 opcode = aliased;
2050 }
2051
2052 aarch64_opnd_info *info = inst->operands;
2053
2054 /* Call the inserter of each operand. */
2055 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
2056 {
2057 const aarch64_operand *opnd;
2058 enum aarch64_opnd type = opcode->operands[i];
2059 if (type == AARCH64_OPND_NIL)
2060 break;
2061 if (info->skip)
2062 {
2063 DEBUG_TRACE ("skip the incomplete operand %d", i);
2064 continue;
2065 }
2066 opnd = &aarch64_operands[type];
2067 if (operand_has_inserter (opnd)
2068 && !aarch64_insert_operand (opnd, info, &inst->value, inst,
2069 mismatch_detail))
2070 return FALSE;
2071 }
2072
2073 /* Call opcode encoders indicated by flags. */
2074 if (opcode_has_special_coder (opcode))
2075 do_special_encoding (inst);
2076
2077 /* Possibly use the instruction class to encode the chosen qualifier
2078 variant. */
2079 aarch64_encode_variant_using_iclass (inst);
2080
2081 /* Run a verifier if the instruction has one set. */
2082 if (opcode->verifier)
2083 {
2084 enum err_type result = opcode->verifier (inst, *code, 0, TRUE,
2085 mismatch_detail, insn_sequence);
2086 switch (result)
2087 {
2088 case ERR_UND:
2089 case ERR_UNP:
2090 case ERR_NYI:
2091 return FALSE;
2092 default:
2093 break;
2094 }
2095 }
2096
2097 /* Always run the constraint verifiers, regardless of whether the
2098 instruction has a verifier set, because the constraints need to
2099 maintain a global state. */
2100 enum err_type result = verify_constraints (inst, *code, 0, TRUE,
2101 mismatch_detail, insn_sequence);
2102 switch (result)
2103 {
2104 case ERR_UND:
2105 case ERR_UNP:
2106 case ERR_NYI:
2107 return FALSE;
2108 default:
2109 break;
2110 }
2111
2112
2113 encoding_exit:
2114 DEBUG_TRACE ("exit with %s", opcode->name);
2115
2116 *code = inst->value;
2117
2118 return TRUE;
2119}