[AArch64][gas] Update MTE system register encodings
opcodes/aarch64-asm.c
1/* aarch64-asm.c -- AArch64 assembler support.
 2 Copyright (C) 2012-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21#include "sysdep.h"
22#include <stdarg.h>
 23#include "libiberty.h"
 24#include "aarch64-asm.h"
 25#include "opintl.h"
26
27/* Utilities. */
28
29/* The unnamed arguments consist of the number of fields and information about
30 these fields where the VALUE will be inserted into CODE. MASK can be zero or
31 the base mask of the opcode.
32
 33 N.B. the fields are required to be in such an order that the least significant
 34 field for VALUE comes first; e.g. the <index> in
 35 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
 36 is encoded in H:L:M in some cases, so the fields should be passed in
 37 the order of M, L, H. */
38
39static inline void
40insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
41{
42 uint32_t num;
43 const aarch64_field *field;
44 enum aarch64_field_kind kind;
45 va_list va;
46
47 va_start (va, mask);
48 num = va_arg (va, uint32_t);
49 assert (num <= 5);
50 while (num--)
51 {
52 kind = va_arg (va, enum aarch64_field_kind);
53 field = &fields[kind];
54 insert_field (kind, code, value, mask);
55 value >>= field->width;
56 }
57 va_end (va);
58}
59
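/* Illustrative sketch, not part of the upstream file (hypothetical helper name):
   because insert_fields consumes VALUE from its least significant field upwards,
   a lane index that the architecture encodes as H:L:M must be passed with the
   fields listed as M, L, H, as aarch64_ins_reglane does for the S_H qualifier.  */
#if 0
static void
example_encode_hlm_index (aarch64_insn *code, unsigned int index)
{
  /* index<0> lands in M, index<1> in L, index<2> in H.  */
  insert_fields (code, index, 0, 3, FLD_M, FLD_L, FLD_H);
}
#endif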
60/* Insert a raw field value VALUE into all fields in SELF->fields.
61 The least significant bit goes in the final field. */
62
63static void
64insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
65 aarch64_insn value)
66{
67 unsigned int i;
68 enum aarch64_field_kind kind;
69
70 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
71 if (self->fields[i] != FLD_NIL)
72 {
73 kind = self->fields[i];
74 insert_field (kind, code, value, 0);
75 value >>= fields[kind].width;
76 }
77}
78
79/* Operand inserters. */
80
81/* Insert register number. */
 82bfd_boolean
 83aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
 84 aarch64_insn *code,
 85 const aarch64_inst *inst ATTRIBUTE_UNUSED,
 86 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
 87{
 88 insert_field (self->fields[0], code, info->reg.regno, 0);
 89 return TRUE;
90}
91
92/* Insert register number, index and/or other data for SIMD register element
93 operand, e.g. the last source operand in
94 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
 95bfd_boolean
 96aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
97 aarch64_insn *code, const aarch64_inst *inst,
98 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
99{
100 /* regno */
101 insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
102 /* index and/or type */
103 if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
104 {
105 int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
106 if (info->type == AARCH64_OPND_En
107 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
108 {
109 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
110 assert (info->idx == 1); /* Vn */
111 aarch64_insn value = info->reglane.index << pos;
112 insert_field (FLD_imm4, code, value, 0);
113 }
114 else
115 {
116 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
117 imm5<3:0> <V>
118 0000 RESERVED
119 xxx1 B
120 xx10 H
121 x100 S
122 1000 D */
123 aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
124 insert_field (FLD_imm5, code, value, 0);
125 }
126 }
127 else if (inst->opcode->iclass == dotproduct)
128 {
129 unsigned reglane_index = info->reglane.index;
130 switch (info->qualifier)
131 {
 132 case AARCH64_OPND_QLF_S_4B:
133 /* L:H */
134 assert (reglane_index < 4);
135 insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
136 break;
137 default:
138 assert (0);
139 }
140 }
141 else if (inst->opcode->iclass == cryptosm3)
142 {
143 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
144 unsigned reglane_index = info->reglane.index;
145 assert (reglane_index < 4);
146 insert_field (FLD_SM3_imm2, code, reglane_index, 0);
147 }
148 else
149 {
150 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
151 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
 152 unsigned reglane_index = info->reglane.index;
153
154 if (inst->opcode->op == OP_FCMLA_ELEM)
155 /* Complex operand takes two elements. */
 156 reglane_index *= 2;
 157
158 switch (info->qualifier)
159 {
160 case AARCH64_OPND_QLF_S_H:
161 /* H:L:M */
162 assert (reglane_index < 8);
163 insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
164 break;
165 case AARCH64_OPND_QLF_S_S:
166 /* H:L */
167 assert (reglane_index < 4);
168 insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
169 break;
170 case AARCH64_OPND_QLF_S_D:
171 /* H */
172 assert (reglane_index < 2);
173 insert_field (FLD_H, code, reglane_index, 0);
174 break;
175 default:
176 assert (0);
177 }
178 }
 179 return TRUE;
180}
181
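/* Worked example (illustrative only, not from the upstream file): for
   DUP <Sd>, <Vn>.S[2] the qualifier is AARCH64_OPND_QLF_S_S, so pos is 2 and
   the imm5 value is ((2 << 1) | 1) << 2 == 0b10100; the low bits "100" select
   the S size in the table above and the upper bits hold the index 2.  */
#if 0
  insert_field (FLD_imm5, code, ((2 << 1) | 1) << 2, 0);  /* imm5 = 0b10100.  */
#endif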
182/* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
 183bfd_boolean
184aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
185 aarch64_insn *code,
186 const aarch64_inst *inst ATTRIBUTE_UNUSED,
187 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
188{
189 /* R */
190 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
191 /* len */
192 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
 193 return TRUE;
194}
195
196/* Insert Rt and opcode fields for a register list operand, e.g. Vt
197 in AdvSIMD load/store instructions. */
561a72d4 198bfd_boolean
a06ea964
NC
199aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
200 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
201 const aarch64_inst *inst,
202 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964 203{
4ad3b7ef 204 aarch64_insn value = 0;
a06ea964
NC
205 /* Number of elements in each structure to be loaded/stored. */
206 unsigned num = get_opcode_dependent_value (inst->opcode);
207
208 /* Rt */
209 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
210 /* opcode */
211 switch (num)
212 {
213 case 1:
214 switch (info->reglist.num_regs)
215 {
216 case 1: value = 0x7; break;
217 case 2: value = 0xa; break;
218 case 3: value = 0x6; break;
219 case 4: value = 0x2; break;
220 default: assert (0);
221 }
222 break;
223 case 2:
224 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
225 break;
226 case 3:
227 value = 0x4;
228 break;
229 case 4:
230 value = 0x0;
231 break;
232 default:
233 assert (0);
234 }
235 insert_field (FLD_opcode, code, value, 0);
236
561a72d4 237 return TRUE;
a06ea964
NC
238}
239
240/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
241 single structure to all lanes instructions. */
561a72d4 242bfd_boolean
a06ea964
NC
243aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
244 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
245 const aarch64_inst *inst,
246 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
247{
248 aarch64_insn value;
249 /* The opcode dependent area stores the number of elements in
250 each structure to be loaded/stored. */
251 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
252
253 /* Rt */
254 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
255 /* S */
256 value = (aarch64_insn) 0;
257 if (is_ld1r && info->reglist.num_regs == 2)
 258 /* OP_LD1R does not have an alternating variant, but has a "two consecutive"
 259 variant instead. */
260 value = (aarch64_insn) 1;
261 insert_field (FLD_S, code, value, 0);
262
561a72d4 263 return TRUE;
a06ea964
NC
264}
265
266/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
267 operand e.g. Vt in AdvSIMD load/store single element instructions. */
561a72d4 268bfd_boolean
a06ea964
NC
269aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
270 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
271 const aarch64_inst *inst ATTRIBUTE_UNUSED,
272 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
273{
274 aarch64_field field = {0, 0};
4ad3b7ef
KT
275 aarch64_insn QSsize = 0; /* fields Q:S:size. */
276 aarch64_insn opcodeh2 = 0; /* opcode<2:1> */
a06ea964
NC
277
278 assert (info->reglist.has_index);
279
280 /* Rt */
281 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
282 /* Encode the index, opcode<2:1> and size. */
283 switch (info->qualifier)
284 {
285 case AARCH64_OPND_QLF_S_B:
286 /* Index encoded in "Q:S:size". */
287 QSsize = info->reglist.index;
288 opcodeh2 = 0x0;
289 break;
290 case AARCH64_OPND_QLF_S_H:
291 /* Index encoded in "Q:S:size<1>". */
292 QSsize = info->reglist.index << 1;
293 opcodeh2 = 0x1;
294 break;
295 case AARCH64_OPND_QLF_S_S:
296 /* Index encoded in "Q:S". */
297 QSsize = info->reglist.index << 2;
298 opcodeh2 = 0x2;
299 break;
300 case AARCH64_OPND_QLF_S_D:
301 /* Index encoded in "Q". */
302 QSsize = info->reglist.index << 3 | 0x1;
303 opcodeh2 = 0x2;
304 break;
305 default:
306 assert (0);
307 }
308 insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
309 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
310 insert_field_2 (&field, code, opcodeh2, 0);
311
561a72d4 312 return TRUE;
a06ea964
NC
313}
314
315/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
316 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
317 or SSHR <V><d>, <V><n>, #<shift>. */
561a72d4 318bfd_boolean
a06ea964
NC
319aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
320 const aarch64_opnd_info *info,
561a72d4
TC
321 aarch64_insn *code, const aarch64_inst *inst,
322 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
323{
324 unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
325 aarch64_insn Q, imm;
326
327 if (inst->opcode->iclass == asimdshf)
328 {
329 /* Q
330 immh Q <T>
331 0000 x SEE AdvSIMD modified immediate
332 0001 0 8B
333 0001 1 16B
334 001x 0 4H
335 001x 1 8H
336 01xx 0 2S
337 01xx 1 4S
338 1xxx 0 RESERVED
339 1xxx 1 2D */
340 Q = (val & 0x1) ? 1 : 0;
341 insert_field (FLD_Q, code, Q, inst->opcode->mask);
342 val >>= 1;
343 }
344
345 assert (info->type == AARCH64_OPND_IMM_VLSR
346 || info->type == AARCH64_OPND_IMM_VLSL);
347
348 if (info->type == AARCH64_OPND_IMM_VLSR)
349 /* immh:immb
350 immh <shift>
351 0000 SEE AdvSIMD modified immediate
352 0001 (16-UInt(immh:immb))
353 001x (32-UInt(immh:immb))
354 01xx (64-UInt(immh:immb))
355 1xxx (128-UInt(immh:immb)) */
356 imm = (16 << (unsigned)val) - info->imm.value;
357 else
358 /* immh:immb
359 immh <shift>
360 0000 SEE AdvSIMD modified immediate
361 0001 (UInt(immh:immb)-8)
362 001x (UInt(immh:immb)-16)
363 01xx (UInt(immh:immb)-32)
364 1xxx (UInt(immh:immb)-64) */
365 imm = info->imm.value + (8 << (unsigned)val);
366 insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
367
561a72d4 368 return TRUE;
a06ea964
NC
369}
370
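/* Worked example (illustrative only, not from the upstream file): for
   SSHR <Vd>.2D, <Vn>.2D, #1 the 2D qualifier yields Q == 1 and val == 3 after
   the shift above, so immh:immb holds (16 << 3) - 1 == 127 (0b1111111),
   matching the "1xxx" row of the right-shift table.  */
#if 0
  insert_fields (code, (16 << 3) - 1, 0, 2, FLD_immb, FLD_immh);
#endif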
371/* Insert fields for e.g. the immediate operands in
372 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
561a72d4 373bfd_boolean
a06ea964
NC
374aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
375 aarch64_insn *code,
561a72d4
TC
376 const aarch64_inst *inst ATTRIBUTE_UNUSED,
377 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
378{
379 int64_t imm;
a06ea964
NC
380
381 imm = info->imm.value;
382 if (operand_need_shift_by_two (self))
383 imm >>= 2;
193614f2
SD
384 if (operand_need_shift_by_four (self))
385 imm >>= 4;
b5464a68 386 insert_all_fields (self, code, imm);
561a72d4 387 return TRUE;
a06ea964
NC
388}
389
390/* Insert immediate and its shift amount for e.g. the last operand in
391 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
561a72d4 392bfd_boolean
a06ea964 393aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
561a72d4
TC
394 aarch64_insn *code, const aarch64_inst *inst,
395 aarch64_operand_error *errors)
a06ea964
NC
396{
397 /* imm16 */
561a72d4 398 aarch64_ins_imm (self, info, code, inst, errors);
a06ea964
NC
399 /* hw */
400 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
561a72d4 401 return TRUE;
a06ea964
NC
402}
403
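/* Worked example (illustrative only, not from the upstream file): for
   MOVZ <Xd>, #0x1234, LSL #32 the value 0x1234 goes into imm16 and the shift
   amount is encoded as hw == 32 >> 4 == 2.  */
#if 0
  insert_field (FLD_hw, code, 32 >> 4, 0);  /* hw = 2.  */
#endif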
404/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
405 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
561a72d4 406bfd_boolean
a06ea964
NC
407aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
408 const aarch64_opnd_info *info,
409 aarch64_insn *code,
561a72d4
TC
410 const aarch64_inst *inst ATTRIBUTE_UNUSED,
411 aarch64_operand_error *errors
412 ATTRIBUTE_UNUSED)
a06ea964
NC
413{
414 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
415 uint64_t imm = info->imm.value;
416 enum aarch64_modifier_kind kind = info->shifter.kind;
417 int amount = info->shifter.amount;
418 aarch64_field field = {0, 0};
419
420 /* a:b:c:d:e:f:g:h */
421 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
422 {
423 /* Either MOVI <Dd>, #<imm>
424 or MOVI <Vd>.2D, #<imm>.
425 <imm> is a 64-bit immediate
426 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
427 encoded in "a:b:c:d:e:f:g:h". */
428 imm = aarch64_shrink_expanded_imm8 (imm);
429 assert ((int)imm >= 0);
430 }
a06ea964
NC
431 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
432
433 if (kind == AARCH64_MOD_NONE)
561a72d4 434 return TRUE;
a06ea964
NC
435
436 /* shift amount partially in cmode */
437 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
438 if (kind == AARCH64_MOD_LSL)
439 {
440 /* AARCH64_MOD_LSL: shift zeros. */
441 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
f5555712
YZ
442 assert (esize == 4 || esize == 2 || esize == 1);
443 /* For 8-bit move immediate, the optional LSL #0 does not require
444 encoding. */
445 if (esize == 1)
561a72d4 446 return TRUE;
a06ea964
NC
447 amount >>= 3;
448 if (esize == 4)
449 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
450 else
451 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
452 }
453 else
454 {
455 /* AARCH64_MOD_MSL: shift ones. */
456 amount >>= 4;
457 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
458 }
459 insert_field_2 (&field, code, amount, 0);
460
561a72d4 461 return TRUE;
aa2aa4c6
RS
462}
463
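/* Worked example (illustrative only, not from the upstream file): for
   MOVI <Vd>.2D, #0xff00ff00ff00ff00 every byte of the 64-bit immediate is
   all-ones or all-zeros, so aarch64_shrink_expanded_imm8 reduces it to the
   8-bit value 0b10101010 (0xaa), which is then split across abc:defgh.  */
#if 0
  insert_fields (code, 0xaa, 0, 2, FLD_defgh, FLD_abc);
#endif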
464/* Insert fields for an 8-bit floating-point immediate. */
561a72d4 465bfd_boolean
aa2aa4c6
RS
466aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
467 aarch64_insn *code,
561a72d4
TC
468 const aarch64_inst *inst ATTRIBUTE_UNUSED,
469 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
aa2aa4c6
RS
470{
471 insert_all_fields (self, code, info->imm.value);
561a72d4 472 return TRUE;
a06ea964
NC
473}
474
582e12bf 475/* Insert 1-bit rotation immediate (#90 or #270). */
561a72d4 476bfd_boolean
582e12bf
RS
477aarch64_ins_imm_rotate1 (const aarch64_operand *self,
478 const aarch64_opnd_info *info,
561a72d4
TC
479 aarch64_insn *code, const aarch64_inst *inst,
480 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
c2c4ff8d 481{
582e12bf
RS
482 uint64_t rot = (info->imm.value - 90) / 180;
483 assert (rot < 2U);
c2c4ff8d 484 insert_field (self->fields[0], code, rot, inst->opcode->mask);
561a72d4 485 return TRUE;
582e12bf 486}
c2c4ff8d 487
582e12bf 488/* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
561a72d4 489bfd_boolean
582e12bf
RS
490aarch64_ins_imm_rotate2 (const aarch64_operand *self,
491 const aarch64_opnd_info *info,
561a72d4
TC
492 aarch64_insn *code, const aarch64_inst *inst,
493 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
582e12bf
RS
494{
495 uint64_t rot = info->imm.value / 90;
496 assert (rot < 4U);
497 insert_field (self->fields[0], code, rot, inst->opcode->mask);
561a72d4 498 return TRUE;
c2c4ff8d
SN
499}
500
a06ea964
NC
501/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
502 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
561a72d4 503bfd_boolean
a06ea964
NC
504aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
505 aarch64_insn *code,
561a72d4
TC
506 const aarch64_inst *inst ATTRIBUTE_UNUSED,
507 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
508{
509 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
561a72d4 510 return TRUE;
a06ea964
NC
511}
512
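/* Worked example (illustrative only, not from the upstream file): for
   SCVTF <Dd>, <Wn>, #16 the scale field receives 64 - 16 == 48.  */
#if 0
  insert_field (self->fields[0], code, 64 - 16, 0);
#endif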
513/* Insert arithmetic immediate for e.g. the last operand in
514 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
561a72d4 515bfd_boolean
a06ea964 516aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
561a72d4
TC
517 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
518 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
519{
520 /* shift */
521 aarch64_insn value = info->shifter.amount ? 1 : 0;
522 insert_field (self->fields[0], code, value, 0);
523 /* imm12 (unsigned) */
524 insert_field (self->fields[1], code, info->imm.value, 0);
561a72d4 525 return TRUE;
a06ea964
NC
526}
527
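/* Worked example (illustrative only, not from the upstream file): for
   SUBS <Xd>, <Xn|SP>, #0x123, LSL #12 the shift field is 1 (the only
   non-zero shift amount allowed here is 12) and imm12 holds 0x123 unscaled.  */
#if 0
  insert_field (self->fields[0], code, 1, 0);      /* shift */
  insert_field (self->fields[1], code, 0x123, 0);  /* imm12 */
#endif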
e950b345
RS
528/* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
529 the operand should be inverted before encoding. */
561a72d4 530static bfd_boolean
e950b345
RS
531aarch64_ins_limm_1 (const aarch64_operand *self,
532 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
533 const aarch64_inst *inst, bfd_boolean invert_p,
534 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
535{
536 aarch64_insn value;
537 uint64_t imm = info->imm.value;
42408347 538 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
a06ea964 539
e950b345 540 if (invert_p)
a06ea964 541 imm = ~imm;
535b785f
AM
542 /* The constraint check should have guaranteed this wouldn't happen. */
543 assert (aarch64_logical_immediate_p (imm, esize, &value));
a06ea964
NC
544
545 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
546 self->fields[0]);
561a72d4 547 return TRUE;
a06ea964
NC
548}
549
e950b345
RS
550/* Insert logical/bitmask immediate for e.g. the last operand in
551 ORR <Wd|WSP>, <Wn>, #<imm>. */
561a72d4 552bfd_boolean
e950b345 553aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
561a72d4
TC
554 aarch64_insn *code, const aarch64_inst *inst,
555 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
e950b345
RS
556{
557 return aarch64_ins_limm_1 (self, info, code, inst,
561a72d4 558 inst->opcode->op == OP_BIC, errors);
e950b345
RS
559}
560
561/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
561a72d4 562bfd_boolean
e950b345
RS
563aarch64_ins_inv_limm (const aarch64_operand *self,
564 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
565 const aarch64_inst *inst,
566 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
e950b345 567{
561a72d4 568 return aarch64_ins_limm_1 (self, info, code, inst, TRUE, errors);
e950b345
RS
569}
570
a06ea964
NC
571/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
572 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
561a72d4 573bfd_boolean
a06ea964 574aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
561a72d4
TC
575 aarch64_insn *code, const aarch64_inst *inst,
576 aarch64_operand_error *errors)
a06ea964 577{
4ad3b7ef 578 aarch64_insn value = 0;
a06ea964
NC
579
580 assert (info->idx == 0);
581
582 /* Rt */
561a72d4 583 aarch64_ins_regno (self, info, code, inst, errors);
a06ea964
NC
584 if (inst->opcode->iclass == ldstpair_indexed
585 || inst->opcode->iclass == ldstnapair_offs
586 || inst->opcode->iclass == ldstpair_off
587 || inst->opcode->iclass == loadlit)
588 {
589 /* size */
590 switch (info->qualifier)
591 {
592 case AARCH64_OPND_QLF_S_S: value = 0; break;
593 case AARCH64_OPND_QLF_S_D: value = 1; break;
594 case AARCH64_OPND_QLF_S_Q: value = 2; break;
595 default: assert (0);
596 }
597 insert_field (FLD_ldst_size, code, value, 0);
598 }
599 else
600 {
601 /* opc[1]:size */
602 value = aarch64_get_qualifier_standard_value (info->qualifier);
603 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
604 }
605
561a72d4 606 return TRUE;
a06ea964
NC
607}
608
609/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
561a72d4 610bfd_boolean
a06ea964
NC
611aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
612 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
613 const aarch64_inst *inst ATTRIBUTE_UNUSED,
614 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
615{
616 /* Rn */
617 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
561a72d4 618 return TRUE;
a06ea964
NC
619}
620
621/* Encode the address operand for e.g.
622 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
561a72d4 623bfd_boolean
a06ea964
NC
624aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
625 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
626 const aarch64_inst *inst ATTRIBUTE_UNUSED,
627 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
628{
629 aarch64_insn S;
630 enum aarch64_modifier_kind kind = info->shifter.kind;
631
632 /* Rn */
633 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
634 /* Rm */
635 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
636 /* option */
637 if (kind == AARCH64_MOD_LSL)
 638 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven encoding. */
639 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
640 /* S */
641 if (info->qualifier != AARCH64_OPND_QLF_S_B)
642 S = info->shifter.amount != 0;
643 else
644 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
645 S <amount>
646 0 [absent]
647 1 #0
648 Must be #0 if <extend> is explicitly LSL. */
649 S = info->shifter.operator_present && info->shifter.amount_present;
650 insert_field (FLD_S, code, S, 0);
651
561a72d4 652 return TRUE;
a06ea964
NC
653}
654
f42f1a1d
TC
655/* Encode the address operand for e.g.
656 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
561a72d4 657bfd_boolean
f42f1a1d
TC
658aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
659 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
660 const aarch64_inst *inst ATTRIBUTE_UNUSED,
661 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
f42f1a1d
TC
662{
663 /* Rn */
664 insert_field (self->fields[0], code, info->addr.base_regno, 0);
665
666 /* simm9 */
667 int imm = info->addr.offset.imm;
668 insert_field (self->fields[1], code, imm, 0);
669
670 /* writeback */
671 if (info->addr.writeback)
672 {
673 assert (info->addr.preind == 1 && info->addr.postind == 0);
674 insert_field (self->fields[2], code, 1, 0);
675 }
561a72d4 676 return TRUE;
f42f1a1d
TC
677}
678
a06ea964 679/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
561a72d4 680bfd_boolean
a06ea964
NC
681aarch64_ins_addr_simm (const aarch64_operand *self,
682 const aarch64_opnd_info *info,
062f38fa 683 aarch64_insn *code,
561a72d4
TC
684 const aarch64_inst *inst ATTRIBUTE_UNUSED,
685 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
686{
687 int imm;
688
689 /* Rn */
690 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
691 /* simm (imm9 or imm7) */
692 imm = info->addr.offset.imm;
 693 if (self->fields[0] == FLD_imm7
 694 || info->qualifier == AARCH64_OPND_QLF_imm_tag)
 695 /* Scaled immediate in ld/st pair instructions. */
696 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
697 insert_field (self->fields[0], code, imm, 0);
698 /* pre/post- index */
699 if (info->addr.writeback)
700 {
701 assert (inst->opcode->iclass != ldst_unscaled
702 && inst->opcode->iclass != ldstnapair_offs
703 && inst->opcode->iclass != ldstpair_off
704 && inst->opcode->iclass != ldst_unpriv);
705 assert (info->addr.preind != info->addr.postind);
706 if (info->addr.preind)
707 insert_field (self->fields[1], code, 1, 0);
708 }
709
561a72d4 710 return TRUE;
a06ea964
NC
711}
712
3f06e550 713/* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
561a72d4 714bfd_boolean
3f06e550
SN
715aarch64_ins_addr_simm10 (const aarch64_operand *self,
716 const aarch64_opnd_info *info,
717 aarch64_insn *code,
561a72d4
TC
718 const aarch64_inst *inst ATTRIBUTE_UNUSED,
719 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
3f06e550
SN
720{
721 int imm;
722
723 /* Rn */
724 insert_field (self->fields[0], code, info->addr.base_regno, 0);
725 /* simm10 */
726 imm = info->addr.offset.imm >> 3;
727 insert_field (self->fields[1], code, imm >> 9, 0);
728 insert_field (self->fields[2], code, imm, 0);
729 /* writeback */
730 if (info->addr.writeback)
731 {
732 assert (info->addr.preind == 1 && info->addr.postind == 0);
733 insert_field (self->fields[3], code, 1, 0);
734 }
561a72d4 735 return TRUE;
3f06e550
SN
736}
737
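/* Worked example (illustrative only, not from the upstream file): for
   LDRAA <Xt>, [<Xn|SP>, #-4096] the offset is scaled by 8 to -512; the sign
   bit goes into the single-bit field and the low nine bits (all zero) into
   the nine-bit immediate field.  */
#if 0
  int imm = -4096 >> 3;                               /* -512 */
  insert_field (self->fields[1], code, imm >> 9, 0);  /* sign bit */
  insert_field (self->fields[2], code, imm, 0);       /* low nine bits */
#endif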
a06ea964 738/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
561a72d4 739bfd_boolean
a06ea964
NC
740aarch64_ins_addr_uimm12 (const aarch64_operand *self,
741 const aarch64_opnd_info *info,
742 aarch64_insn *code,
561a72d4
TC
743 const aarch64_inst *inst ATTRIBUTE_UNUSED,
744 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
745{
746 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
747
748 /* Rn */
749 insert_field (self->fields[0], code, info->addr.base_regno, 0);
750 /* uimm12 */
 751 insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
561a72d4 752 return TRUE;
a06ea964
NC
753}
754
755/* Encode the address operand for e.g.
756 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
561a72d4 757bfd_boolean
a06ea964
NC
758aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
759 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
760 const aarch64_inst *inst ATTRIBUTE_UNUSED,
761 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
762{
763 /* Rn */
764 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
765 /* Rm | #<amount> */
766 if (info->addr.offset.is_reg)
767 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
768 else
769 insert_field (FLD_Rm, code, 0x1f, 0);
561a72d4 770 return TRUE;
a06ea964
NC
771}
772
773/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
561a72d4 774bfd_boolean
a06ea964
NC
775aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
776 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
777 const aarch64_inst *inst ATTRIBUTE_UNUSED,
778 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
779{
780 /* cond */
781 insert_field (FLD_cond, code, info->cond->value, 0);
561a72d4 782 return TRUE;
a06ea964
NC
783}
784
785/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
561a72d4 786bfd_boolean
a06ea964
NC
787aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
788 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
789 const aarch64_inst *inst,
790 aarch64_operand_error *detail ATTRIBUTE_UNUSED)
a06ea964 791{
 792 /* If this is a system instruction, check whether there are any restrictions
 793 on which registers it can use. */
794 if (inst->opcode->iclass == ic_system)
795 {
796 uint64_t opcode_flags
797 = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
798 uint32_t sysreg_flags
799 = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);
800
 801 /* Check whether the register is read-only, then whether it is write-only;
 802 if it is both or unspecified, we don't care. */
803 if (opcode_flags == F_SYS_READ
804 && sysreg_flags
805 && sysreg_flags != F_REG_READ)
806 {
807 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
808 detail->error = _("specified register cannot be read from");
809 detail->index = info->idx;
810 detail->non_fatal = TRUE;
811 }
812 else if (opcode_flags == F_SYS_WRITE
813 && sysreg_flags
814 && sysreg_flags != F_REG_WRITE)
815 {
816 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
817 detail->error = _("specified register cannot be written to");
818 detail->index = info->idx;
819 detail->non_fatal = TRUE;
820 }
821 }
a06ea964 822 /* op0:op1:CRn:CRm:op2 */
561a72d4 823 insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
a06ea964 824 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
561a72d4 825 return TRUE;
a06ea964
NC
826}
827
828/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
561a72d4 829bfd_boolean
a06ea964
NC
830aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
831 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
832 const aarch64_inst *inst ATTRIBUTE_UNUSED,
833 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
834{
835 /* op1:op2 */
836 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
837 FLD_op2, FLD_op1);
561a72d4 838 return TRUE;
a06ea964
NC
839}
840
841/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
561a72d4 842bfd_boolean
a06ea964
NC
843aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
844 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
845 const aarch64_inst *inst ATTRIBUTE_UNUSED,
846 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
847{
848 /* op1:CRn:CRm:op2 */
849 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
850 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
561a72d4 851 return TRUE;
a06ea964
NC
852}
853
854/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
855
561a72d4 856bfd_boolean
a06ea964
NC
857aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
858 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
859 const aarch64_inst *inst ATTRIBUTE_UNUSED,
860 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
861{
862 /* CRm */
863 insert_field (FLD_CRm, code, info->barrier->value, 0);
561a72d4 864 return TRUE;
a06ea964
NC
865}
866
867/* Encode the prefetch operation option operand for e.g.
868 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
869
561a72d4 870bfd_boolean
a06ea964
NC
871aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
872 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
873 const aarch64_inst *inst ATTRIBUTE_UNUSED,
874 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
875{
876 /* prfop in Rt */
877 insert_field (FLD_Rt, code, info->prfop->value, 0);
561a72d4 878 return TRUE;
a06ea964
NC
879}
880
9ed608f9
MW
881/* Encode the hint number for instructions that alias HINT but take an
882 operand. */
883
561a72d4 884bfd_boolean
9ed608f9
MW
885aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
886 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
887 const aarch64_inst *inst ATTRIBUTE_UNUSED,
888 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
9ed608f9
MW
889{
890 /* CRm:op2. */
891 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
561a72d4 892 return TRUE;
9ed608f9
MW
893}
894
a06ea964
NC
895/* Encode the extended register operand for e.g.
896 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
561a72d4 897bfd_boolean
a06ea964
NC
898aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
899 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
900 const aarch64_inst *inst ATTRIBUTE_UNUSED,
901 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
902{
903 enum aarch64_modifier_kind kind;
904
905 /* Rm */
906 insert_field (FLD_Rm, code, info->reg.regno, 0);
907 /* option */
908 kind = info->shifter.kind;
909 if (kind == AARCH64_MOD_LSL)
910 kind = info->qualifier == AARCH64_OPND_QLF_W
911 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
912 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
913 /* imm3 */
914 insert_field (FLD_imm3, code, info->shifter.amount, 0);
915
561a72d4 916 return TRUE;
a06ea964
NC
917}
918
919/* Encode the shifted register operand for e.g.
920 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
561a72d4 921bfd_boolean
a06ea964
NC
922aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
923 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
924 const aarch64_inst *inst ATTRIBUTE_UNUSED,
925 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
a06ea964
NC
926{
927 /* Rm */
928 insert_field (FLD_Rm, code, info->reg.regno, 0);
929 /* shift */
930 insert_field (FLD_shift, code,
931 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
932 /* imm6 */
933 insert_field (FLD_imm6, code, info->shifter.amount, 0);
934
561a72d4 935 return TRUE;
a06ea964
NC
936}
937
98907a70
RS
938/* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
939 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
940 SELF's operand-dependent value. fields[0] specifies the field that
941 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
561a72d4 942bfd_boolean
98907a70
RS
943aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
944 const aarch64_opnd_info *info,
945 aarch64_insn *code,
561a72d4
TC
946 const aarch64_inst *inst ATTRIBUTE_UNUSED,
947 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
98907a70
RS
948{
949 int factor = 1 + get_operand_specific_data (self);
950 insert_field (self->fields[0], code, info->addr.base_regno, 0);
951 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
561a72d4 952 return TRUE;
98907a70
RS
953}
954
955/* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
956 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
957 SELF's operand-dependent value. fields[0] specifies the field that
958 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
561a72d4 959bfd_boolean
98907a70
RS
960aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
961 const aarch64_opnd_info *info,
962 aarch64_insn *code,
561a72d4
TC
963 const aarch64_inst *inst ATTRIBUTE_UNUSED,
964 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
98907a70
RS
965{
966 int factor = 1 + get_operand_specific_data (self);
967 insert_field (self->fields[0], code, info->addr.base_regno, 0);
968 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
561a72d4 969 return TRUE;
98907a70
RS
970}
971
972/* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
973 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
974 SELF's operand-dependent value. fields[0] specifies the field that
975 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
976 and imm3 fields, with imm3 being the less-significant part. */
561a72d4 977bfd_boolean
98907a70
RS
978aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
979 const aarch64_opnd_info *info,
980 aarch64_insn *code,
561a72d4
TC
981 const aarch64_inst *inst ATTRIBUTE_UNUSED,
982 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
98907a70
RS
983{
984 int factor = 1 + get_operand_specific_data (self);
985 insert_field (self->fields[0], code, info->addr.base_regno, 0);
986 insert_fields (code, info->addr.offset.imm / factor, 0,
987 2, FLD_imm3, FLD_SVE_imm6);
561a72d4 988 return TRUE;
98907a70
RS
989}
990
582e12bf
RS
991/* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
992 is a 4-bit signed number and where <shift> is SELF's operand-dependent
993 value. fields[0] specifies the base register field. */
561a72d4 994bfd_boolean
582e12bf
RS
995aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
996 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
997 const aarch64_inst *inst ATTRIBUTE_UNUSED,
998 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
582e12bf
RS
999{
1000 int factor = 1 << get_operand_specific_data (self);
1001 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1002 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
561a72d4 1003 return TRUE;
582e12bf
RS
1004}
1005
4df068de
RS
1006/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1007 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1008 value. fields[0] specifies the base register field. */
561a72d4 1009bfd_boolean
4df068de
RS
1010aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
1011 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
1012 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1013 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
4df068de
RS
1014{
1015 int factor = 1 << get_operand_specific_data (self);
1016 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1017 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
561a72d4 1018 return TRUE;
4df068de
RS
1019}
1020
1021/* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1022 is SELF's operand-dependent value. fields[0] specifies the base
1023 register field and fields[1] specifies the offset register field. */
561a72d4 1024bfd_boolean
4df068de
RS
1025aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
1026 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
1027 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1028 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
4df068de
RS
1029{
1030 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1031 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
561a72d4 1032 return TRUE;
4df068de
RS
1033}
1034
1035/* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1036 <shift> is SELF's operand-dependent value. fields[0] specifies the
1037 base register field, fields[1] specifies the offset register field and
1038 fields[2] is a single-bit field that selects SXTW over UXTW. */
561a72d4 1039bfd_boolean
4df068de
RS
1040aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
1041 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
1042 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1043 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
4df068de
RS
1044{
1045 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1046 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1047 if (info->shifter.kind == AARCH64_MOD_UXTW)
1048 insert_field (self->fields[2], code, 0, 0);
1049 else
1050 insert_field (self->fields[2], code, 1, 0);
561a72d4 1051 return TRUE;
4df068de
RS
1052}
1053
1054/* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1055 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1056 fields[0] specifies the base register field. */
561a72d4 1057bfd_boolean
4df068de
RS
1058aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
1059 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
1060 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1061 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
4df068de
RS
1062{
1063 int factor = 1 << get_operand_specific_data (self);
1064 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1065 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
561a72d4 1066 return TRUE;
4df068de
RS
1067}
1068
1069/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1070 where <modifier> is fixed by the instruction and where <msz> is a
1071 2-bit unsigned number. fields[0] specifies the base register field
1072 and fields[1] specifies the offset register field. */
561a72d4 1073static bfd_boolean
4df068de 1074aarch64_ext_sve_addr_zz (const aarch64_operand *self,
561a72d4
TC
1075 const aarch64_opnd_info *info, aarch64_insn *code,
1076 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
4df068de
RS
1077{
1078 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1079 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1080 insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
561a72d4 1081 return TRUE;
4df068de
RS
1082}
1083
1084/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1085 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1086 field and fields[1] specifies the offset register field. */
561a72d4 1087bfd_boolean
4df068de
RS
1088aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
1089 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
1090 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1091 aarch64_operand_error *errors)
4df068de 1092{
561a72d4 1093 return aarch64_ext_sve_addr_zz (self, info, code, errors);
4df068de
RS
1094}
1095
1096/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1097 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1098 field and fields[1] specifies the offset register field. */
561a72d4 1099bfd_boolean
4df068de
RS
1100aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
1101 const aarch64_opnd_info *info,
1102 aarch64_insn *code,
561a72d4
TC
1103 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1104 aarch64_operand_error *errors)
4df068de 1105{
561a72d4 1106 return aarch64_ext_sve_addr_zz (self, info, code, errors);
4df068de
RS
1107}
1108
1109/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1110 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1111 field and fields[1] specifies the offset register field. */
561a72d4 1112bfd_boolean
4df068de
RS
1113aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
1114 const aarch64_opnd_info *info,
1115 aarch64_insn *code,
561a72d4
TC
1116 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1117 aarch64_operand_error *errors)
4df068de 1118{
561a72d4 1119 return aarch64_ext_sve_addr_zz (self, info, code, errors);
4df068de
RS
1120}
1121
e950b345 1122/* Encode an SVE ADD/SUB immediate. */
561a72d4 1123bfd_boolean
e950b345
RS
1124aarch64_ins_sve_aimm (const aarch64_operand *self,
1125 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
1126 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1127 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
e950b345
RS
1128{
1129 if (info->shifter.amount == 8)
1130 insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
1131 else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
1132 insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
1133 else
1134 insert_all_fields (self, code, info->imm.value & 0xff);
561a72d4 1135 return TRUE;
e950b345
RS
1136}
1137
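/* Worked example (illustrative only, not from the upstream file): for
   ADD <Zd>.H, <Zd>.H, #1, LSL #8 the explicit shift takes the first branch,
   so the value inserted is (1 & 0xff) | 256 == 0x101, i.e. the shift bit set
   and an 8-bit immediate of 1.  */
#if 0
  insert_all_fields (self, code, (1 & 0xff) | 256);
#endif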
1138/* Encode an SVE CPY/DUP immediate. */
561a72d4 1139bfd_boolean
e950b345
RS
1140aarch64_ins_sve_asimm (const aarch64_operand *self,
1141 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
1142 const aarch64_inst *inst,
1143 aarch64_operand_error *errors)
e950b345 1144{
561a72d4 1145 return aarch64_ins_sve_aimm (self, info, code, inst, errors);
e950b345
RS
1146}
1147
f11ad6bc
RS
1148/* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1149 array specifies which field to use for Zn. MM is encoded in the
1150 concatenation of imm5 and SVE_tszh, with imm5 being the less
1151 significant part. */
561a72d4 1152bfd_boolean
f11ad6bc
RS
1153aarch64_ins_sve_index (const aarch64_operand *self,
1154 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
1155 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1156 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
f11ad6bc
RS
1157{
1158 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1159 insert_field (self->fields[0], code, info->reglane.regno, 0);
1160 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1161 2, FLD_imm5, FLD_SVE_tszh);
561a72d4 1162 return TRUE;
f11ad6bc
RS
1163}
1164
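/* Worked example (illustrative only, not from the upstream file): for
   DUP <Zd>.S, <Zn>.S[3] the element size is 4 bytes, so the tszh:imm5 value
   is (3 * 2 + 1) * 4 == 28 (0b0011100); the position of the lowest set bit
   selects the S element size and the bits above it hold the index 3.  */
#if 0
  insert_fields (code, (3 * 2 + 1) * 4, 0, 2, FLD_imm5, FLD_SVE_tszh);
#endif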
e950b345 1165/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
561a72d4 1166bfd_boolean
e950b345
RS
1167aarch64_ins_sve_limm_mov (const aarch64_operand *self,
1168 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
1169 const aarch64_inst *inst,
1170 aarch64_operand_error *errors)
e950b345 1171{
561a72d4 1172 return aarch64_ins_limm (self, info, code, inst, errors);
e950b345
RS
1173}
1174
582e12bf
RS
1175/* Encode Zn[MM], where Zn occupies the least-significant part of the field
1176 and where MM occupies the most-significant part. The operand-dependent
1177 value specifies the number of bits in Zn. */
561a72d4 1178bfd_boolean
582e12bf
RS
1179aarch64_ins_sve_quad_index (const aarch64_operand *self,
1180 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
1181 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1182 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
582e12bf
RS
1183{
1184 unsigned int reg_bits = get_operand_specific_data (self);
1185 assert (info->reglane.regno < (1U << reg_bits));
1186 unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1187 insert_all_fields (self, code, val);
561a72d4 1188 return TRUE;
582e12bf
RS
1189}
1190
f11ad6bc
RS
1191/* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1192 to use for Zn. */
561a72d4 1193bfd_boolean
f11ad6bc
RS
1194aarch64_ins_sve_reglist (const aarch64_operand *self,
1195 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
1196 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1197 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
f11ad6bc
RS
1198{
1199 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
561a72d4 1200 return TRUE;
f11ad6bc
RS
1201}
1202
2442d846
RS
1203/* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1204 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1205 field. */
561a72d4 1206bfd_boolean
2442d846
RS
1207aarch64_ins_sve_scale (const aarch64_operand *self,
1208 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
1209 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1210 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2442d846
RS
1211{
1212 insert_all_fields (self, code, info->imm.value);
1213 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
561a72d4 1214 return TRUE;
2442d846
RS
1215}
1216
e950b345 1217/* Encode an SVE shift left immediate. */
561a72d4 1218bfd_boolean
e950b345
RS
1219aarch64_ins_sve_shlimm (const aarch64_operand *self,
1220 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
1221 const aarch64_inst *inst,
1222 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
e950b345
RS
1223{
1224 const aarch64_opnd_info *prev_operand;
1225 unsigned int esize;
1226
1227 assert (info->idx > 0);
1228 prev_operand = &inst->operands[info->idx - 1];
1229 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1230 insert_all_fields (self, code, 8 * esize + info->imm.value);
561a72d4 1231 return TRUE;
e950b345
RS
1232}
1233
1234/* Encode an SVE shift right immediate. */
561a72d4 1235bfd_boolean
e950b345
RS
1236aarch64_ins_sve_shrimm (const aarch64_operand *self,
1237 const aarch64_opnd_info *info, aarch64_insn *code,
561a72d4
TC
1238 const aarch64_inst *inst,
1239 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
e950b345
RS
1240{
1241 const aarch64_opnd_info *prev_operand;
1242 unsigned int esize;
1243
3c17238b
MM
1244 unsigned int opnd_backshift = get_operand_specific_data (self);
1245 assert (info->idx >= (int)opnd_backshift);
1246 prev_operand = &inst->operands[info->idx - opnd_backshift];
e950b345
RS
1247 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1248 insert_all_fields (self, code, 16 * esize - info->imm.value);
561a72d4 1249 return TRUE;
e950b345
RS
1250}
1251
165d4950
RS
1252/* Encode a single-bit immediate that selects between #0.5 and #1.0.
1253 The fields array specifies which field to use. */
561a72d4 1254bfd_boolean
165d4950
RS
1255aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1256 const aarch64_opnd_info *info,
1257 aarch64_insn *code,
561a72d4
TC
1258 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1259 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
165d4950
RS
1260{
1261 if (info->imm.value == 0x3f000000)
1262 insert_field (self->fields[0], code, 0, 0);
1263 else
1264 insert_field (self->fields[0], code, 1, 0);
561a72d4 1265 return TRUE;
165d4950
RS
1266}
1267
1268/* Encode a single-bit immediate that selects between #0.5 and #2.0.
1269 The fields array specifies which field to use. */
561a72d4 1270bfd_boolean
165d4950
RS
1271aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1272 const aarch64_opnd_info *info,
1273 aarch64_insn *code,
561a72d4
TC
1274 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1275 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
165d4950
RS
1276{
1277 if (info->imm.value == 0x3f000000)
1278 insert_field (self->fields[0], code, 0, 0);
1279 else
1280 insert_field (self->fields[0], code, 1, 0);
561a72d4 1281 return TRUE;
165d4950
RS
1282}
1283
1284/* Encode a single-bit immediate that selects between #0.0 and #1.0.
1285 The fields array specifies which field to use. */
561a72d4 1286bfd_boolean
165d4950
RS
1287aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1288 const aarch64_opnd_info *info,
1289 aarch64_insn *code,
561a72d4
TC
1290 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1291 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
165d4950
RS
1292{
1293 if (info->imm.value == 0)
1294 insert_field (self->fields[0], code, 0, 0);
1295 else
1296 insert_field (self->fields[0], code, 1, 0);
561a72d4 1297 return TRUE;
165d4950
RS
1298}
1299
a06ea964
NC
1300/* Miscellaneous encoding functions. */
1301
1302/* Encode size[0], i.e. bit 22, for
1303 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1304
1305static void
1306encode_asimd_fcvt (aarch64_inst *inst)
1307{
1308 aarch64_insn value;
1309 aarch64_field field = {0, 0};
1310 enum aarch64_opnd_qualifier qualifier;
1311
1312 switch (inst->opcode->op)
1313 {
1314 case OP_FCVTN:
1315 case OP_FCVTN2:
1316 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1317 qualifier = inst->operands[1].qualifier;
1318 break;
1319 case OP_FCVTL:
1320 case OP_FCVTL2:
1321 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1322 qualifier = inst->operands[0].qualifier;
1323 break;
1324 default:
1325 assert (0);
1326 }
1327 assert (qualifier == AARCH64_OPND_QLF_V_4S
1328 || qualifier == AARCH64_OPND_QLF_V_2D);
1329 value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
1330 gen_sub_field (FLD_size, 0, 1, &field);
1331 insert_field_2 (&field, &inst->value, value, 0);
1332}
1333
1334/* Encode size[0], i.e. bit 22, for
1335 e.g. FCVTXN <Vb><d>, <Va><n>. */
1336
1337static void
1338encode_asisd_fcvtxn (aarch64_inst *inst)
1339{
1340 aarch64_insn val = 1;
1341 aarch64_field field = {0, 0};
1342 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1343 gen_sub_field (FLD_size, 0, 1, &field);
1344 insert_field_2 (&field, &inst->value, val, 0);
1345}
1346
1347/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1348static void
1349encode_fcvt (aarch64_inst *inst)
1350{
1351 aarch64_insn val;
1352 const aarch64_field field = {15, 2};
1353
1354 /* opc dstsize */
1355 switch (inst->operands[0].qualifier)
1356 {
1357 case AARCH64_OPND_QLF_S_S: val = 0; break;
1358 case AARCH64_OPND_QLF_S_D: val = 1; break;
1359 case AARCH64_OPND_QLF_S_H: val = 3; break;
1360 default: abort ();
1361 }
1362 insert_field_2 (&field, &inst->value, val, 0);
1363
1364 return;
1365}
1366
116b6019
RS
1367/* Return the index in qualifiers_list that INST is using. Should only
1368 be called once the qualifiers are known to be valid. */
1369
1370static int
1371aarch64_get_variant (struct aarch64_inst *inst)
1372{
1373 int i, nops, variant;
1374
1375 nops = aarch64_num_of_operands (inst->opcode);
1376 for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
1377 {
1378 for (i = 0; i < nops; ++i)
1379 if (inst->opcode->qualifiers_list[variant][i]
1380 != inst->operands[i].qualifier)
1381 break;
1382 if (i == nops)
1383 return variant;
1384 }
1385 abort ();
1386}
1387
a06ea964
NC
1388/* Do miscellaneous encodings that are not common enough to be driven by
1389 flags. */
1390
1391static void
1392do_misc_encoding (aarch64_inst *inst)
1393{
c0890d26
RS
1394 unsigned int value;
1395
a06ea964
NC
1396 switch (inst->opcode->op)
1397 {
1398 case OP_FCVT:
1399 encode_fcvt (inst);
1400 break;
1401 case OP_FCVTN:
1402 case OP_FCVTN2:
1403 case OP_FCVTL:
1404 case OP_FCVTL2:
1405 encode_asimd_fcvt (inst);
1406 break;
1407 case OP_FCVTXN_S:
1408 encode_asisd_fcvtxn (inst);
1409 break;
c0890d26
RS
1410 case OP_MOV_P_P:
1411 case OP_MOVS_P_P:
1412 /* Copy Pn to Pm and Pg. */
1413 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1414 insert_field (FLD_SVE_Pm, &inst->value, value, 0);
1415 insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
1416 break;
1417 case OP_MOV_Z_P_Z:
1418 /* Copy Zd to Zm. */
1419 value = extract_field (FLD_SVE_Zd, inst->value, 0);
1420 insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
1421 break;
1422 case OP_MOV_Z_V:
1423 /* Fill in the zero immediate. */
582e12bf
RS
1424 insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
1425 2, FLD_imm5, FLD_SVE_tszh);
c0890d26
RS
1426 break;
1427 case OP_MOV_Z_Z:
1428 /* Copy Zn to Zm. */
1429 value = extract_field (FLD_SVE_Zn, inst->value, 0);
1430 insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
1431 break;
1432 case OP_MOV_Z_Zi:
1433 break;
1434 case OP_MOVM_P_P_P:
1435 /* Copy Pd to Pm. */
1436 value = extract_field (FLD_SVE_Pd, inst->value, 0);
1437 insert_field (FLD_SVE_Pm, &inst->value, value, 0);
1438 break;
1439 case OP_MOVZS_P_P_P:
1440 case OP_MOVZ_P_P_P:
1441 /* Copy Pn to Pm. */
1442 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1443 insert_field (FLD_SVE_Pm, &inst->value, value, 0);
1444 break;
1445 case OP_NOTS_P_P_P_Z:
1446 case OP_NOT_P_P_P_Z:
1447 /* Copy Pg to Pm. */
1448 value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
1449 insert_field (FLD_SVE_Pm, &inst->value, value, 0);
1450 break;
a06ea964
NC
1451 default: break;
1452 }
1453}
1454
1455/* Encode the 'size' and 'Q' field for e.g. SHADD. */
1456static void
1457encode_sizeq (aarch64_inst *inst)
1458{
1459 aarch64_insn sizeq;
1460 enum aarch64_field_kind kind;
1461 int idx;
1462
1463 /* Get the index of the operand whose information we are going to use
1464 to encode the size and Q fields.
1465 This is deduced from the possible valid qualifier lists. */
1466 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1467 DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
1468 aarch64_get_qualifier_name (inst->operands[idx].qualifier));
1469 sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
1470 /* Q */
1471 insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
1472 /* size */
1473 if (inst->opcode->iclass == asisdlse
1474 || inst->opcode->iclass == asisdlsep
1475 || inst->opcode->iclass == asisdlso
1476 || inst->opcode->iclass == asisdlsop)
1477 kind = FLD_vldst_size;
1478 else
1479 kind = FLD_size;
1480 insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
1481}
1482
1483/* Opcodes that have fields shared by multiple operands are usually flagged
1484 with flags. In this function, we detect such flags and use the
 1485 information in one of the related operands to do the encoding. The 'one'
 1486 operand is not just any operand, but one of the operands that has
 1487 enough information for such an encoding. */
1488
1489static void
1490do_special_encoding (struct aarch64_inst *inst)
1491{
1492 int idx;
4ad3b7ef 1493 aarch64_insn value = 0;
a06ea964
NC
1494
1495 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
1496
1497 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1498 if (inst->opcode->flags & F_COND)
1499 {
1500 insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
1501 }
1502 if (inst->opcode->flags & F_SF)
1503 {
1504 idx = select_operand_for_sf_field_coding (inst->opcode);
1505 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
1506 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
1507 ? 1 : 0;
1508 insert_field (FLD_sf, &inst->value, value, 0);
1509 if (inst->opcode->flags & F_N)
1510 insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
1511 }
ee804238
JW
1512 if (inst->opcode->flags & F_LSE_SZ)
1513 {
1514 idx = select_operand_for_sf_field_coding (inst->opcode);
1515 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
1516 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
1517 ? 1 : 0;
1518 insert_field (FLD_lse_sz, &inst->value, value, 0);
1519 }
a06ea964
NC
1520 if (inst->opcode->flags & F_SIZEQ)
1521 encode_sizeq (inst);
1522 if (inst->opcode->flags & F_FPTYPE)
1523 {
1524 idx = select_operand_for_fptype_field_coding (inst->opcode);
1525 switch (inst->operands[idx].qualifier)
1526 {
1527 case AARCH64_OPND_QLF_S_S: value = 0; break;
1528 case AARCH64_OPND_QLF_S_D: value = 1; break;
1529 case AARCH64_OPND_QLF_S_H: value = 3; break;
1530 default: assert (0);
1531 }
1532 insert_field (FLD_type, &inst->value, value, 0);
1533 }
1534 if (inst->opcode->flags & F_SSIZE)
1535 {
1536 enum aarch64_opnd_qualifier qualifier;
1537 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1538 qualifier = inst->operands[idx].qualifier;
1539 assert (qualifier >= AARCH64_OPND_QLF_S_B
1540 && qualifier <= AARCH64_OPND_QLF_S_Q);
1541 value = aarch64_get_qualifier_standard_value (qualifier);
1542 insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
1543 }
1544 if (inst->opcode->flags & F_T)
1545 {
1546 int num; /* num of consecutive '0's on the right side of imm5<3:0>. */
1547 aarch64_field field = {0, 0};
1548 enum aarch64_opnd_qualifier qualifier;
1549
1550 idx = 0;
1551 qualifier = inst->operands[idx].qualifier;
1552 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1553 == AARCH64_OPND_CLASS_SIMD_REG
1554 && qualifier >= AARCH64_OPND_QLF_V_8B
1555 && qualifier <= AARCH64_OPND_QLF_V_2D);
1556 /* imm5<3:0> q <t>
1557 0000 x reserved
1558 xxx1 0 8b
1559 xxx1 1 16b
1560 xx10 0 4h
1561 xx10 1 8h
1562 x100 0 2s
1563 x100 1 4s
1564 1000 0 reserved
1565 1000 1 2d */
1566 value = aarch64_get_qualifier_standard_value (qualifier);
1567 insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
1568 num = (int) value >> 1;
1569 assert (num >= 0 && num <= 3);
1570 gen_sub_field (FLD_imm5, 0, num + 1, &field);
1571 insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
1572 }
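  /* Illustrative example: with a .4S operand the standard qualifier value
     is 0b101, so Q is set to 1, num is 2 and imm5<2:0> becomes 0b100,
     matching the "x100 1 -> 4s" row of the table above.  */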
1573 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1574 {
1575 /* Use Rt to encode in the case of e.g.
1576 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1577 enum aarch64_opnd_qualifier qualifier;
1578 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1579 if (idx == -1)
1580 /* Otherwise use the result operand, which has to be an integer
1581 register. */
1582 idx = 0;
1583 assert (idx == 0 || idx == 1);
1584 assert (aarch64_get_operand_class (inst->opcode->operands[idx])
1585 == AARCH64_OPND_CLASS_INT_REG);
1586 qualifier = inst->operands[idx].qualifier;
1587 insert_field (FLD_Q, &inst->value,
1588 aarch64_get_qualifier_standard_value (qualifier), 0);
1589 }
1590 if (inst->opcode->flags & F_LDS_SIZE)
1591 {
1592 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1593 enum aarch64_opnd_qualifier qualifier;
1594 aarch64_field field = {0, 0};
1595 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1596 == AARCH64_OPND_CLASS_INT_REG);
1597 gen_sub_field (FLD_opc, 0, 1, &field);
1598 qualifier = inst->operands[0].qualifier;
1599 insert_field_2 (&field, &inst->value,
1600 1 - aarch64_get_qualifier_standard_value (qualifier), 0);
1601 }
1602 /* Miscellaneous encoding as the last step. */
1603 if (inst->opcode->flags & F_MISC)
1604 do_misc_encoding (inst);
1605
1606 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
1607}
1608
116b6019
RS
1609/* Some instructions (including all SVE ones) use the instruction class
1610 to describe how a qualifiers_list index is represented in the instruction
1611 encoding. If INST is such an instruction, encode the chosen qualifier
1612 variant. */
1613
1614static void
1615aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
1616{
cd50a87a 1617 int variant = 0;
116b6019
RS
1618 switch (inst->opcode->iclass)
1619 {
1620 case sve_cpy:
1621 insert_fields (&inst->value, aarch64_get_variant (inst),
1622 0, 2, FLD_SVE_M_14, FLD_size);
1623 break;
1624
1625 case sve_index:
1626 case sve_shift_pred:
1627 case sve_shift_unpred:
3c17238b 1628 case sve_shift_tsz_hsd:
1be5f94f 1629 case sve_shift_tsz_bhsd:
116b6019
RS
1630 /* For indices and shift amounts, the variant is encoded as
1631 part of the immediate. */
1632 break;
1633
1634 case sve_limm:
1635 /* For sve_limm, the .B, .H, and .S forms are just a convenience
1636 and depend on the immediate. They don't have a separate
1637 encoding. */
1638 break;
1639
1640 case sve_misc:
1641 /* sve_misc instructions have only a single variant. */
1642 break;
1643
1644 case sve_movprfx:
1645 insert_fields (&inst->value, aarch64_get_variant (inst),
1646 0, 2, FLD_SVE_M_16, FLD_size);
1647 break;
1648
1649 case sve_pred_zm:
1650 insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
1651 break;
1652
1653 case sve_size_bhs:
1654 case sve_size_bhsd:
1655 insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
1656 break;
1657
1658 case sve_size_hsd:
1659 insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
1660 break;
1661
3c705960 1662 case sve_size_bh:
116b6019
RS
1663 case sve_size_sd:
1664 insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
1665 break;
1666
0a57e14f
MM
1667 case sve_size_sd2:
1668 insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
1669 break;
1670
3bd82c86
MM
1671 case sve_size_hsd2:
1672 insert_field (FLD_SVE_size, &inst->value,
1673 aarch64_get_variant (inst) + 1, 0);
1674 break;
1675
fd1dc4a0
MM
1676 case sve_size_tsz_bhs:
1677 insert_fields (&inst->value,
1678 (1 << aarch64_get_variant (inst)),
1679 0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
1680 break;
1681
41be57ca
MM
1682 case sve_size_13:
1683 variant = aarch64_get_variant (inst) + 1;
cd50a87a
MM
1684 if (variant == 2)
1685 variant = 3;
1686 insert_field (FLD_size, &inst->value, variant, 0);
1687 break;
1688
116b6019
RS
1689 default:
1690 break;
1691 }
1692}
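/* For instance, for an sve_size_hsd instruction whose qualifier list is
   ordered H, S, D (an assumption for this example), the .H variant
   (index 0) is encoded as size 1, .S as 2 and .D as 3.  */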
1693
a06ea964
NC
1694/* Converters that convert an alias opcode instruction to its real form. */
1695
1696/* ROR <Wd>, <Ws>, #<shift>
1697 is equivalent to:
1698 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1699static void
1700convert_ror_to_extr (aarch64_inst *inst)
1701{
1702 copy_operand_info (inst, 3, 2);
1703 copy_operand_info (inst, 2, 1);
1704}
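/* For example, ROR W0, W1, #3 is assembled as EXTR W0, W1, W1, #3: the
   shift amount moves to operand 3 and the single source register is
   duplicated into operand 2.  */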
1705
e30181a5
YZ
1706/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1707 is equivalent to:
1708 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1709static void
1710convert_xtl_to_shll (aarch64_inst *inst)
1711{
1712 inst->operands[2].qualifier = inst->operands[1].qualifier;
1713 inst->operands[2].imm.value = 0;
1714}
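/* For example, UXTL V0.8H, V1.8B becomes USHLL V0.8H, V1.8B, #0.  */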
1715
a06ea964
NC
1716/* Convert
1717 LSR <Xd>, <Xn>, #<shift>
1718 to
1719 UBFM <Xd>, <Xn>, #<shift>, #63. */
1720static void
1721convert_sr_to_bfm (aarch64_inst *inst)
1722{
1723 inst->operands[3].imm.value =
1724 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1725}
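/* For example, LSR X0, X1, #4 becomes UBFM X0, X1, #4, #63, and the
   32-bit form LSR W0, W1, #4 becomes UBFM W0, W1, #4, #31.  */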
1726
1727/* Convert MOV to ORR. */
1728static void
1729convert_mov_to_orr (aarch64_inst *inst)
1730{
1731 /* MOV <Vd>.<T>, <Vn>.<T>
1732 is equivalent to:
1733 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1734 copy_operand_info (inst, 2, 1);
1735}
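/* For example, MOV V0.16B, V1.16B becomes ORR V0.16B, V1.16B, V1.16B.  */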
1736
1737/* When <imms> >= <immr>, the instruction written:
1738 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1739 is equivalent to:
1740 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1741
1742static void
1743convert_bfx_to_bfm (aarch64_inst *inst)
1744{
1745 int64_t lsb, width;
1746
1747 /* Convert the operand. */
1748 lsb = inst->operands[2].imm.value;
1749 width = inst->operands[3].imm.value;
1750 inst->operands[2].imm.value = lsb;
1751 inst->operands[3].imm.value = lsb + width - 1;
1752}
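/* For example, SBFX X0, X1, #8, #4 becomes SBFM X0, X1, #8, #11
   (lsb = 8, lsb + width - 1 = 11).  */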
1753
1754/* When <imms> < <immr>, the instruction written:
1755 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1756 is equivalent to:
1757 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1758
1759static void
1760convert_bfi_to_bfm (aarch64_inst *inst)
1761{
1762 int64_t lsb, width;
1763
1764 /* Convert the operand. */
1765 lsb = inst->operands[2].imm.value;
1766 width = inst->operands[3].imm.value;
1767 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1768 {
1769 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1770 inst->operands[3].imm.value = width - 1;
1771 }
1772 else
1773 {
1774 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1775 inst->operands[3].imm.value = width - 1;
1776 }
1777}
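/* For example, BFI X0, X1, #8, #4 becomes BFM X0, X1, #56, #3
   ((64 - 8) & 0x3f = 56, width - 1 = 3).  */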
1778
d685192a
MW
1779/* The instruction written:
1780 BFC <Xd>, #<lsb>, #<width>
1781 is equivalent to:
1782 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1783
1784static void
1785convert_bfc_to_bfm (aarch64_inst *inst)
1786{
1787 int64_t lsb, width;
1788
1789 /* Insert XZR. */
1790 copy_operand_info (inst, 3, 2);
1791 copy_operand_info (inst, 2, 1);
11648de5 1792 copy_operand_info (inst, 1, 0);
d685192a
MW
1793 inst->operands[1].reg.regno = 0x1f;
1794
11648de5 1795 /* Convert the immediate operand. */
d685192a
MW
1796 lsb = inst->operands[2].imm.value;
1797 width = inst->operands[3].imm.value;
1798 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1799 {
1800 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1801 inst->operands[3].imm.value = width - 1;
1802 }
1803 else
1804 {
1805 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1806 inst->operands[3].imm.value = width - 1;
1807 }
1808}
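/* For example, BFC X0, #8, #4 becomes BFM X0, XZR, #56, #3, with XZR
   (register number 0x1f) inserted as the source register.  */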
1809
a06ea964
NC
1810/* The instruction written:
1811 LSL <Xd>, <Xn>, #<shift>
1812 is equivalent to:
1813 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1814
1815static void
1816convert_lsl_to_ubfm (aarch64_inst *inst)
1817{
1818 int64_t shift = inst->operands[2].imm.value;
1819
1820 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1821 {
1822 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1823 inst->operands[3].imm.value = 31 - shift;
1824 }
1825 else
1826 {
1827 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1828 inst->operands[3].imm.value = 63 - shift;
1829 }
1830}
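/* For example, LSL X0, X1, #3 becomes UBFM X0, X1, #61, #60
   ((64 - 3) & 0x3f = 61, 63 - 3 = 60).  */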
1831
1832/* CINC <Wd>, <Wn>, <cond>
1833 is equivalent to:
1834 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1835
1836static void
1837convert_to_csel (aarch64_inst *inst)
1838{
1839 copy_operand_info (inst, 3, 2);
1840 copy_operand_info (inst, 2, 1);
1841 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1842}
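/* For example, CINC W0, W1, EQ becomes CSINC W0, W1, W1, NE: the source
   register is duplicated and the condition inverted.  */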
1843
1844/* CSET <Wd>, <cond>
1845 is equivalent to:
1846 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1847
1848static void
1849convert_cset_to_csinc (aarch64_inst *inst)
1850{
1851 copy_operand_info (inst, 3, 1);
1852 copy_operand_info (inst, 2, 0);
1853 copy_operand_info (inst, 1, 0);
1854 inst->operands[1].reg.regno = 0x1f;
1855 inst->operands[2].reg.regno = 0x1f;
1856 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1857}
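/* For example, CSET W0, EQ becomes CSINC W0, WZR, WZR, NE.  */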
1858
1859/* MOV <Wd>, #<imm>
1860 is equivalent to:
1861 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1862
1863static void
1864convert_mov_to_movewide (aarch64_inst *inst)
1865{
1866 int is32;
1867 uint32_t shift_amount;
1868 uint64_t value;
1869
1870 switch (inst->opcode->op)
1871 {
1872 case OP_MOV_IMM_WIDE:
1873 value = inst->operands[1].imm.value;
1874 break;
1875 case OP_MOV_IMM_WIDEN:
1876 value = ~inst->operands[1].imm.value;
1877 break;
1878 default:
1879 assert (0);
1880 }
1881 inst->operands[1].type = AARCH64_OPND_HALF;
1882 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
062f38fa
RE
1883 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1884 /* The constraint check should have guaranteed this wouldn't happen. */
1885 assert (0);
a06ea964
NC
1886 value >>= shift_amount;
1887 value &= 0xffff;
1888 inst->operands[1].imm.value = value;
1889 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1890 inst->operands[1].shifter.amount = shift_amount;
1891}
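/* For example, MOV X0, #0xabcd0000 becomes MOVZ X0, #0xabcd, LSL #16;
   the shift amount is the one returned by aarch64_wide_constant_p above.  */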
1892
1893/* MOV <Wd>, #<imm>
1894 is equivalent to:
1895 ORR <Wd>, WZR, #<imm>. */
1896
1897static void
1898convert_mov_to_movebitmask (aarch64_inst *inst)
1899{
1900 copy_operand_info (inst, 2, 1);
1901 inst->operands[1].reg.regno = 0x1f;
1902 inst->operands[1].skip = 0;
1903}
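/* For example, MOV X0, #0xff (0xff being a valid logical immediate)
   becomes ORR X0, XZR, #0xff.  */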
1904
1905/* Some alias opcodes are assembled by being converted to their real form. */
1906
1907static void
1908convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1909{
1910 const aarch64_opcode *alias = inst->opcode;
1911
1912 if ((alias->flags & F_CONV) == 0)
1913 goto convert_to_real_return;
1914
1915 switch (alias->op)
1916 {
1917 case OP_ASR_IMM:
1918 case OP_LSR_IMM:
1919 convert_sr_to_bfm (inst);
1920 break;
1921 case OP_LSL_IMM:
1922 convert_lsl_to_ubfm (inst);
1923 break;
1924 case OP_CINC:
1925 case OP_CINV:
1926 case OP_CNEG:
1927 convert_to_csel (inst);
1928 break;
1929 case OP_CSET:
1930 case OP_CSETM:
1931 convert_cset_to_csinc (inst);
1932 break;
1933 case OP_UBFX:
1934 case OP_BFXIL:
1935 case OP_SBFX:
1936 convert_bfx_to_bfm (inst);
1937 break;
1938 case OP_SBFIZ:
1939 case OP_BFI:
1940 case OP_UBFIZ:
1941 convert_bfi_to_bfm (inst);
1942 break;
d685192a
MW
1943 case OP_BFC:
1944 convert_bfc_to_bfm (inst);
1945 break;
a06ea964
NC
1946 case OP_MOV_V:
1947 convert_mov_to_orr (inst);
1948 break;
1949 case OP_MOV_IMM_WIDE:
1950 case OP_MOV_IMM_WIDEN:
1951 convert_mov_to_movewide (inst);
1952 break;
1953 case OP_MOV_IMM_LOG:
1954 convert_mov_to_movebitmask (inst);
1955 break;
1956 case OP_ROR_IMM:
1957 convert_ror_to_extr (inst);
1958 break;
e30181a5
YZ
1959 case OP_SXTL:
1960 case OP_SXTL2:
1961 case OP_UXTL:
1962 case OP_UXTL2:
1963 convert_xtl_to_shll (inst);
1964 break;
a06ea964
NC
1965 default:
1966 break;
1967 }
1968
1969convert_to_real_return:
1970 aarch64_replace_opcode (inst, real);
1971}
1972
1973/* Encode *INST_ORI of the opcode OPCODE.
1974 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1975 matched operand qualifier sequence in *QLF_SEQ. */
1976
561a72d4 1977bfd_boolean
a06ea964
NC
1978aarch64_opcode_encode (const aarch64_opcode *opcode,
1979 const aarch64_inst *inst_ori, aarch64_insn *code,
1980 aarch64_opnd_qualifier_t *qlf_seq,
7e84b55d 1981 aarch64_operand_error *mismatch_detail,
bde90be2 1982 aarch64_instr_sequence* insn_sequence)
a06ea964
NC
1983{
1984 int i;
1985 const aarch64_opcode *aliased;
1986 aarch64_inst copy, *inst;
1987
1988 DEBUG_TRACE ("enter with %s", opcode->name);
1989
1990 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1991 copy = *inst_ori;
1992 inst = &copy;
1993
1994 assert (inst->opcode == NULL || inst->opcode == opcode);
1995 if (inst->opcode == NULL)
1996 inst->opcode = opcode;
1997
1998 /* Constrain the operands.
1999 After passing this, the encoding is guaranteed to succeed. */
2000 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
2001 {
2002 DEBUG_TRACE ("FAIL since operand constraint not met");
2003 return 0;
2004 }
2005
2006 /* Get the base value.
2007 Note: this has to be before the aliasing handling below in order to
2008 get the base value from the alias opcode before we move on to the
2009 aliased opcode for encoding. */
2010 inst->value = opcode->opcode;
2011
2012 /* No need to do anything else if the opcode does not have any operand. */
2013 if (aarch64_num_of_operands (opcode) == 0)
2014 goto encoding_exit;
2015
2016 /* Assign operand indexes and check types. Also put the matched
2017 operand qualifiers in *QLF_SEQ to return. */
2018 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2019 {
2020 assert (opcode->operands[i] == inst->operands[i].type);
2021 inst->operands[i].idx = i;
2022 if (qlf_seq != NULL)
2023 *qlf_seq = inst->operands[i].qualifier;
2024 }
2025
2026 aliased = aarch64_find_real_opcode (opcode);
2027 /* If the opcode is an alias and it does not ask for direct encoding by
2028 itself, the instruction will be transformed to the form of the real opcode
2029 and the encoding will be carried out using the rules for the aliased
2030 opcode. */
2031 if (aliased != NULL && (opcode->flags & F_CONV))
2032 {
2033 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
2034 aliased->name, opcode->name);
2035 /* Convert the operands to the form of the real opcode. */
2036 convert_to_real (inst, aliased);
2037 opcode = aliased;
2038 }
2039
2040 aarch64_opnd_info *info = inst->operands;
2041
2042 /* Call the inserter of each operand. */
2043 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
2044 {
2045 const aarch64_operand *opnd;
2046 enum aarch64_opnd type = opcode->operands[i];
2047 if (type == AARCH64_OPND_NIL)
2048 break;
2049 if (info->skip)
2050 {
2051 DEBUG_TRACE ("skip the incomplete operand %d", i);
2052 continue;
2053 }
2054 opnd = &aarch64_operands[type];
561a72d4
TC
2055 if (operand_has_inserter (opnd)
2056 && !aarch64_insert_operand (opnd, info, &inst->value, inst,
2057 mismatch_detail))
2058 return FALSE;
a06ea964
NC
2059 }
2060
2061 /* Call opcode encoders indicated by flags. */
2062 if (opcode_has_special_coder (opcode))
2063 do_special_encoding (inst);
2064
116b6019
RS
2065 /* Possibly use the instruction class to encode the chosen qualifier
2066 variant. */
2067 aarch64_encode_variant_using_iclass (inst);
2068
bde90be2
TC
2069 /* Run a verifier if the instruction has one set. */
2070 if (opcode->verifier)
2071 {
2072 enum err_type result = opcode->verifier (inst, *code, 0, TRUE,
2073 mismatch_detail, insn_sequence);
2074 switch (result)
2075 {
2076 case ERR_UND:
2077 case ERR_UNP:
2078 case ERR_NYI:
2079 return FALSE;
2080 default:
2081 break;
2082 }
2083 }
2084
2085 /* Always run the constraint verifiers, regardless of whether the instruction
2086 has the verifier flag set or not; this is needed because the constraint
2087 checks maintain a global state. */
2088 enum err_type result = verify_constraints (inst, *code, 0, TRUE,
2089 mismatch_detail, insn_sequence);
2090 switch (result)
2091 {
2092 case ERR_UND:
2093 case ERR_UNP:
2094 case ERR_NYI:
2095 return FALSE;
2096 default:
2097 break;
2098 }
2099
2100
a06ea964
NC
2101encoding_exit:
2102 DEBUG_TRACE ("exit with %s", opcode->name);
2103
2104 *code = inst->value;
2105
561a72d4 2106 return TRUE;
a06ea964 2107}