 1/* aarch64-asm.c -- AArch64 assembler support.
 2 Copyright (C) 2012-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21#include "sysdep.h"
22#include <stdarg.h>
 23#include "libiberty.h"
24#include "aarch64-asm.h"
25
26/* Utilities. */
27
28/* The unnamed arguments consist of the number of fields and information about
29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
30 the base mask of the opcode.
31
 32 N.B. the fields are required to be in such an order that the least significant
 33 field for VALUE comes first, e.g. the <index> in
34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
 35 is encoded in H:L:M in some cases; the fields H:L:M should then be passed in
36 the order of M, L, H. */
37
38static inline void
39insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
40{
41 uint32_t num;
42 const aarch64_field *field;
43 enum aarch64_field_kind kind;
44 va_list va;
45
46 va_start (va, mask);
47 num = va_arg (va, uint32_t);
48 assert (num <= 5);
49 while (num--)
50 {
51 kind = va_arg (va, enum aarch64_field_kind);
52 field = &fields[kind];
53 insert_field (kind, code, value, mask);
54 value >>= field->width;
55 }
56 va_end (va);
57}
58
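/* Illustrative sketch: the variable arguments are the number of fields
   followed by the fields themselves, least significant first.  The helper
   below is hypothetical and only demonstrates the calling convention; a
   3-bit lane index whose bits live in H:L:M is passed as M, L, H, so bit 0
   of INDEX lands in M, bit 1 in L and bit 2 in H (compare
   aarch64_ins_reglane below).  */

static void ATTRIBUTE_UNUSED
example_insert_hlm_index (aarch64_insn *code, aarch64_insn index)
{
  insert_fields (code, index, 0, 3, FLD_M, FLD_L, FLD_H);
}
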
59/* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
61
62static void
63insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
64 aarch64_insn value)
65{
66 unsigned int i;
67 enum aarch64_field_kind kind;
68
69 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
70 if (self->fields[i] != FLD_NIL)
71 {
72 kind = self->fields[i];
73 insert_field (kind, code, value, 0);
74 value >>= fields[kind].width;
75 }
76}
77
78/* Operand inserters. */
79
80/* Insert register number. */
81const char *
82aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
83 aarch64_insn *code,
84 const aarch64_inst *inst ATTRIBUTE_UNUSED)
85{
86 insert_field (self->fields[0], code, info->reg.regno, 0);
87 return NULL;
88}
89
90/* Insert register number, index and/or other data for SIMD register element
91 operand, e.g. the last source operand in
92 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
93const char *
94aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
95 aarch64_insn *code, const aarch64_inst *inst)
96{
97 /* regno */
98 insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
99 /* index and/or type */
100 if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
101 {
102 int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
103 if (info->type == AARCH64_OPND_En
104 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
105 {
106 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
107 assert (info->idx == 1); /* Vn */
108 aarch64_insn value = info->reglane.index << pos;
109 insert_field (FLD_imm4, code, value, 0);
110 }
111 else
112 {
113 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
114 imm5<3:0> <V>
115 0000 RESERVED
116 xxx1 B
117 xx10 H
118 x100 S
119 1000 D */
120 aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
121 insert_field (FLD_imm5, code, value, 0);
122 }
123 }
124 else if (inst->opcode->iclass == dotproduct)
125 {
126 unsigned reglane_index = info->reglane.index;
127 switch (info->qualifier)
128 {
 129 case AARCH64_OPND_QLF_S_4B:
130 /* L:H */
131 assert (reglane_index < 4);
132 insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
133 break;
134 default:
135 assert (0);
136 }
137 }
138 else if (inst->opcode->iclass == cryptosm3)
139 {
140 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
141 unsigned reglane_index = info->reglane.index;
142 assert (reglane_index < 4);
143 insert_field (FLD_SM3_imm2, code, reglane_index, 0);
144 }
145 else
146 {
147 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
148 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
 149 unsigned reglane_index = info->reglane.index;
150
151 if (inst->opcode->op == OP_FCMLA_ELEM)
152 /* Complex operand takes two elements. */
 153 reglane_index *= 2;
 154
155 switch (info->qualifier)
156 {
157 case AARCH64_OPND_QLF_S_H:
158 /* H:L:M */
159 assert (reglane_index < 8);
160 insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
161 break;
162 case AARCH64_OPND_QLF_S_S:
163 /* H:L */
164 assert (reglane_index < 4);
165 insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
166 break;
167 case AARCH64_OPND_QLF_S_D:
168 /* H */
169 assert (reglane_index < 2);
170 insert_field (FLD_H, code, reglane_index, 0);
171 break;
172 default:
173 assert (0);
174 }
175 }
176 return NULL;
177}
178
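/* Worked example for the DUP case above: for DUP S0, V1.S[2] the qualifier
   is AARCH64_OPND_QLF_S_S, so pos = 2 and value = ((2 << 1) | 1) << 2
   = 0b10100.  imm5<3:0> is then "x100", selecting <V> = S as in the table,
   and the lane index 2 sits in the bits above the size marker.  */
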
179/* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
180const char *
181aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
182 aarch64_insn *code,
183 const aarch64_inst *inst ATTRIBUTE_UNUSED)
184{
185 /* R */
186 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
187 /* len */
188 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
189 return NULL;
190}
191
192/* Insert Rt and opcode fields for a register list operand, e.g. Vt
193 in AdvSIMD load/store instructions. */
194const char *
195aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
196 const aarch64_opnd_info *info, aarch64_insn *code,
197 const aarch64_inst *inst)
198{
 199 aarch64_insn value = 0;
200 /* Number of elements in each structure to be loaded/stored. */
201 unsigned num = get_opcode_dependent_value (inst->opcode);
202
203 /* Rt */
204 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
205 /* opcode */
206 switch (num)
207 {
208 case 1:
209 switch (info->reglist.num_regs)
210 {
211 case 1: value = 0x7; break;
212 case 2: value = 0xa; break;
213 case 3: value = 0x6; break;
214 case 4: value = 0x2; break;
215 default: assert (0);
216 }
217 break;
218 case 2:
219 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
220 break;
221 case 3:
222 value = 0x4;
223 break;
224 case 4:
225 value = 0x0;
226 break;
227 default:
228 assert (0);
229 }
230 insert_field (FLD_opcode, code, value, 0);
231
232 return NULL;
233}
234
235/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
236 single structure to all lanes instructions. */
237const char *
238aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
239 const aarch64_opnd_info *info, aarch64_insn *code,
240 const aarch64_inst *inst)
241{
242 aarch64_insn value;
243 /* The opcode dependent area stores the number of elements in
244 each structure to be loaded/stored. */
245 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
246
247 /* Rt */
248 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
249 /* S */
250 value = (aarch64_insn) 0;
251 if (is_ld1r && info->reglist.num_regs == 2)
 252 /* OP_LD1R does not have an alternating variant, but has a "two
 253 consecutive" form instead. */
254 value = (aarch64_insn) 1;
255 insert_field (FLD_S, code, value, 0);
256
257 return NULL;
258}
259
260/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
261 operand e.g. Vt in AdvSIMD load/store single element instructions. */
262const char *
263aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
264 const aarch64_opnd_info *info, aarch64_insn *code,
265 const aarch64_inst *inst ATTRIBUTE_UNUSED)
266{
267 aarch64_field field = {0, 0};
268 aarch64_insn QSsize = 0; /* fields Q:S:size. */
269 aarch64_insn opcodeh2 = 0; /* opcode<2:1> */
270
271 assert (info->reglist.has_index);
272
273 /* Rt */
274 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
275 /* Encode the index, opcode<2:1> and size. */
276 switch (info->qualifier)
277 {
278 case AARCH64_OPND_QLF_S_B:
279 /* Index encoded in "Q:S:size". */
280 QSsize = info->reglist.index;
281 opcodeh2 = 0x0;
282 break;
283 case AARCH64_OPND_QLF_S_H:
284 /* Index encoded in "Q:S:size<1>". */
285 QSsize = info->reglist.index << 1;
286 opcodeh2 = 0x1;
287 break;
288 case AARCH64_OPND_QLF_S_S:
289 /* Index encoded in "Q:S". */
290 QSsize = info->reglist.index << 2;
291 opcodeh2 = 0x2;
292 break;
293 case AARCH64_OPND_QLF_S_D:
294 /* Index encoded in "Q". */
295 QSsize = info->reglist.index << 3 | 0x1;
296 opcodeh2 = 0x2;
297 break;
298 default:
299 assert (0);
300 }
301 insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
302 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
303 insert_field_2 (&field, code, opcodeh2, 0);
304
305 return NULL;
306}
307
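/* Worked example (assuming the vldst_size field is two bits wide): for
   LD1 {V2.H}[5], [X0] the qualifier is AARCH64_OPND_QLF_S_H, so
   QSsize = 5 << 1 = 0b1010 and opcodeh2 = 1.  insert_fields places the low
   two bits (10) in "size", the next bit (0) in S and the top bit (1) in Q,
   so the index reads back from Q:S:size<1> as 0b101 = 5.  */
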
308/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
309 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
310 or SSHR <V><d>, <V><n>, #<shift>. */
311const char *
312aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
313 const aarch64_opnd_info *info,
314 aarch64_insn *code, const aarch64_inst *inst)
315{
316 unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
317 aarch64_insn Q, imm;
318
319 if (inst->opcode->iclass == asimdshf)
320 {
321 /* Q
322 immh Q <T>
323 0000 x SEE AdvSIMD modified immediate
324 0001 0 8B
325 0001 1 16B
326 001x 0 4H
327 001x 1 8H
328 01xx 0 2S
329 01xx 1 4S
330 1xxx 0 RESERVED
331 1xxx 1 2D */
332 Q = (val & 0x1) ? 1 : 0;
333 insert_field (FLD_Q, code, Q, inst->opcode->mask);
334 val >>= 1;
335 }
336
337 assert (info->type == AARCH64_OPND_IMM_VLSR
338 || info->type == AARCH64_OPND_IMM_VLSL);
339
340 if (info->type == AARCH64_OPND_IMM_VLSR)
341 /* immh:immb
342 immh <shift>
343 0000 SEE AdvSIMD modified immediate
344 0001 (16-UInt(immh:immb))
345 001x (32-UInt(immh:immb))
346 01xx (64-UInt(immh:immb))
347 1xxx (128-UInt(immh:immb)) */
348 imm = (16 << (unsigned)val) - info->imm.value;
349 else
350 /* immh:immb
351 immh <shift>
352 0000 SEE AdvSIMD modified immediate
353 0001 (UInt(immh:immb)-8)
354 001x (UInt(immh:immb)-16)
355 01xx (UInt(immh:immb)-32)
356 1xxx (UInt(immh:immb)-64) */
357 imm = info->imm.value + (8 << (unsigned)val);
358 insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
359
360 return NULL;
361}
362
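/* Worked example (assuming the standard value of AARCH64_OPND_QLF_S_D
   is 3): for the scalar form SSHR D0, D1, #24 the right-shift case gives
   imm = (16 << 3) - 24 = 104 = 0b1101000, i.e. immh = 0b1101 and
   immb = 0b000, matching the "1xxx  (128-UInt(immh:immb))" row above.  */
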
363/* Insert fields for e.g. the immediate operands in
364 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
365const char *
366aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
367 aarch64_insn *code,
368 const aarch64_inst *inst ATTRIBUTE_UNUSED)
369{
370 int64_t imm;
371
372 imm = info->imm.value;
373 if (operand_need_shift_by_two (self))
374 imm >>= 2;
 375 insert_all_fields (self, code, imm);
376 return NULL;
377}
378
379/* Insert immediate and its shift amount for e.g. the last operand in
380 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
381const char *
382aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
 383 aarch64_insn *code, const aarch64_inst *inst)
384{
385 /* imm16 */
386 aarch64_ins_imm (self, info, code, inst);
387 /* hw */
388 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
389 return NULL;
390}
391
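/* Worked example: for MOVZ X0, #0xbeef, LSL #16 the imm16 field receives
   0xbeef and hw = 16 >> 4 = 1.  */
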
392/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
393 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
394const char *
395aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
396 const aarch64_opnd_info *info,
397 aarch64_insn *code,
398 const aarch64_inst *inst ATTRIBUTE_UNUSED)
399{
400 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
401 uint64_t imm = info->imm.value;
402 enum aarch64_modifier_kind kind = info->shifter.kind;
403 int amount = info->shifter.amount;
404 aarch64_field field = {0, 0};
405
406 /* a:b:c:d:e:f:g:h */
407 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
408 {
409 /* Either MOVI <Dd>, #<imm>
410 or MOVI <Vd>.2D, #<imm>.
411 <imm> is a 64-bit immediate
412 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
413 encoded in "a:b:c:d:e:f:g:h". */
414 imm = aarch64_shrink_expanded_imm8 (imm);
415 assert ((int)imm >= 0);
416 }
417 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
418
419 if (kind == AARCH64_MOD_NONE)
420 return NULL;
421
422 /* shift amount partially in cmode */
423 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
424 if (kind == AARCH64_MOD_LSL)
425 {
426 /* AARCH64_MOD_LSL: shift zeros. */
427 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
428 assert (esize == 4 || esize == 2 || esize == 1);
429 /* For 8-bit move immediate, the optional LSL #0 does not require
430 encoding. */
431 if (esize == 1)
432 return NULL;
433 amount >>= 3;
434 if (esize == 4)
435 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
436 else
437 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
438 }
439 else
440 {
441 /* AARCH64_MOD_MSL: shift ones. */
442 amount >>= 4;
443 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
444 }
445 insert_field_2 (&field, code, amount, 0);
446
447 return NULL;
448}
449
450/* Insert fields for an 8-bit floating-point immediate. */
451const char *
452aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
453 aarch64_insn *code,
454 const aarch64_inst *inst ATTRIBUTE_UNUSED)
455{
456 insert_all_fields (self, code, info->imm.value);
457 return NULL;
458}
459
 460/* Insert 1-bit rotation immediate (#90 or #270). */
 461const char *
462aarch64_ins_imm_rotate1 (const aarch64_operand *self,
463 const aarch64_opnd_info *info,
464 aarch64_insn *code, const aarch64_inst *inst)
 465{
466 uint64_t rot = (info->imm.value - 90) / 180;
467 assert (rot < 2U);
 468 insert_field (self->fields[0], code, rot, inst->opcode->mask);
469 return NULL;
470}
 471
472/* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
473const char *
474aarch64_ins_imm_rotate2 (const aarch64_operand *self,
475 const aarch64_opnd_info *info,
476 aarch64_insn *code, const aarch64_inst *inst)
477{
478 uint64_t rot = info->imm.value / 90;
479 assert (rot < 4U);
480 insert_field (self->fields[0], code, rot, inst->opcode->mask);
481 return NULL;
482}
483
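/* Worked examples: a 1-bit rotation of #270 encodes as (270 - 90) / 180
   = 1, while a 2-bit rotation of #180 encodes as 180 / 90 = 2.  */
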
484/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
485 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
486const char *
487aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
488 aarch64_insn *code,
489 const aarch64_inst *inst ATTRIBUTE_UNUSED)
490{
491 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
492 return NULL;
493}
494
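/* Worked example: for SCVTF D0, W1, #10 the value placed in the field is
   64 - 10 = 54.  */
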
495/* Insert arithmetic immediate for e.g. the last operand in
496 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
497const char *
498aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
499 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
500{
501 /* shift */
502 aarch64_insn value = info->shifter.amount ? 1 : 0;
503 insert_field (self->fields[0], code, value, 0);
504 /* imm12 (unsigned) */
505 insert_field (self->fields[1], code, info->imm.value, 0);
506 return NULL;
507}
508
509/* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
510 the operand should be inverted before encoding. */
511static const char *
512aarch64_ins_limm_1 (const aarch64_operand *self,
513 const aarch64_opnd_info *info, aarch64_insn *code,
514 const aarch64_inst *inst, bfd_boolean invert_p)
515{
516 aarch64_insn value;
517 uint64_t imm = info->imm.value;
 518 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
 519
 520 if (invert_p)
 521 imm = ~imm;
522 /* The constraint check should have guaranteed this wouldn't happen. */
523 assert (aarch64_logical_immediate_p (imm, esize, &value));
524
525 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
526 self->fields[0]);
527 return NULL;
528}
529
530/* Insert logical/bitmask immediate for e.g. the last operand in
531 ORR <Wd|WSP>, <Wn>, #<imm>. */
532const char *
533aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
534 aarch64_insn *code, const aarch64_inst *inst)
535{
536 return aarch64_ins_limm_1 (self, info, code, inst,
537 inst->opcode->op == OP_BIC);
538}
539
540/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
541const char *
542aarch64_ins_inv_limm (const aarch64_operand *self,
543 const aarch64_opnd_info *info, aarch64_insn *code,
544 const aarch64_inst *inst)
545{
546 return aarch64_ins_limm_1 (self, info, code, inst, TRUE);
547}
548
549/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
550 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
551const char *
552aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
553 aarch64_insn *code, const aarch64_inst *inst)
554{
 555 aarch64_insn value = 0;
556
557 assert (info->idx == 0);
558
559 /* Rt */
560 aarch64_ins_regno (self, info, code, inst);
561 if (inst->opcode->iclass == ldstpair_indexed
562 || inst->opcode->iclass == ldstnapair_offs
563 || inst->opcode->iclass == ldstpair_off
564 || inst->opcode->iclass == loadlit)
565 {
566 /* size */
567 switch (info->qualifier)
568 {
569 case AARCH64_OPND_QLF_S_S: value = 0; break;
570 case AARCH64_OPND_QLF_S_D: value = 1; break;
571 case AARCH64_OPND_QLF_S_Q: value = 2; break;
572 default: assert (0);
573 }
574 insert_field (FLD_ldst_size, code, value, 0);
575 }
576 else
577 {
578 /* opc[1]:size */
579 value = aarch64_get_qualifier_standard_value (info->qualifier);
580 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
581 }
582
583 return NULL;
584}
585
586/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
587const char *
588aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
589 const aarch64_opnd_info *info, aarch64_insn *code,
590 const aarch64_inst *inst ATTRIBUTE_UNUSED)
591{
592 /* Rn */
593 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
594 return NULL;
595}
596
597/* Encode the address operand for e.g.
598 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
599const char *
600aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
601 const aarch64_opnd_info *info, aarch64_insn *code,
602 const aarch64_inst *inst ATTRIBUTE_UNUSED)
603{
604 aarch64_insn S;
605 enum aarch64_modifier_kind kind = info->shifter.kind;
606
607 /* Rn */
608 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
609 /* Rm */
610 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
611 /* option */
612 if (kind == AARCH64_MOD_LSL)
 613 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven lookup. */
614 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
615 /* S */
616 if (info->qualifier != AARCH64_OPND_QLF_S_B)
617 S = info->shifter.amount != 0;
618 else
619 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
620 S <amount>
621 0 [absent]
622 1 #0
623 Must be #0 if <extend> is explicitly LSL. */
624 S = info->shifter.operator_present && info->shifter.amount_present;
625 insert_field (FLD_S, code, S, 0);
626
627 return NULL;
628}
629
630/* Encode the address operand for e.g.
631 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
632const char *
633aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
634 const aarch64_opnd_info *info, aarch64_insn *code,
635 const aarch64_inst *inst ATTRIBUTE_UNUSED)
636{
637 /* Rn */
638 insert_field (self->fields[0], code, info->addr.base_regno, 0);
639
640 /* simm9 */
641 int imm = info->addr.offset.imm;
642 insert_field (self->fields[1], code, imm, 0);
643
644 /* writeback */
645 if (info->addr.writeback)
646 {
647 assert (info->addr.preind == 1 && info->addr.postind == 0);
648 insert_field (self->fields[2], code, 1, 0);
649 }
650 return NULL;
651}
652
653/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
654const char *
655aarch64_ins_addr_simm (const aarch64_operand *self,
656 const aarch64_opnd_info *info,
657 aarch64_insn *code,
658 const aarch64_inst *inst ATTRIBUTE_UNUSED)
659{
660 int imm;
661
662 /* Rn */
663 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
664 /* simm (imm9 or imm7) */
665 imm = info->addr.offset.imm;
666 if (self->fields[0] == FLD_imm7)
 667 /* Scaled immediate in ld/st pair instructions. */
668 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
669 insert_field (self->fields[0], code, imm, 0);
670 /* pre/post- index */
671 if (info->addr.writeback)
672 {
673 assert (inst->opcode->iclass != ldst_unscaled
674 && inst->opcode->iclass != ldstnapair_offs
675 && inst->opcode->iclass != ldstpair_off
676 && inst->opcode->iclass != ldst_unpriv);
677 assert (info->addr.preind != info->addr.postind);
678 if (info->addr.preind)
679 insert_field (self->fields[1], code, 1, 0);
680 }
681
682 return NULL;
683}
684
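/* Worked example (assuming an 8-byte element size for the X-register
   form): for STP X1, X2, [X0, #16] the offset is scaled down before being
   placed in imm7, so the field holds 16 >> 3 = 2.  */
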
685/* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
686const char *
687aarch64_ins_addr_simm10 (const aarch64_operand *self,
688 const aarch64_opnd_info *info,
689 aarch64_insn *code,
690 const aarch64_inst *inst ATTRIBUTE_UNUSED)
691{
692 int imm;
693
694 /* Rn */
695 insert_field (self->fields[0], code, info->addr.base_regno, 0);
696 /* simm10 */
697 imm = info->addr.offset.imm >> 3;
698 insert_field (self->fields[1], code, imm >> 9, 0);
699 insert_field (self->fields[2], code, imm, 0);
700 /* writeback */
701 if (info->addr.writeback)
702 {
703 assert (info->addr.preind == 1 && info->addr.postind == 0);
704 insert_field (self->fields[3], code, 1, 0);
705 }
706 return NULL;
707}
708
709/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
710const char *
711aarch64_ins_addr_uimm12 (const aarch64_operand *self,
712 const aarch64_opnd_info *info,
713 aarch64_insn *code,
714 const aarch64_inst *inst ATTRIBUTE_UNUSED)
715{
716 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
717
718 /* Rn */
719 insert_field (self->fields[0], code, info->addr.base_regno, 0);
720 /* uimm12 */
 721 insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
722 return NULL;
723}
724
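/* Worked example: for LDR X0, [X1, #8] the element size is 8 bytes, so
   shift = 3 and the uimm12 field holds 8 >> 3 = 1.  */
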
725/* Encode the address operand for e.g.
726 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
727const char *
728aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
729 const aarch64_opnd_info *info, aarch64_insn *code,
730 const aarch64_inst *inst ATTRIBUTE_UNUSED)
731{
732 /* Rn */
733 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
734 /* Rm | #<amount> */
735 if (info->addr.offset.is_reg)
736 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
737 else
738 insert_field (FLD_Rm, code, 0x1f, 0);
739 return NULL;
740}
741
742/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
743const char *
744aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
745 const aarch64_opnd_info *info, aarch64_insn *code,
746 const aarch64_inst *inst ATTRIBUTE_UNUSED)
747{
748 /* cond */
749 insert_field (FLD_cond, code, info->cond->value, 0);
750 return NULL;
751}
752
753/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
754const char *
755aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
756 const aarch64_opnd_info *info, aarch64_insn *code,
757 const aarch64_inst *inst ATTRIBUTE_UNUSED)
758{
759 /* op0:op1:CRn:CRm:op2 */
760 insert_fields (code, info->sysreg, inst->opcode->mask, 5,
761 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
762 return NULL;
763}
764
765/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
766const char *
767aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
768 const aarch64_opnd_info *info, aarch64_insn *code,
769 const aarch64_inst *inst ATTRIBUTE_UNUSED)
770{
771 /* op1:op2 */
772 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
773 FLD_op2, FLD_op1);
774 return NULL;
775}
776
777/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
778const char *
779aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
780 const aarch64_opnd_info *info, aarch64_insn *code,
781 const aarch64_inst *inst ATTRIBUTE_UNUSED)
782{
783 /* op1:CRn:CRm:op2 */
784 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
785 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
786 return NULL;
787}
788
789/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
790
791const char *
792aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
793 const aarch64_opnd_info *info, aarch64_insn *code,
794 const aarch64_inst *inst ATTRIBUTE_UNUSED)
795{
796 /* CRm */
797 insert_field (FLD_CRm, code, info->barrier->value, 0);
798 return NULL;
799}
800
801/* Encode the prefetch operation option operand for e.g.
802 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
803
804const char *
805aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
806 const aarch64_opnd_info *info, aarch64_insn *code,
807 const aarch64_inst *inst ATTRIBUTE_UNUSED)
808{
809 /* prfop in Rt */
810 insert_field (FLD_Rt, code, info->prfop->value, 0);
811 return NULL;
812}
813
814/* Encode the hint number for instructions that alias HINT but take an
815 operand. */
816
817const char *
818aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
819 const aarch64_opnd_info *info, aarch64_insn *code,
820 const aarch64_inst *inst ATTRIBUTE_UNUSED)
821{
822 /* CRm:op2. */
823 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
824 return NULL;
825}
826
827/* Encode the extended register operand for e.g.
828 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
829const char *
830aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
831 const aarch64_opnd_info *info, aarch64_insn *code,
832 const aarch64_inst *inst ATTRIBUTE_UNUSED)
833{
834 enum aarch64_modifier_kind kind;
835
836 /* Rm */
837 insert_field (FLD_Rm, code, info->reg.regno, 0);
838 /* option */
839 kind = info->shifter.kind;
840 if (kind == AARCH64_MOD_LSL)
841 kind = info->qualifier == AARCH64_OPND_QLF_W
842 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
843 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
844 /* imm3 */
845 insert_field (FLD_imm3, code, info->shifter.amount, 0);
846
847 return NULL;
848}
849
850/* Encode the shifted register operand for e.g.
851 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
852const char *
853aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
854 const aarch64_opnd_info *info, aarch64_insn *code,
855 const aarch64_inst *inst ATTRIBUTE_UNUSED)
856{
857 /* Rm */
858 insert_field (FLD_Rm, code, info->reg.regno, 0);
859 /* shift */
860 insert_field (FLD_shift, code,
861 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
862 /* imm6 */
863 insert_field (FLD_imm6, code, info->shifter.amount, 0);
864
865 return NULL;
866}
867
868/* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
869 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
870 SELF's operand-dependent value. fields[0] specifies the field that
871 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
872const char *
873aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
874 const aarch64_opnd_info *info,
875 aarch64_insn *code,
876 const aarch64_inst *inst ATTRIBUTE_UNUSED)
877{
878 int factor = 1 + get_operand_specific_data (self);
879 insert_field (self->fields[0], code, info->addr.base_regno, 0);
880 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
881 return NULL;
882}
883
884/* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
885 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
886 SELF's operand-dependent value. fields[0] specifies the field that
887 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
888const char *
889aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
890 const aarch64_opnd_info *info,
891 aarch64_insn *code,
892 const aarch64_inst *inst ATTRIBUTE_UNUSED)
893{
894 int factor = 1 + get_operand_specific_data (self);
895 insert_field (self->fields[0], code, info->addr.base_regno, 0);
896 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
897 return NULL;
898}
899
900/* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
901 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
902 SELF's operand-dependent value. fields[0] specifies the field that
903 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
904 and imm3 fields, with imm3 being the less-significant part. */
905const char *
906aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
907 const aarch64_opnd_info *info,
908 aarch64_insn *code,
909 const aarch64_inst *inst ATTRIBUTE_UNUSED)
910{
911 int factor = 1 + get_operand_specific_data (self);
912 insert_field (self->fields[0], code, info->addr.base_regno, 0);
913 insert_fields (code, info->addr.offset.imm / factor, 0,
914 2, FLD_imm3, FLD_SVE_imm6);
915 return NULL;
916}
917
918/* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
919 is a 4-bit signed number and where <shift> is SELF's operand-dependent
920 value. fields[0] specifies the base register field. */
921const char *
922aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
923 const aarch64_opnd_info *info, aarch64_insn *code,
924 const aarch64_inst *inst ATTRIBUTE_UNUSED)
925{
926 int factor = 1 << get_operand_specific_data (self);
927 insert_field (self->fields[0], code, info->addr.base_regno, 0);
928 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
929 return NULL;
930}
931
932/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
933 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
934 value. fields[0] specifies the base register field. */
935const char *
936aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
937 const aarch64_opnd_info *info, aarch64_insn *code,
938 const aarch64_inst *inst ATTRIBUTE_UNUSED)
939{
940 int factor = 1 << get_operand_specific_data (self);
941 insert_field (self->fields[0], code, info->addr.base_regno, 0);
942 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
943 return NULL;
944}
945
946/* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
947 is SELF's operand-dependent value. fields[0] specifies the base
948 register field and fields[1] specifies the offset register field. */
949const char *
950aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
951 const aarch64_opnd_info *info, aarch64_insn *code,
952 const aarch64_inst *inst ATTRIBUTE_UNUSED)
953{
954 insert_field (self->fields[0], code, info->addr.base_regno, 0);
955 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
956 return NULL;
957}
958
959/* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
960 <shift> is SELF's operand-dependent value. fields[0] specifies the
961 base register field, fields[1] specifies the offset register field and
962 fields[2] is a single-bit field that selects SXTW over UXTW. */
963const char *
964aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
965 const aarch64_opnd_info *info, aarch64_insn *code,
966 const aarch64_inst *inst ATTRIBUTE_UNUSED)
967{
968 insert_field (self->fields[0], code, info->addr.base_regno, 0);
969 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
970 if (info->shifter.kind == AARCH64_MOD_UXTW)
971 insert_field (self->fields[2], code, 0, 0);
972 else
973 insert_field (self->fields[2], code, 1, 0);
974 return NULL;
975}
976
977/* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
978 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
979 fields[0] specifies the base register field. */
980const char *
981aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
982 const aarch64_opnd_info *info, aarch64_insn *code,
983 const aarch64_inst *inst ATTRIBUTE_UNUSED)
984{
985 int factor = 1 << get_operand_specific_data (self);
986 insert_field (self->fields[0], code, info->addr.base_regno, 0);
987 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
988 return NULL;
989}
990
991/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
992 where <modifier> is fixed by the instruction and where <msz> is a
993 2-bit unsigned number. fields[0] specifies the base register field
994 and fields[1] specifies the offset register field. */
995static const char *
996aarch64_ext_sve_addr_zz (const aarch64_operand *self,
997 const aarch64_opnd_info *info, aarch64_insn *code)
998{
999 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1000 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1001 insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
1002 return NULL;
1003}
1004
1005/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1006 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1007 field and fields[1] specifies the offset register field. */
1008const char *
1009aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
1010 const aarch64_opnd_info *info, aarch64_insn *code,
1011 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1012{
1013 return aarch64_ext_sve_addr_zz (self, info, code);
1014}
1015
1016/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1017 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1018 field and fields[1] specifies the offset register field. */
1019const char *
1020aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
1021 const aarch64_opnd_info *info,
1022 aarch64_insn *code,
1023 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1024{
1025 return aarch64_ext_sve_addr_zz (self, info, code);
1026}
1027
1028/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1029 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1030 field and fields[1] specifies the offset register field. */
1031const char *
1032aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
1033 const aarch64_opnd_info *info,
1034 aarch64_insn *code,
1035 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1036{
1037 return aarch64_ext_sve_addr_zz (self, info, code);
1038}
1039
1040/* Encode an SVE ADD/SUB immediate. */
1041const char *
1042aarch64_ins_sve_aimm (const aarch64_operand *self,
1043 const aarch64_opnd_info *info, aarch64_insn *code,
1044 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1045{
1046 if (info->shifter.amount == 8)
1047 insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
1048 else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
1049 insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
1050 else
1051 insert_all_fields (self, code, info->imm.value & 0xff);
1052 return NULL;
1053}
1054
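/* Worked example: for ADD Z0.H, Z0.H, #3, LSL #8 the shifter amount is 8,
   so the value handed to insert_all_fields is (3 & 0xff) | 256 = 0x103:
   bit 8 is the shift flag and the low eight bits are the immediate.
   Writing the operand as #768 produces the same encoding via the second
   branch.  */
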
1055/* Encode an SVE CPY/DUP immediate. */
1056const char *
1057aarch64_ins_sve_asimm (const aarch64_operand *self,
1058 const aarch64_opnd_info *info, aarch64_insn *code,
1059 const aarch64_inst *inst)
1060{
1061 return aarch64_ins_sve_aimm (self, info, code, inst);
1062}
1063
1064/* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1065 array specifies which field to use for Zn. MM is encoded in the
1066 concatenation of imm5 and SVE_tszh, with imm5 being the less
1067 significant part. */
1068const char *
1069aarch64_ins_sve_index (const aarch64_operand *self,
1070 const aarch64_opnd_info *info, aarch64_insn *code,
1071 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1072{
1073 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1074 insert_field (self->fields[0], code, info->reglane.regno, 0);
1075 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1076 2, FLD_imm5, FLD_SVE_tszh);
1077 return NULL;
1078}
1079
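/* Worked example (using an indexed DUP as the illustration): for
   DUP Z0.H, Z1.H[3] the element size is 2, so the value split across imm5
   and SVE_tszh is (3 * 2 + 1) * 2 = 14 = 0b01110; the lowest set bit marks
   the .H element size and the bits above it hold the index 3.  */
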
1080/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1081const char *
1082aarch64_ins_sve_limm_mov (const aarch64_operand *self,
1083 const aarch64_opnd_info *info, aarch64_insn *code,
1084 const aarch64_inst *inst)
1085{
1086 return aarch64_ins_limm (self, info, code, inst);
1087}
1088
1089/* Encode Zn[MM], where Zn occupies the least-significant part of the field
1090 and where MM occupies the most-significant part. The operand-dependent
1091 value specifies the number of bits in Zn. */
1092const char *
1093aarch64_ins_sve_quad_index (const aarch64_operand *self,
1094 const aarch64_opnd_info *info, aarch64_insn *code,
1095 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1096{
1097 unsigned int reg_bits = get_operand_specific_data (self);
1098 assert (info->reglane.regno < (1U << reg_bits));
1099 unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1100 insert_all_fields (self, code, val);
1101 return NULL;
1102}
1103
1104/* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1105 to use for Zn. */
1106const char *
1107aarch64_ins_sve_reglist (const aarch64_operand *self,
1108 const aarch64_opnd_info *info, aarch64_insn *code,
1109 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1110{
1111 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
1112 return NULL;
1113}
1114
1115/* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1116 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1117 field. */
1118const char *
1119aarch64_ins_sve_scale (const aarch64_operand *self,
1120 const aarch64_opnd_info *info, aarch64_insn *code,
1121 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1122{
1123 insert_all_fields (self, code, info->imm.value);
1124 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
1125 return NULL;
1126}
1127
1128/* Encode an SVE shift left immediate. */
1129const char *
1130aarch64_ins_sve_shlimm (const aarch64_operand *self,
1131 const aarch64_opnd_info *info, aarch64_insn *code,
1132 const aarch64_inst *inst)
1133{
1134 const aarch64_opnd_info *prev_operand;
1135 unsigned int esize;
1136
1137 assert (info->idx > 0);
1138 prev_operand = &inst->operands[info->idx - 1];
1139 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1140 insert_all_fields (self, code, 8 * esize + info->imm.value);
1141 return NULL;
1142}
1143
1144/* Encode an SVE shift right immediate. */
1145const char *
1146aarch64_ins_sve_shrimm (const aarch64_operand *self,
1147 const aarch64_opnd_info *info, aarch64_insn *code,
1148 const aarch64_inst *inst)
1149{
1150 const aarch64_opnd_info *prev_operand;
1151 unsigned int esize;
1152
1153 assert (info->idx > 0);
1154 prev_operand = &inst->operands[info->idx - 1];
1155 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1156 insert_all_fields (self, code, 16 * esize - info->imm.value);
1157 return NULL;
1158}
1159
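/* Worked examples: with a .B element size (esize = 1) the left-shift form
   LSL Z0.B, Z0.B, #3 inserts 8 * 1 + 3 = 11, while the right-shift form
   LSR Z0.H, Z0.H, #3 (esize = 2) inserts 16 * 2 - 3 = 29.  */
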
1160/* Encode a single-bit immediate that selects between #0.5 and #1.0.
1161 The fields array specifies which field to use. */
1162const char *
1163aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1164 const aarch64_opnd_info *info,
1165 aarch64_insn *code,
1166 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1167{
1168 if (info->imm.value == 0x3f000000)
1169 insert_field (self->fields[0], code, 0, 0);
1170 else
1171 insert_field (self->fields[0], code, 1, 0);
1172 return NULL;
1173}
1174
1175/* Encode a single-bit immediate that selects between #0.5 and #2.0.
1176 The fields array specifies which field to use. */
1177const char *
1178aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1179 const aarch64_opnd_info *info,
1180 aarch64_insn *code,
1181 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1182{
1183 if (info->imm.value == 0x3f000000)
1184 insert_field (self->fields[0], code, 0, 0);
1185 else
1186 insert_field (self->fields[0], code, 1, 0);
1187 return NULL;
1188}
1189
1190/* Encode a single-bit immediate that selects between #0.0 and #1.0.
1191 The fields array specifies which field to use. */
1192const char *
1193aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1194 const aarch64_opnd_info *info,
1195 aarch64_insn *code,
1196 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1197{
1198 if (info->imm.value == 0)
1199 insert_field (self->fields[0], code, 0, 0);
1200 else
1201 insert_field (self->fields[0], code, 1, 0);
1202 return NULL;
1203}
1204
1205/* Miscellaneous encoding functions. */
1206
1207/* Encode size[0], i.e. bit 22, for
1208 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1209
1210static void
1211encode_asimd_fcvt (aarch64_inst *inst)
1212{
1213 aarch64_insn value;
1214 aarch64_field field = {0, 0};
1215 enum aarch64_opnd_qualifier qualifier;
1216
1217 switch (inst->opcode->op)
1218 {
1219 case OP_FCVTN:
1220 case OP_FCVTN2:
1221 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1222 qualifier = inst->operands[1].qualifier;
1223 break;
1224 case OP_FCVTL:
1225 case OP_FCVTL2:
1226 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1227 qualifier = inst->operands[0].qualifier;
1228 break;
1229 default:
1230 assert (0);
1231 }
1232 assert (qualifier == AARCH64_OPND_QLF_V_4S
1233 || qualifier == AARCH64_OPND_QLF_V_2D);
1234 value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
1235 gen_sub_field (FLD_size, 0, 1, &field);
1236 insert_field_2 (&field, &inst->value, value, 0);
1237}
1238
1239/* Encode size[0], i.e. bit 22, for
1240 e.g. FCVTXN <Vb><d>, <Va><n>. */
1241
1242static void
1243encode_asisd_fcvtxn (aarch64_inst *inst)
1244{
1245 aarch64_insn val = 1;
1246 aarch64_field field = {0, 0};
1247 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1248 gen_sub_field (FLD_size, 0, 1, &field);
1249 insert_field_2 (&field, &inst->value, val, 0);
1250}
1251
1252/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1253static void
1254encode_fcvt (aarch64_inst *inst)
1255{
1256 aarch64_insn val;
1257 const aarch64_field field = {15, 2};
1258
1259 /* opc dstsize */
1260 switch (inst->operands[0].qualifier)
1261 {
1262 case AARCH64_OPND_QLF_S_S: val = 0; break;
1263 case AARCH64_OPND_QLF_S_D: val = 1; break;
1264 case AARCH64_OPND_QLF_S_H: val = 3; break;
1265 default: abort ();
1266 }
1267 insert_field_2 (&field, &inst->value, val, 0);
1268
1269 return;
1270}
1271
1272/* Return the index in qualifiers_list that INST is using. Should only
1273 be called once the qualifiers are known to be valid. */
1274
1275static int
1276aarch64_get_variant (struct aarch64_inst *inst)
1277{
1278 int i, nops, variant;
1279
1280 nops = aarch64_num_of_operands (inst->opcode);
1281 for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
1282 {
1283 for (i = 0; i < nops; ++i)
1284 if (inst->opcode->qualifiers_list[variant][i]
1285 != inst->operands[i].qualifier)
1286 break;
1287 if (i == nops)
1288 return variant;
1289 }
1290 abort ();
1291}
1292
1293/* Do miscellaneous encodings that are not common enough to be driven by
1294 flags. */
1295
1296static void
1297do_misc_encoding (aarch64_inst *inst)
1298{
1299 unsigned int value;
1300
1301 switch (inst->opcode->op)
1302 {
1303 case OP_FCVT:
1304 encode_fcvt (inst);
1305 break;
1306 case OP_FCVTN:
1307 case OP_FCVTN2:
1308 case OP_FCVTL:
1309 case OP_FCVTL2:
1310 encode_asimd_fcvt (inst);
1311 break;
1312 case OP_FCVTXN_S:
1313 encode_asisd_fcvtxn (inst);
1314 break;
1315 case OP_MOV_P_P:
1316 case OP_MOVS_P_P:
1317 /* Copy Pn to Pm and Pg. */
1318 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1319 insert_field (FLD_SVE_Pm, &inst->value, value, 0);
1320 insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
1321 break;
1322 case OP_MOV_Z_P_Z:
1323 /* Copy Zd to Zm. */
1324 value = extract_field (FLD_SVE_Zd, inst->value, 0);
1325 insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
1326 break;
1327 case OP_MOV_Z_V:
1328 /* Fill in the zero immediate. */
1329 insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
1330 2, FLD_imm5, FLD_SVE_tszh);
1331 break;
1332 case OP_MOV_Z_Z:
1333 /* Copy Zn to Zm. */
1334 value = extract_field (FLD_SVE_Zn, inst->value, 0);
1335 insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
1336 break;
1337 case OP_MOV_Z_Zi:
1338 break;
1339 case OP_MOVM_P_P_P:
1340 /* Copy Pd to Pm. */
1341 value = extract_field (FLD_SVE_Pd, inst->value, 0);
1342 insert_field (FLD_SVE_Pm, &inst->value, value, 0);
1343 break;
1344 case OP_MOVZS_P_P_P:
1345 case OP_MOVZ_P_P_P:
1346 /* Copy Pn to Pm. */
1347 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1348 insert_field (FLD_SVE_Pm, &inst->value, value, 0);
1349 break;
1350 case OP_NOTS_P_P_P_Z:
1351 case OP_NOT_P_P_P_Z:
1352 /* Copy Pg to Pm. */
1353 value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
1354 insert_field (FLD_SVE_Pm, &inst->value, value, 0);
1355 break;
1356 default: break;
1357 }
1358}
1359
1360/* Encode the 'size' and 'Q' field for e.g. SHADD. */
1361static void
1362encode_sizeq (aarch64_inst *inst)
1363{
1364 aarch64_insn sizeq;
1365 enum aarch64_field_kind kind;
1366 int idx;
1367
1368 /* Get the index of the operand whose information we are going to use
1369 to encode the size and Q fields.
1370 This is deduced from the possible valid qualifier lists. */
1371 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1372 DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
1373 aarch64_get_qualifier_name (inst->operands[idx].qualifier));
1374 sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
1375 /* Q */
1376 insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
1377 /* size */
1378 if (inst->opcode->iclass == asisdlse
1379 || inst->opcode->iclass == asisdlsep
1380 || inst->opcode->iclass == asisdlso
1381 || inst->opcode->iclass == asisdlsop)
1382 kind = FLD_vldst_size;
1383 else
1384 kind = FLD_size;
1385 insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
1386}
1387
1388/* Opcodes that have fields shared by multiple operands are usually flagged
1389 with flags. In this function, we detect such flags and use the
1390 information in one of the related operands to do the encoding. The 'one'
 1391 operand is not an arbitrary operand, but one of the operands that has
 1392 enough information for such an encoding. */
1393
1394static void
1395do_special_encoding (struct aarch64_inst *inst)
1396{
1397 int idx;
 1398 aarch64_insn value = 0;
1399
1400 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
1401
1402 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1403 if (inst->opcode->flags & F_COND)
1404 {
1405 insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
1406 }
1407 if (inst->opcode->flags & F_SF)
1408 {
1409 idx = select_operand_for_sf_field_coding (inst->opcode);
1410 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
1411 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
1412 ? 1 : 0;
1413 insert_field (FLD_sf, &inst->value, value, 0);
1414 if (inst->opcode->flags & F_N)
1415 insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
1416 }
1417 if (inst->opcode->flags & F_LSE_SZ)
1418 {
1419 idx = select_operand_for_sf_field_coding (inst->opcode);
1420 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
1421 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
1422 ? 1 : 0;
1423 insert_field (FLD_lse_sz, &inst->value, value, 0);
1424 }
1425 if (inst->opcode->flags & F_SIZEQ)
1426 encode_sizeq (inst);
1427 if (inst->opcode->flags & F_FPTYPE)
1428 {
1429 idx = select_operand_for_fptype_field_coding (inst->opcode);
1430 switch (inst->operands[idx].qualifier)
1431 {
1432 case AARCH64_OPND_QLF_S_S: value = 0; break;
1433 case AARCH64_OPND_QLF_S_D: value = 1; break;
1434 case AARCH64_OPND_QLF_S_H: value = 3; break;
1435 default: assert (0);
1436 }
1437 insert_field (FLD_type, &inst->value, value, 0);
1438 }
1439 if (inst->opcode->flags & F_SSIZE)
1440 {
1441 enum aarch64_opnd_qualifier qualifier;
1442 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1443 qualifier = inst->operands[idx].qualifier;
1444 assert (qualifier >= AARCH64_OPND_QLF_S_B
1445 && qualifier <= AARCH64_OPND_QLF_S_Q);
1446 value = aarch64_get_qualifier_standard_value (qualifier);
1447 insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
1448 }
1449 if (inst->opcode->flags & F_T)
1450 {
1451 int num; /* num of consecutive '0's on the right side of imm5<3:0>. */
1452 aarch64_field field = {0, 0};
1453 enum aarch64_opnd_qualifier qualifier;
1454
1455 idx = 0;
1456 qualifier = inst->operands[idx].qualifier;
1457 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1458 == AARCH64_OPND_CLASS_SIMD_REG
1459 && qualifier >= AARCH64_OPND_QLF_V_8B
1460 && qualifier <= AARCH64_OPND_QLF_V_2D);
1461 /* imm5<3:0> q <t>
1462 0000 x reserved
1463 xxx1 0 8b
1464 xxx1 1 16b
1465 xx10 0 4h
1466 xx10 1 8h
1467 x100 0 2s
1468 x100 1 4s
1469 1000 0 reserved
1470 1000 1 2d */
1471 value = aarch64_get_qualifier_standard_value (qualifier);
1472 insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
1473 num = (int) value >> 1;
1474 assert (num >= 0 && num <= 3);
1475 gen_sub_field (FLD_imm5, 0, num + 1, &field);
1476 insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
1477 }
1478 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1479 {
1480 /* Use Rt to encode in the case of e.g.
1481 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1482 enum aarch64_opnd_qualifier qualifier;
1483 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1484 if (idx == -1)
 1485 /* Otherwise use the result operand, which has to be an integer
 1486 register. */
1487 idx = 0;
1488 assert (idx == 0 || idx == 1);
1489 assert (aarch64_get_operand_class (inst->opcode->operands[idx])
1490 == AARCH64_OPND_CLASS_INT_REG);
1491 qualifier = inst->operands[idx].qualifier;
1492 insert_field (FLD_Q, &inst->value,
1493 aarch64_get_qualifier_standard_value (qualifier), 0);
1494 }
1495 if (inst->opcode->flags & F_LDS_SIZE)
1496 {
1497 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1498 enum aarch64_opnd_qualifier qualifier;
1499 aarch64_field field = {0, 0};
1500 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1501 == AARCH64_OPND_CLASS_INT_REG);
1502 gen_sub_field (FLD_opc, 0, 1, &field);
1503 qualifier = inst->operands[0].qualifier;
1504 insert_field_2 (&field, &inst->value,
1505 1 - aarch64_get_qualifier_standard_value (qualifier), 0);
1506 }
1507 /* Miscellaneous encoding as the last step. */
1508 if (inst->opcode->flags & F_MISC)
1509 do_misc_encoding (inst);
1510
1511 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
1512}
1513
1514/* Some instructions (including all SVE ones) use the instruction class
1515 to describe how a qualifiers_list index is represented in the instruction
1516 encoding. If INST is such an instruction, encode the chosen qualifier
1517 variant. */
1518
1519static void
1520aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
1521{
1522 switch (inst->opcode->iclass)
1523 {
1524 case sve_cpy:
1525 insert_fields (&inst->value, aarch64_get_variant (inst),
1526 0, 2, FLD_SVE_M_14, FLD_size);
1527 break;
1528
1529 case sve_index:
1530 case sve_shift_pred:
1531 case sve_shift_unpred:
1532 /* For indices and shift amounts, the variant is encoded as
1533 part of the immediate. */
1534 break;
1535
1536 case sve_limm:
1537 /* For sve_limm, the .B, .H, and .S forms are just a convenience
1538 and depend on the immediate. They don't have a separate
1539 encoding. */
1540 break;
1541
1542 case sve_misc:
1543 /* sve_misc instructions have only a single variant. */
1544 break;
1545
1546 case sve_movprfx:
1547 insert_fields (&inst->value, aarch64_get_variant (inst),
1548 0, 2, FLD_SVE_M_16, FLD_size);
1549 break;
1550
1551 case sve_pred_zm:
1552 insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
1553 break;
1554
1555 case sve_size_bhs:
1556 case sve_size_bhsd:
1557 insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
1558 break;
1559
1560 case sve_size_hsd:
1561 insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
1562 break;
1563
1564 case sve_size_sd:
1565 insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
1566 break;
1567
1568 default:
1569 break;
1570 }
1571}
1572
1573/* Converters converting an alias opcode instruction to its real form. */
1574
1575/* ROR <Wd>, <Ws>, #<shift>
1576 is equivalent to:
1577 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1578static void
1579convert_ror_to_extr (aarch64_inst *inst)
1580{
1581 copy_operand_info (inst, 3, 2);
1582 copy_operand_info (inst, 2, 1);
1583}
1584
1585/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1586 is equivalent to:
1587 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1588static void
1589convert_xtl_to_shll (aarch64_inst *inst)
1590{
1591 inst->operands[2].qualifier = inst->operands[1].qualifier;
1592 inst->operands[2].imm.value = 0;
1593}
1594
1595/* Convert
1596 LSR <Xd>, <Xn>, #<shift>
1597 to
1598 UBFM <Xd>, <Xn>, #<shift>, #63. */
1599static void
1600convert_sr_to_bfm (aarch64_inst *inst)
1601{
1602 inst->operands[3].imm.value =
1603 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1604}
1605
1606/* Convert MOV to ORR. */
1607static void
1608convert_mov_to_orr (aarch64_inst *inst)
1609{
1610 /* MOV <Vd>.<T>, <Vn>.<T>
1611 is equivalent to:
1612 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1613 copy_operand_info (inst, 2, 1);
1614}
1615
1616/* When <imms> >= <immr>, the instruction written:
1617 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1618 is equivalent to:
1619 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1620
1621static void
1622convert_bfx_to_bfm (aarch64_inst *inst)
1623{
1624 int64_t lsb, width;
1625
1626 /* Convert the operand. */
1627 lsb = inst->operands[2].imm.value;
1628 width = inst->operands[3].imm.value;
1629 inst->operands[2].imm.value = lsb;
1630 inst->operands[3].imm.value = lsb + width - 1;
1631}
1632
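/* Worked example: SBFX X0, X1, #4, #8 is rewritten as
   SBFM X0, X1, #4, #(4 + 8 - 1), i.e. SBFM X0, X1, #4, #11.  */
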
1633/* When <imms> < <immr>, the instruction written:
1634 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1635 is equivalent to:
1636 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1637
1638static void
1639convert_bfi_to_bfm (aarch64_inst *inst)
1640{
1641 int64_t lsb, width;
1642
1643 /* Convert the operand. */
1644 lsb = inst->operands[2].imm.value;
1645 width = inst->operands[3].imm.value;
1646 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1647 {
1648 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1649 inst->operands[3].imm.value = width - 1;
1650 }
1651 else
1652 {
1653 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1654 inst->operands[3].imm.value = width - 1;
1655 }
1656}
1657
1658/* The instruction written:
1659 BFC <Xd>, #<lsb>, #<width>
1660 is equivalent to:
1661 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1662
1663static void
1664convert_bfc_to_bfm (aarch64_inst *inst)
1665{
1666 int64_t lsb, width;
1667
1668 /* Insert XZR. */
1669 copy_operand_info (inst, 3, 2);
1670 copy_operand_info (inst, 2, 1);
 1671 copy_operand_info (inst, 1, 0);
1672 inst->operands[1].reg.regno = 0x1f;
1673
 1674 /* Convert the immediate operand. */
1675 lsb = inst->operands[2].imm.value;
1676 width = inst->operands[3].imm.value;
1677 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1678 {
1679 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1680 inst->operands[3].imm.value = width - 1;
1681 }
1682 else
1683 {
1684 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1685 inst->operands[3].imm.value = width - 1;
1686 }
1687}
1688
/* The instruction written:
     LSL <Xd>, <Xn>, #<shift>
   is equivalent to:
     UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
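/* For example "lsl x0, x1, #3" is encoded as "ubfm x0, x1, #61, #60";
   the 32-bit form uses #((32-<shift>)&0x1f) and #(31-<shift>) instead.  */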

static void
convert_lsl_to_ubfm (aarch64_inst *inst)
{
  int64_t shift = inst->operands[2].imm.value;

  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - shift) & 0x1f;
      inst->operands[3].imm.value = 31 - shift;
    }
  else
    {
      inst->operands[2].imm.value = (64 - shift) & 0x3f;
      inst->operands[3].imm.value = 63 - shift;
    }
}

/* CINC <Wd>, <Wn>, <cond>
     is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */
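/* CINV and CNEG are converted the same way, with CSINV and CSNEG as the
   real opcodes; for example "cinc w0, w1, eq" is encoded as
   "csinc w0, w1, w1, ne".  */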

static void
convert_to_csel (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}

/* CSET <Wd>, <cond>
     is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */
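/* CSETM is handled the same way, with CSINV as the real opcode; for
   example "cset w0, eq" is encoded as "csinc w0, wzr, wzr, ne".  */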

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}

/* MOV <Wd>, #<imm>
     is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>.  */
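/* For the OP_MOV_IMM_WIDEN variant the immediate is first inverted below, so
   the alias resolves to MOVN rather than MOVZ.  For example "mov x0, #0x20000"
   is encoded as "movz x0, #0x2, lsl #16".  */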

static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value;

  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      value = ~inst->operands[1].imm.value;
      break;
    default:
      assert (0);
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
}

/* MOV <Wd>, #<imm>
     is equivalent to:
   ORR <Wd>, WZR, #<imm>.  */
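/* This form is used when the immediate is a valid logical (bitmask)
   immediate; for example "mov x0, #0xff" is encoded as "orr x0, xzr, #0xff".
   The converter inserts WZR/XZR (register number 0x1f) as the first source.  */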

static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
}

/* Some alias opcodes are assembled by being converted to their real form.  */

static void
convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
{
  const aarch64_opcode *alias = inst->opcode;

  if ((alias->flags & F_CONV) == 0)
    goto convert_to_real_return;

  switch (alias->op)
    {
    case OP_ASR_IMM:
    case OP_LSR_IMM:
      convert_sr_to_bfm (inst);
      break;
    case OP_LSL_IMM:
      convert_lsl_to_ubfm (inst);
      break;
    case OP_CINC:
    case OP_CINV:
    case OP_CNEG:
      convert_to_csel (inst);
      break;
    case OP_CSET:
    case OP_CSETM:
      convert_cset_to_csinc (inst);
      break;
    case OP_UBFX:
    case OP_BFXIL:
    case OP_SBFX:
      convert_bfx_to_bfm (inst);
      break;
    case OP_SBFIZ:
    case OP_BFI:
    case OP_UBFIZ:
      convert_bfi_to_bfm (inst);
      break;
    case OP_BFC:
      convert_bfc_to_bfm (inst);
      break;
    case OP_MOV_V:
      convert_mov_to_orr (inst);
      break;
    case OP_MOV_IMM_WIDE:
    case OP_MOV_IMM_WIDEN:
      convert_mov_to_movewide (inst);
      break;
    case OP_MOV_IMM_LOG:
      convert_mov_to_movebitmask (inst);
      break;
    case OP_ROR_IMM:
      convert_ror_to_extr (inst);
      break;
    case OP_SXTL:
    case OP_SXTL2:
    case OP_UXTL:
    case OP_UXTL2:
      convert_xtl_to_shll (inst);
      break;
    default:
      break;
    }

convert_to_real_return:
  aarch64_replace_opcode (inst, real);
}

/* Encode *INST_ORI of the opcode OPCODE.
   Return the encoded result in *CODE and, if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.  */
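/* The return value is 1 on success; when the operands fail the constraint
   check below the function returns 0, with any diagnostic recorded in
   *MISMATCH_DETAIL (if non-NULL) by the constraint checker.  */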

int
aarch64_opcode_encode (const aarch64_opcode *opcode,
		       const aarch64_inst *inst_ori, aarch64_insn *code,
		       aarch64_opnd_qualifier_t *qlf_seq,
		       aarch64_operand_error *mismatch_detail)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return 0;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operands.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
	*qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of the real
     opcode and the encoding will be carried out using the rules for the
     aliased opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
		   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
      opcode = aliased;
    }

  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (info->skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd))
	aarch64_insert_operand (opnd, info, &inst->value, inst);
    }

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

  /* Possibly use the instruction class to encode the chosen qualifier
     variant.  */
  aarch64_encode_variant_using_iclass (inst);

encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  *code = inst->value;

  return 1;
}