[ARM] Assembler and disassembler support Dot Product Extension
[deliverable/binutils-gdb.git] / opcodes / aarch64-asm.c
 1/* aarch64-asm.c -- AArch64 assembler support.
 2 Copyright (C) 2012-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21#include "sysdep.h"
22#include <stdarg.h>
 23#include "libiberty.h"
24#include "aarch64-asm.h"
25
26/* Utilities. */
27
28/* The unnamed arguments consist of the number of fields and information about
29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
30 the base mask of the opcode.
31
 32 N.B. the fields are required to be in such an order that the least significant
 33 field for VALUE comes first, e.g. the <index> in
 34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
 35 is encoded in H:L:M; in such cases, the fields H:L:M should be passed in
 36 the order M, L, H. */
37
38static inline void
39insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
40{
41 uint32_t num;
42 const aarch64_field *field;
43 enum aarch64_field_kind kind;
44 va_list va;
45
46 va_start (va, mask);
47 num = va_arg (va, uint32_t);
48 assert (num <= 5);
49 while (num--)
50 {
51 kind = va_arg (va, enum aarch64_field_kind);
52 field = &fields[kind];
53 insert_field (kind, code, value, mask);
54 value >>= field->width;
55 }
56 va_end (va);
57}
58
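/* A worked example (values chosen for illustration): inserting a lane index
   of 5 (binary 101) into the H:L:M fields passes the least significant
   field first:

     insert_fields (code, 5, 0, 3, FLD_M, FLD_L, FLD_H);

   so bit 0 of the value (1) lands in M, bit 1 (0) in L and bit 2 (1) in H.  */
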
59/* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
61
62static void
63insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
64 aarch64_insn value)
65{
66 unsigned int i;
67 enum aarch64_field_kind kind;
68
69 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
70 if (self->fields[i] != FLD_NIL)
71 {
72 kind = self->fields[i];
73 insert_field (kind, code, value, 0);
74 value >>= fields[kind].width;
75 }
76}
77
78/* Operand inserters. */
79
80/* Insert register number. */
81const char *
82aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
83 aarch64_insn *code,
84 const aarch64_inst *inst ATTRIBUTE_UNUSED)
85{
86 insert_field (self->fields[0], code, info->reg.regno, 0);
87 return NULL;
88}
89
90/* Insert register number, index and/or other data for SIMD register element
91 operand, e.g. the last source operand in
92 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
93const char *
94aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
95 aarch64_insn *code, const aarch64_inst *inst)
96{
97 /* regno */
98 insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
99 /* index and/or type */
100 if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
101 {
102 int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
103 if (info->type == AARCH64_OPND_En
104 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
105 {
106 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
107 assert (info->idx == 1); /* Vn */
108 aarch64_insn value = info->reglane.index << pos;
109 insert_field (FLD_imm4, code, value, 0);
110 }
111 else
112 {
113 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
114 imm5<3:0> <V>
115 0000 RESERVED
116 xxx1 B
117 xx10 H
118 x100 S
119 1000 D */
120 aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
121 insert_field (FLD_imm5, code, value, 0);
122 }
123 }
124 else
125 {
126 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
127 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
 128 unsigned reglane_index = info->reglane.index;
 129
 130 if (inst->opcode->op == OP_FCMLA_ELEM)
 131 /* Complex operand takes two elements. */
 132 reglane_index *= 2;
 133
134 switch (info->qualifier)
135 {
136 case AARCH64_OPND_QLF_S_H:
137 /* H:L:M */
138 assert (reglane_index < 8);
139 insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
140 break;
141 case AARCH64_OPND_QLF_S_S:
142 /* H:L */
143 assert (reglane_index < 4);
144 insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
145 break;
146 case AARCH64_OPND_QLF_S_D:
147 /* H */
148 assert (reglane_index < 2);
149 insert_field (FLD_H, code, reglane_index, 0);
150 break;
151 default:
152 assert (0);
153 }
154 }
155 return NULL;
156}
157
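/* A worked example (illustrative): for DUP <Sd>, <Vn>.S[2] the qualifier is
   S_S, so POS is 2 and imm5 becomes ((2 << 1) | 1) << 2 = 0b10100, i.e. the
   "x100" pattern that marks an S element, with the index in the upper bits.  */
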
158/* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
159const char *
160aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
161 aarch64_insn *code,
162 const aarch64_inst *inst ATTRIBUTE_UNUSED)
163{
164 /* R */
165 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
166 /* len */
167 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
168 return NULL;
169}
170
171/* Insert Rt and opcode fields for a register list operand, e.g. Vt
172 in AdvSIMD load/store instructions. */
173const char *
174aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
175 const aarch64_opnd_info *info, aarch64_insn *code,
176 const aarch64_inst *inst)
177{
 178 aarch64_insn value = 0;
179 /* Number of elements in each structure to be loaded/stored. */
180 unsigned num = get_opcode_dependent_value (inst->opcode);
181
182 /* Rt */
183 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
184 /* opcode */
185 switch (num)
186 {
187 case 1:
188 switch (info->reglist.num_regs)
189 {
190 case 1: value = 0x7; break;
191 case 2: value = 0xa; break;
192 case 3: value = 0x6; break;
193 case 4: value = 0x2; break;
194 default: assert (0);
195 }
196 break;
197 case 2:
198 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
199 break;
200 case 3:
201 value = 0x4;
202 break;
203 case 4:
204 value = 0x0;
205 break;
206 default:
207 assert (0);
208 }
209 insert_field (FLD_opcode, code, value, 0);
210
211 return NULL;
212}
213
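/* For illustration: LD1 {V0.16B - V3.16B}, [X0] has one element per
   structure (NUM == 1) and four registers, so the opcode field above is
   set to 0x2, whereas LD4 {V0.16B - V3.16B}, [X0] has NUM == 4 and uses 0x0.  */
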
214/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
215 single structure to all lanes instructions. */
216const char *
217aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
218 const aarch64_opnd_info *info, aarch64_insn *code,
219 const aarch64_inst *inst)
220{
221 aarch64_insn value;
222 /* The opcode dependent area stores the number of elements in
223 each structure to be loaded/stored. */
224 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
225
226 /* Rt */
227 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
228 /* S */
229 value = (aarch64_insn) 0;
230 if (is_ld1r && info->reglist.num_regs == 2)
 231 /* OP_LD1R does not have an alternating variant, but has a
 232 "two consecutive" form instead. */
233 value = (aarch64_insn) 1;
234 insert_field (FLD_S, code, value, 0);
235
236 return NULL;
237}
238
239/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
240 operand e.g. Vt in AdvSIMD load/store single element instructions. */
241const char *
242aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
243 const aarch64_opnd_info *info, aarch64_insn *code,
244 const aarch64_inst *inst ATTRIBUTE_UNUSED)
245{
246 aarch64_field field = {0, 0};
 247 aarch64_insn QSsize = 0; /* fields Q:S:size. */
 248 aarch64_insn opcodeh2 = 0; /* opcode<2:1> */
249
250 assert (info->reglist.has_index);
251
252 /* Rt */
253 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
254 /* Encode the index, opcode<2:1> and size. */
255 switch (info->qualifier)
256 {
257 case AARCH64_OPND_QLF_S_B:
258 /* Index encoded in "Q:S:size". */
259 QSsize = info->reglist.index;
260 opcodeh2 = 0x0;
261 break;
262 case AARCH64_OPND_QLF_S_H:
263 /* Index encoded in "Q:S:size<1>". */
264 QSsize = info->reglist.index << 1;
265 opcodeh2 = 0x1;
266 break;
267 case AARCH64_OPND_QLF_S_S:
268 /* Index encoded in "Q:S". */
269 QSsize = info->reglist.index << 2;
270 opcodeh2 = 0x2;
271 break;
272 case AARCH64_OPND_QLF_S_D:
273 /* Index encoded in "Q". */
274 QSsize = info->reglist.index << 3 | 0x1;
275 opcodeh2 = 0x2;
276 break;
277 default:
278 assert (0);
279 }
280 insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
281 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
282 insert_field_2 (&field, code, opcodeh2, 0);
283
284 return NULL;
285}
286
287/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
288 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
289 or SSHR <V><d>, <V><n>, #<shift>. */
290const char *
291aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
292 const aarch64_opnd_info *info,
293 aarch64_insn *code, const aarch64_inst *inst)
294{
295 unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
296 aarch64_insn Q, imm;
297
298 if (inst->opcode->iclass == asimdshf)
299 {
300 /* Q
301 immh Q <T>
302 0000 x SEE AdvSIMD modified immediate
303 0001 0 8B
304 0001 1 16B
305 001x 0 4H
306 001x 1 8H
307 01xx 0 2S
308 01xx 1 4S
309 1xxx 0 RESERVED
310 1xxx 1 2D */
311 Q = (val & 0x1) ? 1 : 0;
312 insert_field (FLD_Q, code, Q, inst->opcode->mask);
313 val >>= 1;
314 }
315
316 assert (info->type == AARCH64_OPND_IMM_VLSR
317 || info->type == AARCH64_OPND_IMM_VLSL);
318
319 if (info->type == AARCH64_OPND_IMM_VLSR)
320 /* immh:immb
321 immh <shift>
322 0000 SEE AdvSIMD modified immediate
323 0001 (16-UInt(immh:immb))
324 001x (32-UInt(immh:immb))
325 01xx (64-UInt(immh:immb))
326 1xxx (128-UInt(immh:immb)) */
327 imm = (16 << (unsigned)val) - info->imm.value;
328 else
329 /* immh:immb
330 immh <shift>
331 0000 SEE AdvSIMD modified immediate
332 0001 (UInt(immh:immb)-8)
333 001x (UInt(immh:immb)-16)
334 01xx (UInt(immh:immb)-32)
335 1xxx (UInt(immh:immb)-64) */
336 imm = info->imm.value + (8 << (unsigned)val);
337 insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
338
339 return NULL;
340}
341
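/* A worked example (illustrative): SSHR V0.4S, V1.4S, #3 belongs to the
   asimdshf class, so Q is 1 for the 128-bit form and immh:immb is encoded
   as 64 - 3 = 61 (0b0111101), matching the "01xx" row of the table above.  */
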
342/* Insert fields for e.g. the immediate operands in
343 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
344const char *
345aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
346 aarch64_insn *code,
347 const aarch64_inst *inst ATTRIBUTE_UNUSED)
348{
349 int64_t imm;
350
351 imm = info->imm.value;
352 if (operand_need_shift_by_two (self))
353 imm >>= 2;
 354 insert_all_fields (self, code, imm);
355 return NULL;
356}
357
358/* Insert immediate and its shift amount for e.g. the last operand in
359 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
360const char *
361aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
 362 aarch64_insn *code, const aarch64_inst *inst)
363{
364 /* imm16 */
365 aarch64_ins_imm (self, info, code, inst);
366 /* hw */
367 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
368 return NULL;
369}
370
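/* For illustration: MOVZ W0, #0x1234, LSL #16 stores 0x1234 in imm16 and
   16 >> 4 = 1 in the hw field.  */
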
371/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
372 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
373const char *
374aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
375 const aarch64_opnd_info *info,
376 aarch64_insn *code,
377 const aarch64_inst *inst ATTRIBUTE_UNUSED)
378{
379 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
380 uint64_t imm = info->imm.value;
381 enum aarch64_modifier_kind kind = info->shifter.kind;
382 int amount = info->shifter.amount;
383 aarch64_field field = {0, 0};
384
385 /* a:b:c:d:e:f:g:h */
386 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
387 {
388 /* Either MOVI <Dd>, #<imm>
389 or MOVI <Vd>.2D, #<imm>.
390 <imm> is a 64-bit immediate
391 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
392 encoded in "a:b:c:d:e:f:g:h". */
393 imm = aarch64_shrink_expanded_imm8 (imm);
394 assert ((int)imm >= 0);
395 }
396 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
397
398 if (kind == AARCH64_MOD_NONE)
399 return NULL;
400
401 /* shift amount partially in cmode */
402 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
403 if (kind == AARCH64_MOD_LSL)
404 {
405 /* AARCH64_MOD_LSL: shift zeros. */
406 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
407 assert (esize == 4 || esize == 2 || esize == 1);
408 /* For 8-bit move immediate, the optional LSL #0 does not require
409 encoding. */
410 if (esize == 1)
411 return NULL;
412 amount >>= 3;
413 if (esize == 4)
414 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
415 else
416 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
417 }
418 else
419 {
420 /* AARCH64_MOD_MSL: shift ones. */
421 amount >>= 4;
422 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
423 }
424 insert_field_2 (&field, code, amount, 0);
425
426 return NULL;
427}
428
429/* Insert fields for an 8-bit floating-point immediate. */
430const char *
431aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
432 aarch64_insn *code,
433 const aarch64_inst *inst ATTRIBUTE_UNUSED)
434{
435 insert_all_fields (self, code, info->imm.value);
436 return NULL;
437}
438
 439/* Insert 1-bit rotation immediate (#90 or #270). */
 440const char *
 441aarch64_ins_imm_rotate1 (const aarch64_operand *self,
 442 const aarch64_opnd_info *info,
 443 aarch64_insn *code, const aarch64_inst *inst)
 444{
 445 uint64_t rot = (info->imm.value - 90) / 180;
 446 assert (rot < 2U);
 447 insert_field (self->fields[0], code, rot, inst->opcode->mask);
 448 return NULL;
 449}
 450
451/* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
452const char *
453aarch64_ins_imm_rotate2 (const aarch64_operand *self,
454 const aarch64_opnd_info *info,
455 aarch64_insn *code, const aarch64_inst *inst)
456{
457 uint64_t rot = info->imm.value / 90;
458 assert (rot < 4U);
459 insert_field (self->fields[0], code, rot, inst->opcode->mask);
460 return NULL;
461}
462
463/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
464 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
465const char *
466aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
467 aarch64_insn *code,
468 const aarch64_inst *inst ATTRIBUTE_UNUSED)
469{
470 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
471 return NULL;
472}
473
474/* Insert arithmetic immediate for e.g. the last operand in
475 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
476const char *
477aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
478 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
479{
480 /* shift */
481 aarch64_insn value = info->shifter.amount ? 1 : 0;
482 insert_field (self->fields[0], code, value, 0);
483 /* imm12 (unsigned) */
484 insert_field (self->fields[1], code, info->imm.value, 0);
485 return NULL;
486}
487
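/* For illustration: SUBS X0, X1, #1, LSL #12 sets the shift field to 1 and
   imm12 to 1; without the LSL #12 the shift field would be 0.  */
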
488/* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
489 the operand should be inverted before encoding. */
490static const char *
491aarch64_ins_limm_1 (const aarch64_operand *self,
492 const aarch64_opnd_info *info, aarch64_insn *code,
493 const aarch64_inst *inst, bfd_boolean invert_p)
494{
495 aarch64_insn value;
496 uint64_t imm = info->imm.value;
 497 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
 498
 499 if (invert_p)
 500 imm = ~imm;
501 /* The constraint check should have guaranteed this wouldn't happen. */
502 assert (aarch64_logical_immediate_p (imm, esize, &value));
503
504 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
505 self->fields[0]);
506 return NULL;
507}
508
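/* A worked example (illustrative): ORR W0, W1, #0xffff is a 32-bit bitmask
   immediate of sixteen consecutive ones with no rotation, whose
   architectural encoding is N = 0, immr = 0b000000, imms = 0b001111; those
   are the bits that end up in the three fields inserted above.  */
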
509/* Insert logical/bitmask immediate for e.g. the last operand in
510 ORR <Wd|WSP>, <Wn>, #<imm>. */
511const char *
512aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
513 aarch64_insn *code, const aarch64_inst *inst)
514{
515 return aarch64_ins_limm_1 (self, info, code, inst,
516 inst->opcode->op == OP_BIC);
517}
518
519/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
520const char *
521aarch64_ins_inv_limm (const aarch64_operand *self,
522 const aarch64_opnd_info *info, aarch64_insn *code,
523 const aarch64_inst *inst)
524{
525 return aarch64_ins_limm_1 (self, info, code, inst, TRUE);
526}
527
528/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
529 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
530const char *
531aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
532 aarch64_insn *code, const aarch64_inst *inst)
533{
 534 aarch64_insn value = 0;
535
536 assert (info->idx == 0);
537
538 /* Rt */
539 aarch64_ins_regno (self, info, code, inst);
540 if (inst->opcode->iclass == ldstpair_indexed
541 || inst->opcode->iclass == ldstnapair_offs
542 || inst->opcode->iclass == ldstpair_off
543 || inst->opcode->iclass == loadlit)
544 {
545 /* size */
546 switch (info->qualifier)
547 {
548 case AARCH64_OPND_QLF_S_S: value = 0; break;
549 case AARCH64_OPND_QLF_S_D: value = 1; break;
550 case AARCH64_OPND_QLF_S_Q: value = 2; break;
551 default: assert (0);
552 }
553 insert_field (FLD_ldst_size, code, value, 0);
554 }
555 else
556 {
557 /* opc[1]:size */
558 value = aarch64_get_qualifier_standard_value (info->qualifier);
559 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
560 }
561
562 return NULL;
563}
564
565/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
566const char *
567aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
568 const aarch64_opnd_info *info, aarch64_insn *code,
569 const aarch64_inst *inst ATTRIBUTE_UNUSED)
570{
571 /* Rn */
572 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
573 return NULL;
574}
575
576/* Encode the address operand for e.g.
577 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
578const char *
579aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
580 const aarch64_opnd_info *info, aarch64_insn *code,
581 const aarch64_inst *inst ATTRIBUTE_UNUSED)
582{
583 aarch64_insn S;
584 enum aarch64_modifier_kind kind = info->shifter.kind;
585
586 /* Rn */
587 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
588 /* Rm */
589 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
590 /* option */
591 if (kind == AARCH64_MOD_LSL)
 592 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven lookup below. */
593 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
594 /* S */
595 if (info->qualifier != AARCH64_OPND_QLF_S_B)
596 S = info->shifter.amount != 0;
597 else
 598 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}],
599 S <amount>
600 0 [absent]
601 1 #0
602 Must be #0 if <extend> is explicitly LSL. */
603 S = info->shifter.operator_present && info->shifter.amount_present;
604 insert_field (FLD_S, code, S, 0);
605
606 return NULL;
607}
608
609/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
610const char *
611aarch64_ins_addr_simm (const aarch64_operand *self,
612 const aarch64_opnd_info *info,
613 aarch64_insn *code,
614 const aarch64_inst *inst ATTRIBUTE_UNUSED)
615{
616 int imm;
617
618 /* Rn */
619 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
620 /* simm (imm9 or imm7) */
621 imm = info->addr.offset.imm;
622 if (self->fields[0] == FLD_imm7)
 623 /* Scaled immediate in ld/st pair instructions. */
624 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
625 insert_field (self->fields[0], code, imm, 0);
626 /* pre/post- index */
627 if (info->addr.writeback)
628 {
629 assert (inst->opcode->iclass != ldst_unscaled
630 && inst->opcode->iclass != ldstnapair_offs
631 && inst->opcode->iclass != ldstpair_off
632 && inst->opcode->iclass != ldst_unpriv);
633 assert (info->addr.preind != info->addr.postind);
634 if (info->addr.preind)
635 insert_field (self->fields[1], code, 1, 0);
636 }
637
638 return NULL;
639}
640
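/* For illustration: LDP X0, X1, [SP, #16] uses the scaled imm7 form, so the
   byte offset 16 is shifted right by log2(8) and imm7 is set to 2.  */
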
641/* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
642const char *
643aarch64_ins_addr_simm10 (const aarch64_operand *self,
644 const aarch64_opnd_info *info,
645 aarch64_insn *code,
646 const aarch64_inst *inst ATTRIBUTE_UNUSED)
647{
648 int imm;
649
650 /* Rn */
651 insert_field (self->fields[0], code, info->addr.base_regno, 0);
652 /* simm10 */
653 imm = info->addr.offset.imm >> 3;
654 insert_field (self->fields[1], code, imm >> 9, 0);
655 insert_field (self->fields[2], code, imm, 0);
656 /* writeback */
657 if (info->addr.writeback)
658 {
659 assert (info->addr.preind == 1 && info->addr.postind == 0);
660 insert_field (self->fields[3], code, 1, 0);
661 }
662 return NULL;
663}
664
665/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
666const char *
667aarch64_ins_addr_uimm12 (const aarch64_operand *self,
668 const aarch64_opnd_info *info,
669 aarch64_insn *code,
670 const aarch64_inst *inst ATTRIBUTE_UNUSED)
671{
672 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
673
674 /* Rn */
675 insert_field (self->fields[0], code, info->addr.base_regno, 0);
676 /* uimm12 */
677 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
678 return NULL;
679}
680
681/* Encode the address operand for e.g.
682 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
683const char *
684aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
685 const aarch64_opnd_info *info, aarch64_insn *code,
686 const aarch64_inst *inst ATTRIBUTE_UNUSED)
687{
688 /* Rn */
689 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
690 /* Rm | #<amount> */
691 if (info->addr.offset.is_reg)
692 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
693 else
694 insert_field (FLD_Rm, code, 0x1f, 0);
695 return NULL;
696}
697
698/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
699const char *
700aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
701 const aarch64_opnd_info *info, aarch64_insn *code,
702 const aarch64_inst *inst ATTRIBUTE_UNUSED)
703{
704 /* cond */
705 insert_field (FLD_cond, code, info->cond->value, 0);
706 return NULL;
707}
708
709/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
710const char *
711aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
712 const aarch64_opnd_info *info, aarch64_insn *code,
713 const aarch64_inst *inst ATTRIBUTE_UNUSED)
714{
715 /* op0:op1:CRn:CRm:op2 */
716 insert_fields (code, info->sysreg, inst->opcode->mask, 5,
717 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
718 return NULL;
719}
720
721/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
722const char *
723aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
724 const aarch64_opnd_info *info, aarch64_insn *code,
725 const aarch64_inst *inst ATTRIBUTE_UNUSED)
726{
727 /* op1:op2 */
728 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
729 FLD_op2, FLD_op1);
730 return NULL;
731}
732
733/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
734const char *
735aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
736 const aarch64_opnd_info *info, aarch64_insn *code,
737 const aarch64_inst *inst ATTRIBUTE_UNUSED)
738{
739 /* op1:CRn:CRm:op2 */
740 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
741 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
742 return NULL;
743}
744
745/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
746
747const char *
748aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
749 const aarch64_opnd_info *info, aarch64_insn *code,
750 const aarch64_inst *inst ATTRIBUTE_UNUSED)
751{
752 /* CRm */
753 insert_field (FLD_CRm, code, info->barrier->value, 0);
754 return NULL;
755}
756
757/* Encode the prefetch operation option operand for e.g.
758 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
759
760const char *
761aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
762 const aarch64_opnd_info *info, aarch64_insn *code,
763 const aarch64_inst *inst ATTRIBUTE_UNUSED)
764{
765 /* prfop in Rt */
766 insert_field (FLD_Rt, code, info->prfop->value, 0);
767 return NULL;
768}
769
770/* Encode the hint number for instructions that alias HINT but take an
771 operand. */
772
773const char *
774aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
775 const aarch64_opnd_info *info, aarch64_insn *code,
776 const aarch64_inst *inst ATTRIBUTE_UNUSED)
777{
778 /* CRm:op2. */
779 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
780 return NULL;
781}
782
783/* Encode the extended register operand for e.g.
784 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
785const char *
786aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
787 const aarch64_opnd_info *info, aarch64_insn *code,
788 const aarch64_inst *inst ATTRIBUTE_UNUSED)
789{
790 enum aarch64_modifier_kind kind;
791
792 /* Rm */
793 insert_field (FLD_Rm, code, info->reg.regno, 0);
794 /* option */
795 kind = info->shifter.kind;
796 if (kind == AARCH64_MOD_LSL)
797 kind = info->qualifier == AARCH64_OPND_QLF_W
798 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
799 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
800 /* imm3 */
801 insert_field (FLD_imm3, code, info->shifter.amount, 0);
802
803 return NULL;
804}
805
806/* Encode the shifted register operand for e.g.
807 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
808const char *
809aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
810 const aarch64_opnd_info *info, aarch64_insn *code,
811 const aarch64_inst *inst ATTRIBUTE_UNUSED)
812{
813 /* Rm */
814 insert_field (FLD_Rm, code, info->reg.regno, 0);
815 /* shift */
816 insert_field (FLD_shift, code,
817 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
818 /* imm6 */
819 insert_field (FLD_imm6, code, info->shifter.amount, 0);
820
821 return NULL;
822}
823
824/* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
825 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
826 SELF's operand-dependent value. fields[0] specifies the field that
827 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
828const char *
829aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
830 const aarch64_opnd_info *info,
831 aarch64_insn *code,
832 const aarch64_inst *inst ATTRIBUTE_UNUSED)
833{
834 int factor = 1 + get_operand_specific_data (self);
835 insert_field (self->fields[0], code, info->addr.base_regno, 0);
836 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
837 return NULL;
838}
839
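/* A worked example (illustrative, assuming an operand-dependent value of 3):
   FACTOR is then 4, so an address such as [X0, #-16, MUL VL] stores
   -16 / 4 = -4 in SVE_imm4, i.e. 0b1100 once truncated to four bits.  */
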
840/* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
841 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
842 SELF's operand-dependent value. fields[0] specifies the field that
843 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
844const char *
845aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
846 const aarch64_opnd_info *info,
847 aarch64_insn *code,
848 const aarch64_inst *inst ATTRIBUTE_UNUSED)
849{
850 int factor = 1 + get_operand_specific_data (self);
851 insert_field (self->fields[0], code, info->addr.base_regno, 0);
852 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
853 return NULL;
854}
855
856/* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
857 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
858 SELF's operand-dependent value. fields[0] specifies the field that
859 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
860 and imm3 fields, with imm3 being the less-significant part. */
861const char *
862aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
863 const aarch64_opnd_info *info,
864 aarch64_insn *code,
865 const aarch64_inst *inst ATTRIBUTE_UNUSED)
866{
867 int factor = 1 + get_operand_specific_data (self);
868 insert_field (self->fields[0], code, info->addr.base_regno, 0);
869 insert_fields (code, info->addr.offset.imm / factor, 0,
870 2, FLD_imm3, FLD_SVE_imm6);
871 return NULL;
872}
873
874/* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
875 is a 4-bit signed number and where <shift> is SELF's operand-dependent
876 value. fields[0] specifies the base register field. */
877const char *
878aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
879 const aarch64_opnd_info *info, aarch64_insn *code,
880 const aarch64_inst *inst ATTRIBUTE_UNUSED)
881{
882 int factor = 1 << get_operand_specific_data (self);
883 insert_field (self->fields[0], code, info->addr.base_regno, 0);
884 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
885 return NULL;
886}
887
888/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
889 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
890 value. fields[0] specifies the base register field. */
891const char *
892aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
893 const aarch64_opnd_info *info, aarch64_insn *code,
894 const aarch64_inst *inst ATTRIBUTE_UNUSED)
895{
896 int factor = 1 << get_operand_specific_data (self);
897 insert_field (self->fields[0], code, info->addr.base_regno, 0);
898 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
899 return NULL;
900}
901
902/* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
903 is SELF's operand-dependent value. fields[0] specifies the base
904 register field and fields[1] specifies the offset register field. */
905const char *
906aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
907 const aarch64_opnd_info *info, aarch64_insn *code,
908 const aarch64_inst *inst ATTRIBUTE_UNUSED)
909{
910 insert_field (self->fields[0], code, info->addr.base_regno, 0);
911 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
912 return NULL;
913}
914
915/* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
916 <shift> is SELF's operand-dependent value. fields[0] specifies the
917 base register field, fields[1] specifies the offset register field and
918 fields[2] is a single-bit field that selects SXTW over UXTW. */
919const char *
920aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
921 const aarch64_opnd_info *info, aarch64_insn *code,
922 const aarch64_inst *inst ATTRIBUTE_UNUSED)
923{
924 insert_field (self->fields[0], code, info->addr.base_regno, 0);
925 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
926 if (info->shifter.kind == AARCH64_MOD_UXTW)
927 insert_field (self->fields[2], code, 0, 0);
928 else
929 insert_field (self->fields[2], code, 1, 0);
930 return NULL;
931}
932
933/* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
934 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
935 fields[0] specifies the base register field. */
936const char *
937aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
938 const aarch64_opnd_info *info, aarch64_insn *code,
939 const aarch64_inst *inst ATTRIBUTE_UNUSED)
940{
941 int factor = 1 << get_operand_specific_data (self);
942 insert_field (self->fields[0], code, info->addr.base_regno, 0);
943 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
944 return NULL;
945}
946
947/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
948 where <modifier> is fixed by the instruction and where <msz> is a
949 2-bit unsigned number. fields[0] specifies the base register field
950 and fields[1] specifies the offset register field. */
951static const char *
952aarch64_ext_sve_addr_zz (const aarch64_operand *self,
953 const aarch64_opnd_info *info, aarch64_insn *code)
954{
955 insert_field (self->fields[0], code, info->addr.base_regno, 0);
956 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
957 insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
958 return NULL;
959}
960
961/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
962 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
963 field and fields[1] specifies the offset register field. */
964const char *
965aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
966 const aarch64_opnd_info *info, aarch64_insn *code,
967 const aarch64_inst *inst ATTRIBUTE_UNUSED)
968{
969 return aarch64_ext_sve_addr_zz (self, info, code);
970}
971
972/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
973 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
974 field and fields[1] specifies the offset register field. */
975const char *
976aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
977 const aarch64_opnd_info *info,
978 aarch64_insn *code,
979 const aarch64_inst *inst ATTRIBUTE_UNUSED)
980{
981 return aarch64_ext_sve_addr_zz (self, info, code);
982}
983
984/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
985 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
986 field and fields[1] specifies the offset register field. */
987const char *
988aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
989 const aarch64_opnd_info *info,
990 aarch64_insn *code,
991 const aarch64_inst *inst ATTRIBUTE_UNUSED)
992{
993 return aarch64_ext_sve_addr_zz (self, info, code);
994}
995
996/* Encode an SVE ADD/SUB immediate. */
997const char *
998aarch64_ins_sve_aimm (const aarch64_operand *self,
999 const aarch64_opnd_info *info, aarch64_insn *code,
1000 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1001{
1002 if (info->shifter.amount == 8)
1003 insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
1004 else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
1005 insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
1006 else
1007 insert_all_fields (self, code, info->imm.value & 0xff);
1008 return NULL;
1009}
1010
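/* A worked example (illustrative): ADD Z0.H, Z0.H, #768 has an immediate
   that is a multiple of 256, so it is encoded as (768 / 256) | 256, i.e.
   imm8 = 3 with the shift bit set; writing ADD Z0.H, Z0.H, #3, LSL #8
   takes the first branch and produces the same encoding.  */
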
1011/* Encode an SVE CPY/DUP immediate. */
1012const char *
1013aarch64_ins_sve_asimm (const aarch64_operand *self,
1014 const aarch64_opnd_info *info, aarch64_insn *code,
1015 const aarch64_inst *inst)
1016{
1017 return aarch64_ins_sve_aimm (self, info, code, inst);
1018}
1019
1020/* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1021 array specifies which field to use for Zn. MM is encoded in the
1022 concatenation of imm5 and SVE_tszh, with imm5 being the less
1023 significant part. */
1024const char *
1025aarch64_ins_sve_index (const aarch64_operand *self,
1026 const aarch64_opnd_info *info, aarch64_insn *code,
1027 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1028{
1029 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1030 insert_field (self->fields[0], code, info->reglane.regno, 0);
1031 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1032 2, FLD_imm5, FLD_SVE_tszh);
1033 return NULL;
1034}
1035
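/* A worked example (illustrative): for Z5.H[3] the element size is 2, so
   the inserted value is (3 * 2 + 1) * 2 = 14; imm5 receives the low five
   bits (0b01110) and SVE_tszh the remaining upper bits (zero here).  */
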
1036/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1037const char *
1038aarch64_ins_sve_limm_mov (const aarch64_operand *self,
1039 const aarch64_opnd_info *info, aarch64_insn *code,
1040 const aarch64_inst *inst)
1041{
1042 return aarch64_ins_limm (self, info, code, inst);
1043}
1044
1045/* Encode Zn[MM], where Zn occupies the least-significant part of the field
1046 and where MM occupies the most-significant part. The operand-dependent
1047 value specifies the number of bits in Zn. */
1048const char *
1049aarch64_ins_sve_quad_index (const aarch64_operand *self,
1050 const aarch64_opnd_info *info, aarch64_insn *code,
1051 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1052{
1053 unsigned int reg_bits = get_operand_specific_data (self);
1054 assert (info->reglane.regno < (1U << reg_bits));
1055 unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1056 insert_all_fields (self, code, val);
1057 return NULL;
1058}
1059
1060/* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1061 to use for Zn. */
1062const char *
1063aarch64_ins_sve_reglist (const aarch64_operand *self,
1064 const aarch64_opnd_info *info, aarch64_insn *code,
1065 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1066{
1067 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
1068 return NULL;
1069}
1070
1071/* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1072 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1073 field. */
1074const char *
1075aarch64_ins_sve_scale (const aarch64_operand *self,
1076 const aarch64_opnd_info *info, aarch64_insn *code,
1077 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1078{
1079 insert_all_fields (self, code, info->imm.value);
1080 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
1081 return NULL;
1082}
1083
1084/* Encode an SVE shift left immediate. */
1085const char *
1086aarch64_ins_sve_shlimm (const aarch64_operand *self,
1087 const aarch64_opnd_info *info, aarch64_insn *code,
1088 const aarch64_inst *inst)
1089{
1090 const aarch64_opnd_info *prev_operand;
1091 unsigned int esize;
1092
1093 assert (info->idx > 0);
1094 prev_operand = &inst->operands[info->idx - 1];
1095 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1096 insert_all_fields (self, code, 8 * esize + info->imm.value);
1097 return NULL;
1098}
1099
1100/* Encode an SVE shift right immediate. */
1101const char *
1102aarch64_ins_sve_shrimm (const aarch64_operand *self,
1103 const aarch64_opnd_info *info, aarch64_insn *code,
1104 const aarch64_inst *inst)
1105{
1106 const aarch64_opnd_info *prev_operand;
1107 unsigned int esize;
1108
1109 assert (info->idx > 0);
1110 prev_operand = &inst->operands[info->idx - 1];
1111 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1112 insert_all_fields (self, code, 16 * esize - info->imm.value);
1113 return NULL;
1114}
1115
1116/* Encode a single-bit immediate that selects between #0.5 and #1.0.
1117 The fields array specifies which field to use. */
1118const char *
1119aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1120 const aarch64_opnd_info *info,
1121 aarch64_insn *code,
1122 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1123{
1124 if (info->imm.value == 0x3f000000)
1125 insert_field (self->fields[0], code, 0, 0);
1126 else
1127 insert_field (self->fields[0], code, 1, 0);
1128 return NULL;
1129}
1130
1131/* Encode a single-bit immediate that selects between #0.5 and #2.0.
1132 The fields array specifies which field to use. */
1133const char *
1134aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1135 const aarch64_opnd_info *info,
1136 aarch64_insn *code,
1137 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1138{
1139 if (info->imm.value == 0x3f000000)
1140 insert_field (self->fields[0], code, 0, 0);
1141 else
1142 insert_field (self->fields[0], code, 1, 0);
1143 return NULL;
1144}
1145
1146/* Encode a single-bit immediate that selects between #0.0 and #1.0.
1147 The fields array specifies which field to use. */
1148const char *
1149aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1150 const aarch64_opnd_info *info,
1151 aarch64_insn *code,
1152 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1153{
1154 if (info->imm.value == 0)
1155 insert_field (self->fields[0], code, 0, 0);
1156 else
1157 insert_field (self->fields[0], code, 1, 0);
1158 return NULL;
1159}
1160
1161/* Miscellaneous encoding functions. */
1162
1163/* Encode size[0], i.e. bit 22, for
1164 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1165
1166static void
1167encode_asimd_fcvt (aarch64_inst *inst)
1168{
1169 aarch64_insn value;
1170 aarch64_field field = {0, 0};
1171 enum aarch64_opnd_qualifier qualifier;
1172
1173 switch (inst->opcode->op)
1174 {
1175 case OP_FCVTN:
1176 case OP_FCVTN2:
1177 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1178 qualifier = inst->operands[1].qualifier;
1179 break;
1180 case OP_FCVTL:
1181 case OP_FCVTL2:
1182 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1183 qualifier = inst->operands[0].qualifier;
1184 break;
1185 default:
1186 assert (0);
1187 }
1188 assert (qualifier == AARCH64_OPND_QLF_V_4S
1189 || qualifier == AARCH64_OPND_QLF_V_2D);
1190 value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
1191 gen_sub_field (FLD_size, 0, 1, &field);
1192 insert_field_2 (&field, &inst->value, value, 0);
1193}
1194
1195/* Encode size[0], i.e. bit 22, for
1196 e.g. FCVTXN <Vb><d>, <Va><n>. */
1197
1198static void
1199encode_asisd_fcvtxn (aarch64_inst *inst)
1200{
1201 aarch64_insn val = 1;
1202 aarch64_field field = {0, 0};
1203 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1204 gen_sub_field (FLD_size, 0, 1, &field);
1205 insert_field_2 (&field, &inst->value, val, 0);
1206}
1207
1208/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1209static void
1210encode_fcvt (aarch64_inst *inst)
1211{
1212 aarch64_insn val;
1213 const aarch64_field field = {15, 2};
1214
1215 /* opc dstsize */
1216 switch (inst->operands[0].qualifier)
1217 {
1218 case AARCH64_OPND_QLF_S_S: val = 0; break;
1219 case AARCH64_OPND_QLF_S_D: val = 1; break;
1220 case AARCH64_OPND_QLF_S_H: val = 3; break;
1221 default: abort ();
1222 }
1223 insert_field_2 (&field, &inst->value, val, 0);
1224
1225 return;
1226}
1227
1228/* Return the index in qualifiers_list that INST is using. Should only
1229 be called once the qualifiers are known to be valid. */
1230
1231static int
1232aarch64_get_variant (struct aarch64_inst *inst)
1233{
1234 int i, nops, variant;
1235
1236 nops = aarch64_num_of_operands (inst->opcode);
1237 for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
1238 {
1239 for (i = 0; i < nops; ++i)
1240 if (inst->opcode->qualifiers_list[variant][i]
1241 != inst->operands[i].qualifier)
1242 break;
1243 if (i == nops)
1244 return variant;
1245 }
1246 abort ();
1247}
1248
1249/* Do miscellaneous encodings that are not common enough to be driven by
1250 flags. */
1251
1252static void
1253do_misc_encoding (aarch64_inst *inst)
1254{
1255 unsigned int value;
1256
1257 switch (inst->opcode->op)
1258 {
1259 case OP_FCVT:
1260 encode_fcvt (inst);
1261 break;
1262 case OP_FCVTN:
1263 case OP_FCVTN2:
1264 case OP_FCVTL:
1265 case OP_FCVTL2:
1266 encode_asimd_fcvt (inst);
1267 break;
1268 case OP_FCVTXN_S:
1269 encode_asisd_fcvtxn (inst);
1270 break;
1271 case OP_MOV_P_P:
1272 case OP_MOVS_P_P:
1273 /* Copy Pn to Pm and Pg. */
1274 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1275 insert_field (FLD_SVE_Pm, &inst->value, value, 0);
1276 insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
1277 break;
1278 case OP_MOV_Z_P_Z:
1279 /* Copy Zd to Zm. */
1280 value = extract_field (FLD_SVE_Zd, inst->value, 0);
1281 insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
1282 break;
1283 case OP_MOV_Z_V:
1284 /* Fill in the zero immediate. */
1285 insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
1286 2, FLD_imm5, FLD_SVE_tszh);
1287 break;
1288 case OP_MOV_Z_Z:
1289 /* Copy Zn to Zm. */
1290 value = extract_field (FLD_SVE_Zn, inst->value, 0);
1291 insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
1292 break;
1293 case OP_MOV_Z_Zi:
1294 break;
1295 case OP_MOVM_P_P_P:
1296 /* Copy Pd to Pm. */
1297 value = extract_field (FLD_SVE_Pd, inst->value, 0);
1298 insert_field (FLD_SVE_Pm, &inst->value, value, 0);
1299 break;
1300 case OP_MOVZS_P_P_P:
1301 case OP_MOVZ_P_P_P:
1302 /* Copy Pn to Pm. */
1303 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1304 insert_field (FLD_SVE_Pm, &inst->value, value, 0);
1305 break;
1306 case OP_NOTS_P_P_P_Z:
1307 case OP_NOT_P_P_P_Z:
1308 /* Copy Pg to Pm. */
1309 value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
1310 insert_field (FLD_SVE_Pm, &inst->value, value, 0);
1311 break;
1312 default: break;
1313 }
1314}
1315
1316/* Encode the 'size' and 'Q' field for e.g. SHADD. */
1317static void
1318encode_sizeq (aarch64_inst *inst)
1319{
1320 aarch64_insn sizeq;
1321 enum aarch64_field_kind kind;
1322 int idx;
1323
1324 /* Get the index of the operand whose information we are going to use
1325 to encode the size and Q fields.
1326 This is deduced from the possible valid qualifier lists. */
1327 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1328 DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
1329 aarch64_get_qualifier_name (inst->operands[idx].qualifier));
1330 sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
1331 /* Q */
1332 insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
1333 /* size */
1334 if (inst->opcode->iclass == asisdlse
1335 || inst->opcode->iclass == asisdlsep
1336 || inst->opcode->iclass == asisdlso
1337 || inst->opcode->iclass == asisdlsop)
1338 kind = FLD_vldst_size;
1339 else
1340 kind = FLD_size;
1341 insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
1342}
1343
1344/* Opcodes that have fields shared by multiple operands are usually flagged
1345 with flags. In this function, we detect such flags and use the
1346 information in one of the related operands to do the encoding. The 'one'
 1347 operand is not an arbitrary operand but one of the operands that has enough
 1348 information for such an encoding. */
1349
1350static void
1351do_special_encoding (struct aarch64_inst *inst)
1352{
1353 int idx;
 1354 aarch64_insn value = 0;
1355
1356 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
1357
1358 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1359 if (inst->opcode->flags & F_COND)
1360 {
1361 insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
1362 }
1363 if (inst->opcode->flags & F_SF)
1364 {
1365 idx = select_operand_for_sf_field_coding (inst->opcode);
1366 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
1367 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
1368 ? 1 : 0;
1369 insert_field (FLD_sf, &inst->value, value, 0);
1370 if (inst->opcode->flags & F_N)
1371 insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
1372 }
1373 if (inst->opcode->flags & F_LSE_SZ)
1374 {
1375 idx = select_operand_for_sf_field_coding (inst->opcode);
1376 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
1377 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
1378 ? 1 : 0;
1379 insert_field (FLD_lse_sz, &inst->value, value, 0);
1380 }
1381 if (inst->opcode->flags & F_SIZEQ)
1382 encode_sizeq (inst);
1383 if (inst->opcode->flags & F_FPTYPE)
1384 {
1385 idx = select_operand_for_fptype_field_coding (inst->opcode);
1386 switch (inst->operands[idx].qualifier)
1387 {
1388 case AARCH64_OPND_QLF_S_S: value = 0; break;
1389 case AARCH64_OPND_QLF_S_D: value = 1; break;
1390 case AARCH64_OPND_QLF_S_H: value = 3; break;
1391 default: assert (0);
1392 }
1393 insert_field (FLD_type, &inst->value, value, 0);
1394 }
1395 if (inst->opcode->flags & F_SSIZE)
1396 {
1397 enum aarch64_opnd_qualifier qualifier;
1398 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1399 qualifier = inst->operands[idx].qualifier;
1400 assert (qualifier >= AARCH64_OPND_QLF_S_B
1401 && qualifier <= AARCH64_OPND_QLF_S_Q);
1402 value = aarch64_get_qualifier_standard_value (qualifier);
1403 insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
1404 }
1405 if (inst->opcode->flags & F_T)
1406 {
1407 int num; /* num of consecutive '0's on the right side of imm5<3:0>. */
1408 aarch64_field field = {0, 0};
1409 enum aarch64_opnd_qualifier qualifier;
1410
1411 idx = 0;
1412 qualifier = inst->operands[idx].qualifier;
1413 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1414 == AARCH64_OPND_CLASS_SIMD_REG
1415 && qualifier >= AARCH64_OPND_QLF_V_8B
1416 && qualifier <= AARCH64_OPND_QLF_V_2D);
1417 /* imm5<3:0> q <t>
1418 0000 x reserved
1419 xxx1 0 8b
1420 xxx1 1 16b
1421 xx10 0 4h
1422 xx10 1 8h
1423 x100 0 2s
1424 x100 1 4s
1425 1000 0 reserved
1426 1000 1 2d */
1427 value = aarch64_get_qualifier_standard_value (qualifier);
1428 insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
1429 num = (int) value >> 1;
1430 assert (num >= 0 && num <= 3);
1431 gen_sub_field (FLD_imm5, 0, num + 1, &field);
1432 insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
1433 }
1434 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1435 {
1436 /* Use Rt to encode in the case of e.g.
1437 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1438 enum aarch64_opnd_qualifier qualifier;
1439 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1440 if (idx == -1)
 1441 /* Otherwise use the result operand, which has to be an integer
 1442 register. */
1443 idx = 0;
1444 assert (idx == 0 || idx == 1);
1445 assert (aarch64_get_operand_class (inst->opcode->operands[idx])
1446 == AARCH64_OPND_CLASS_INT_REG);
1447 qualifier = inst->operands[idx].qualifier;
1448 insert_field (FLD_Q, &inst->value,
1449 aarch64_get_qualifier_standard_value (qualifier), 0);
1450 }
1451 if (inst->opcode->flags & F_LDS_SIZE)
1452 {
1453 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1454 enum aarch64_opnd_qualifier qualifier;
1455 aarch64_field field = {0, 0};
1456 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1457 == AARCH64_OPND_CLASS_INT_REG);
1458 gen_sub_field (FLD_opc, 0, 1, &field);
1459 qualifier = inst->operands[0].qualifier;
1460 insert_field_2 (&field, &inst->value,
1461 1 - aarch64_get_qualifier_standard_value (qualifier), 0);
1462 }
1463 /* Miscellaneous encoding as the last step. */
1464 if (inst->opcode->flags & F_MISC)
1465 do_misc_encoding (inst);
1466
1467 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
1468}
1469
1470/* Some instructions (including all SVE ones) use the instruction class
1471 to describe how a qualifiers_list index is represented in the instruction
1472 encoding. If INST is such an instruction, encode the chosen qualifier
1473 variant. */
1474
1475static void
1476aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
1477{
1478 switch (inst->opcode->iclass)
1479 {
1480 case sve_cpy:
1481 insert_fields (&inst->value, aarch64_get_variant (inst),
1482 0, 2, FLD_SVE_M_14, FLD_size);
1483 break;
1484
1485 case sve_index:
1486 case sve_shift_pred:
1487 case sve_shift_unpred:
1488 /* For indices and shift amounts, the variant is encoded as
1489 part of the immediate. */
1490 break;
1491
1492 case sve_limm:
1493 /* For sve_limm, the .B, .H, and .S forms are just a convenience
1494 and depend on the immediate. They don't have a separate
1495 encoding. */
1496 break;
1497
1498 case sve_misc:
1499 /* sve_misc instructions have only a single variant. */
1500 break;
1501
1502 case sve_movprfx:
1503 insert_fields (&inst->value, aarch64_get_variant (inst),
1504 0, 2, FLD_SVE_M_16, FLD_size);
1505 break;
1506
1507 case sve_pred_zm:
1508 insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
1509 break;
1510
1511 case sve_size_bhs:
1512 case sve_size_bhsd:
1513 insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
1514 break;
1515
1516 case sve_size_hsd:
1517 insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
1518 break;
1519
1520 case sve_size_sd:
1521 insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
1522 break;
1523
1524 default:
1525 break;
1526 }
1527}
1528
1529/* Converters converting an alias opcode instruction to its real form. */
1530
1531/* ROR <Wd>, <Ws>, #<shift>
1532 is equivalent to:
1533 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1534static void
1535convert_ror_to_extr (aarch64_inst *inst)
1536{
1537 copy_operand_info (inst, 3, 2);
1538 copy_operand_info (inst, 2, 1);
1539}
1540
1541/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1542 is equivalent to:
1543 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1544static void
1545convert_xtl_to_shll (aarch64_inst *inst)
1546{
1547 inst->operands[2].qualifier = inst->operands[1].qualifier;
1548 inst->operands[2].imm.value = 0;
1549}
1550
1551/* Convert
1552 LSR <Xd>, <Xn>, #<shift>
1553 to
1554 UBFM <Xd>, <Xn>, #<shift>, #63. */
1555static void
1556convert_sr_to_bfm (aarch64_inst *inst)
1557{
1558 inst->operands[3].imm.value =
1559 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1560}
1561
1562/* Convert MOV to ORR. */
1563static void
1564convert_mov_to_orr (aarch64_inst *inst)
1565{
1566 /* MOV <Vd>.<T>, <Vn>.<T>
1567 is equivalent to:
1568 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1569 copy_operand_info (inst, 2, 1);
1570}
1571
1572/* When <imms> >= <immr>, the instruction written:
1573 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1574 is equivalent to:
1575 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1576
1577static void
1578convert_bfx_to_bfm (aarch64_inst *inst)
1579{
1580 int64_t lsb, width;
1581
1582 /* Convert the operand. */
1583 lsb = inst->operands[2].imm.value;
1584 width = inst->operands[3].imm.value;
1585 inst->operands[2].imm.value = lsb;
1586 inst->operands[3].imm.value = lsb + width - 1;
1587}
1588
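/* A worked example (illustrative): SBFX X0, X1, #8, #16 becomes
   SBFM X0, X1, #8, #23, since <lsb> + <width> - 1 = 8 + 16 - 1 = 23.  */
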
1589/* When <imms> < <immr>, the instruction written:
1590 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1591 is equivalent to:
1592 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1593
1594static void
1595convert_bfi_to_bfm (aarch64_inst *inst)
1596{
1597 int64_t lsb, width;
1598
1599 /* Convert the operand. */
1600 lsb = inst->operands[2].imm.value;
1601 width = inst->operands[3].imm.value;
1602 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1603 {
1604 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1605 inst->operands[3].imm.value = width - 1;
1606 }
1607 else
1608 {
1609 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1610 inst->operands[3].imm.value = width - 1;
1611 }
1612}
1613
1614/* The instruction written:
1615 BFC <Xd>, #<lsb>, #<width>
1616 is equivalent to:
1617 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1618
1619static void
1620convert_bfc_to_bfm (aarch64_inst *inst)
1621{
1622 int64_t lsb, width;
1623
1624 /* Insert XZR. */
1625 copy_operand_info (inst, 3, 2);
1626 copy_operand_info (inst, 2, 1);
 1627 copy_operand_info (inst, 1, 0);
1628 inst->operands[1].reg.regno = 0x1f;
1629
 1630 /* Convert the immediate operand. */
1631 lsb = inst->operands[2].imm.value;
1632 width = inst->operands[3].imm.value;
1633 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1634 {
1635 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1636 inst->operands[3].imm.value = width - 1;
1637 }
1638 else
1639 {
1640 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1641 inst->operands[3].imm.value = width - 1;
1642 }
1643}
1644
1645/* The instruction written:
1646 LSL <Xd>, <Xn>, #<shift>
1647 is equivalent to:
1648 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1649
1650static void
1651convert_lsl_to_ubfm (aarch64_inst *inst)
1652{
1653 int64_t shift = inst->operands[2].imm.value;
1654
1655 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1656 {
1657 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1658 inst->operands[3].imm.value = 31 - shift;
1659 }
1660 else
1661 {
1662 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1663 inst->operands[3].imm.value = 63 - shift;
1664 }
1665}
1666
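/* A worked example (illustrative): LSL X0, X1, #3 becomes
   UBFM X0, X1, #61, #60, since (64 - 3) & 0x3f = 61 and 63 - 3 = 60.  */
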
1667/* CINC <Wd>, <Wn>, <cond>
1668 is equivalent to:
1669 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1670
1671static void
1672convert_to_csel (aarch64_inst *inst)
1673{
1674 copy_operand_info (inst, 3, 2);
1675 copy_operand_info (inst, 2, 1);
1676 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1677}
1678
1679/* CSET <Wd>, <cond>
1680 is equivalent to:
1681 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1682
1683static void
1684convert_cset_to_csinc (aarch64_inst *inst)
1685{
1686 copy_operand_info (inst, 3, 1);
1687 copy_operand_info (inst, 2, 0);
1688 copy_operand_info (inst, 1, 0);
1689 inst->operands[1].reg.regno = 0x1f;
1690 inst->operands[2].reg.regno = 0x1f;
1691 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1692}
1693
1694/* MOV <Wd>, #<imm>
1695 is equivalent to:
1696 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1697
1698static void
1699convert_mov_to_movewide (aarch64_inst *inst)
1700{
1701 int is32;
1702 uint32_t shift_amount;
1703 uint64_t value;
1704
1705 switch (inst->opcode->op)
1706 {
1707 case OP_MOV_IMM_WIDE:
1708 value = inst->operands[1].imm.value;
1709 break;
1710 case OP_MOV_IMM_WIDEN:
1711 value = ~inst->operands[1].imm.value;
1712 break;
1713 default:
1714 assert (0);
1715 }
1716 inst->operands[1].type = AARCH64_OPND_HALF;
1717 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1718 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1719 /* The constraint check should have guaranteed this wouldn't happen. */
1720 assert (0);
1721 value >>= shift_amount;
1722 value &= 0xffff;
1723 inst->operands[1].imm.value = value;
1724 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1725 inst->operands[1].shifter.amount = shift_amount;
1726}
1727
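/* A worked example (illustrative): MOV X0, #0x12340000 is a wide-move
   alias; aarch64_wide_constant_p reports a shift of 16, so the operand
   becomes #0x1234 with an LSL #16 shifter, i.e. MOVZ X0, #0x1234, LSL #16.  */
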
1728/* MOV <Wd>, #<imm>
1729 is equivalent to:
1730 ORR <Wd>, WZR, #<imm>. */
1731
1732static void
1733convert_mov_to_movebitmask (aarch64_inst *inst)
1734{
1735 copy_operand_info (inst, 2, 1);
1736 inst->operands[1].reg.regno = 0x1f;
1737 inst->operands[1].skip = 0;
1738}
1739
 1740/* Some alias opcodes are assembled by being converted to their real form. */
1741
1742static void
1743convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1744{
1745 const aarch64_opcode *alias = inst->opcode;
1746
1747 if ((alias->flags & F_CONV) == 0)
1748 goto convert_to_real_return;
1749
1750 switch (alias->op)
1751 {
1752 case OP_ASR_IMM:
1753 case OP_LSR_IMM:
1754 convert_sr_to_bfm (inst);
1755 break;
1756 case OP_LSL_IMM:
1757 convert_lsl_to_ubfm (inst);
1758 break;
1759 case OP_CINC:
1760 case OP_CINV:
1761 case OP_CNEG:
1762 convert_to_csel (inst);
1763 break;
1764 case OP_CSET:
1765 case OP_CSETM:
1766 convert_cset_to_csinc (inst);
1767 break;
1768 case OP_UBFX:
1769 case OP_BFXIL:
1770 case OP_SBFX:
1771 convert_bfx_to_bfm (inst);
1772 break;
1773 case OP_SBFIZ:
1774 case OP_BFI:
1775 case OP_UBFIZ:
1776 convert_bfi_to_bfm (inst);
1777 break;
1778 case OP_BFC:
1779 convert_bfc_to_bfm (inst);
1780 break;
1781 case OP_MOV_V:
1782 convert_mov_to_orr (inst);
1783 break;
1784 case OP_MOV_IMM_WIDE:
1785 case OP_MOV_IMM_WIDEN:
1786 convert_mov_to_movewide (inst);
1787 break;
1788 case OP_MOV_IMM_LOG:
1789 convert_mov_to_movebitmask (inst);
1790 break;
1791 case OP_ROR_IMM:
1792 convert_ror_to_extr (inst);
1793 break;
1794 case OP_SXTL:
1795 case OP_SXTL2:
1796 case OP_UXTL:
1797 case OP_UXTL2:
1798 convert_xtl_to_shll (inst);
1799 break;
1800 default:
1801 break;
1802 }
1803
1804convert_to_real_return:
1805 aarch64_replace_opcode (inst, real);
1806}
1807
1808/* Encode *INST_ORI of the opcode code OPCODE.
1809 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1810 matched operand qualifier sequence in *QLF_SEQ. */
1811
1812int
1813aarch64_opcode_encode (const aarch64_opcode *opcode,
1814 const aarch64_inst *inst_ori, aarch64_insn *code,
1815 aarch64_opnd_qualifier_t *qlf_seq,
1816 aarch64_operand_error *mismatch_detail)
1817{
1818 int i;
1819 const aarch64_opcode *aliased;
1820 aarch64_inst copy, *inst;
1821
1822 DEBUG_TRACE ("enter with %s", opcode->name);
1823
1824 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1825 copy = *inst_ori;
1826 inst = &copy;
1827
1828 assert (inst->opcode == NULL || inst->opcode == opcode);
1829 if (inst->opcode == NULL)
1830 inst->opcode = opcode;
1831
1832 /* Constrain the operands.
1833 After passing this, the encoding is guaranteed to succeed. */
1834 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
1835 {
1836 DEBUG_TRACE ("FAIL since operand constraint not met");
1837 return 0;
1838 }
1839
1840 /* Get the base value.
1841 Note: this has to be before the aliasing handling below in order to
1842 get the base value from the alias opcode before we move on to the
1843 aliased opcode for encoding. */
1844 inst->value = opcode->opcode;
1845
1846 /* No need to do anything else if the opcode does not have any operand. */
1847 if (aarch64_num_of_operands (opcode) == 0)
1848 goto encoding_exit;
1849
1850 /* Assign operand indexes and check types. Also put the matched
1851 operand qualifiers in *QLF_SEQ to return. */
1852 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1853 {
1854 assert (opcode->operands[i] == inst->operands[i].type);
1855 inst->operands[i].idx = i;
1856 if (qlf_seq != NULL)
1857 *qlf_seq = inst->operands[i].qualifier;
1858 }
1859
1860 aliased = aarch64_find_real_opcode (opcode);
1861 /* If the opcode is an alias and it does not ask for direct encoding by
1862 itself, the instruction will be transformed to the form of real opcode
1863 and the encoding will be carried out using the rules for the aliased
1864 opcode. */
1865 if (aliased != NULL && (opcode->flags & F_CONV))
1866 {
1867 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1868 aliased->name, opcode->name);
1869 /* Convert the operands to the form of the real opcode. */
1870 convert_to_real (inst, aliased);
1871 opcode = aliased;
1872 }
1873
1874 aarch64_opnd_info *info = inst->operands;
1875
1876 /* Call the inserter of each operand. */
1877 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
1878 {
1879 const aarch64_operand *opnd;
1880 enum aarch64_opnd type = opcode->operands[i];
1881 if (type == AARCH64_OPND_NIL)
1882 break;
1883 if (info->skip)
1884 {
1885 DEBUG_TRACE ("skip the incomplete operand %d", i);
1886 continue;
1887 }
1888 opnd = &aarch64_operands[type];
1889 if (operand_has_inserter (opnd))
1890 aarch64_insert_operand (opnd, info, &inst->value, inst);
1891 }
1892
1893 /* Call opcode encoders indicated by flags. */
1894 if (opcode_has_special_coder (opcode))
1895 do_special_encoding (inst);
1896
1897 /* Possibly use the instruction class to encode the chosen qualifier
1898 variant. */
1899 aarch64_encode_variant_using_iclass (inst);
1900
1901encoding_exit:
1902 DEBUG_TRACE ("exit with %s", opcode->name);
1903
1904 *code = inst->value;
1905
1906 return 1;
1907}