[AArch64][SVE 29/32] Add new SVE core & FP register operands
[deliverable/binutils-gdb.git] / opcodes / aarch64-asm.c
 1/* aarch64-asm.c -- AArch64 assembler support.
 2 Copyright (C) 2012-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21#include "sysdep.h"
22#include <stdarg.h>
 23#include "libiberty.h"
24#include "aarch64-asm.h"
25
26/* Utilities. */
27
 28/* The unnamed arguments consist of the number of fields and information about
 29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
 30 the base mask of the opcode.
 31
 32 N.B. the fields are required to be in such an order that the least significant
 33 field for VALUE comes first, e.g. the <index> in
 34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
 35 is encoded in H:L:M in some cases; in those cases the fields should be passed
 36 in the order of M, L, H. */
37
38static inline void
39insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
40{
41 uint32_t num;
42 const aarch64_field *field;
43 enum aarch64_field_kind kind;
44 va_list va;
45
46 va_start (va, mask);
47 num = va_arg (va, uint32_t);
48 assert (num <= 5);
49 while (num--)
50 {
51 kind = va_arg (va, enum aarch64_field_kind);
52 field = &fields[kind];
53 insert_field (kind, code, value, mask);
54 value >>= field->width;
55 }
56 va_end (va);
57}
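/* For example, for the H:L:M case described above, the call

     insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);

   places bit 0 of the index in M, bit 1 in L and bit 2 in H, since each
   field consumes FIELD->width bits of VALUE in turn.  */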
58
59/* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
61
62static void
63insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
64 aarch64_insn value)
65{
66 unsigned int i;
67 enum aarch64_field_kind kind;
68
69 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
70 if (self->fields[i] != FLD_NIL)
71 {
72 kind = self->fields[i];
73 insert_field (kind, code, value, 0);
74 value >>= fields[kind].width;
75 }
76}
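/* For example, given a hypothetical fields array { FLD_hi, FLD_lo } the
   loop above walks the array backwards, so the low bits of VALUE land in
   FLD_lo and the remaining bits in FLD_hi.  */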
77
78/* Operand inserters. */
79
80/* Insert register number. */
81const char *
82aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
83 aarch64_insn *code,
84 const aarch64_inst *inst ATTRIBUTE_UNUSED)
85{
86 insert_field (self->fields[0], code, info->reg.regno, 0);
87 return NULL;
88}
89
90/* Insert register number, index and/or other data for SIMD register element
91 operand, e.g. the last source operand in
92 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
93const char *
94aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
95 aarch64_insn *code, const aarch64_inst *inst)
96{
97 /* regno */
98 insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
99 /* index and/or type */
100 if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
101 {
102 int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
103 if (info->type == AARCH64_OPND_En
104 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
105 {
106 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
107 assert (info->idx == 1); /* Vn */
108 aarch64_insn value = info->reglane.index << pos;
109 insert_field (FLD_imm4, code, value, 0);
110 }
111 else
112 {
113 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
114 imm5<3:0> <V>
115 0000 RESERVED
116 xxx1 B
117 xx10 H
118 x100 S
119 1000 D */
120 aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
121 insert_field (FLD_imm5, code, value, 0);
122 }
123 }
124 else
125 {
 126 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
128 switch (info->qualifier)
129 {
130 case AARCH64_OPND_QLF_S_H:
131 /* H:L:M */
132 insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
133 break;
134 case AARCH64_OPND_QLF_S_S:
135 /* H:L */
136 insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
137 break;
138 case AARCH64_OPND_QLF_S_D:
139 /* H */
140 insert_field (FLD_H, code, info->reglane.index, 0);
141 break;
142 default:
143 assert (0);
144 }
145 }
146 return NULL;
147}
148
149/* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
150const char *
151aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
152 aarch64_insn *code,
153 const aarch64_inst *inst ATTRIBUTE_UNUSED)
154{
155 /* R */
156 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
157 /* len */
158 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
159 return NULL;
160}
161
162/* Insert Rt and opcode fields for a register list operand, e.g. Vt
163 in AdvSIMD load/store instructions. */
164const char *
165aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
166 const aarch64_opnd_info *info, aarch64_insn *code,
167 const aarch64_inst *inst)
168{
 169 aarch64_insn value = 0;
170 /* Number of elements in each structure to be loaded/stored. */
171 unsigned num = get_opcode_dependent_value (inst->opcode);
172
173 /* Rt */
174 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
175 /* opcode */
176 switch (num)
177 {
178 case 1:
179 switch (info->reglist.num_regs)
180 {
181 case 1: value = 0x7; break;
182 case 2: value = 0xa; break;
183 case 3: value = 0x6; break;
184 case 4: value = 0x2; break;
185 default: assert (0);
186 }
187 break;
188 case 2:
189 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
190 break;
191 case 3:
192 value = 0x4;
193 break;
194 case 4:
195 value = 0x0;
196 break;
197 default:
198 assert (0);
199 }
200 insert_field (FLD_opcode, code, value, 0);
201
202 return NULL;
203}
204
205/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
206 single structure to all lanes instructions. */
207const char *
208aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
209 const aarch64_opnd_info *info, aarch64_insn *code,
210 const aarch64_inst *inst)
211{
212 aarch64_insn value;
213 /* The opcode dependent area stores the number of elements in
214 each structure to be loaded/stored. */
215 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
216
217 /* Rt */
218 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
219 /* S */
220 value = (aarch64_insn) 0;
221 if (is_ld1r && info->reglist.num_regs == 2)
 222 /* OP_LD1R does not have an alternating variant, but has "two consecutive"
 223 instead. */
224 value = (aarch64_insn) 1;
225 insert_field (FLD_S, code, value, 0);
226
227 return NULL;
228}
229
230/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
231 operand e.g. Vt in AdvSIMD load/store single element instructions. */
232const char *
233aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
234 const aarch64_opnd_info *info, aarch64_insn *code,
235 const aarch64_inst *inst ATTRIBUTE_UNUSED)
236{
237 aarch64_field field = {0, 0};
 238 aarch64_insn QSsize = 0; /* fields Q:S:size. */
 239 aarch64_insn opcodeh2 = 0; /* opcode<2:1> */
240
241 assert (info->reglist.has_index);
242
243 /* Rt */
244 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
245 /* Encode the index, opcode<2:1> and size. */
246 switch (info->qualifier)
247 {
248 case AARCH64_OPND_QLF_S_B:
249 /* Index encoded in "Q:S:size". */
250 QSsize = info->reglist.index;
251 opcodeh2 = 0x0;
252 break;
253 case AARCH64_OPND_QLF_S_H:
254 /* Index encoded in "Q:S:size<1>". */
255 QSsize = info->reglist.index << 1;
256 opcodeh2 = 0x1;
257 break;
258 case AARCH64_OPND_QLF_S_S:
259 /* Index encoded in "Q:S". */
260 QSsize = info->reglist.index << 2;
261 opcodeh2 = 0x2;
262 break;
263 case AARCH64_OPND_QLF_S_D:
264 /* Index encoded in "Q". */
265 QSsize = info->reglist.index << 3 | 0x1;
266 opcodeh2 = 0x2;
267 break;
268 default:
269 assert (0);
270 }
271 insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
272 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
273 insert_field_2 (&field, code, opcodeh2, 0);
274
275 return NULL;
276}
277
278/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
279 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
280 or SSHR <V><d>, <V><n>, #<shift>. */
281const char *
282aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
283 const aarch64_opnd_info *info,
284 aarch64_insn *code, const aarch64_inst *inst)
285{
286 unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
287 aarch64_insn Q, imm;
288
289 if (inst->opcode->iclass == asimdshf)
290 {
291 /* Q
292 immh Q <T>
293 0000 x SEE AdvSIMD modified immediate
294 0001 0 8B
295 0001 1 16B
296 001x 0 4H
297 001x 1 8H
298 01xx 0 2S
299 01xx 1 4S
300 1xxx 0 RESERVED
301 1xxx 1 2D */
302 Q = (val & 0x1) ? 1 : 0;
303 insert_field (FLD_Q, code, Q, inst->opcode->mask);
304 val >>= 1;
305 }
306
307 assert (info->type == AARCH64_OPND_IMM_VLSR
308 || info->type == AARCH64_OPND_IMM_VLSL);
309
310 if (info->type == AARCH64_OPND_IMM_VLSR)
311 /* immh:immb
312 immh <shift>
313 0000 SEE AdvSIMD modified immediate
314 0001 (16-UInt(immh:immb))
315 001x (32-UInt(immh:immb))
316 01xx (64-UInt(immh:immb))
317 1xxx (128-UInt(immh:immb)) */
318 imm = (16 << (unsigned)val) - info->imm.value;
319 else
320 /* immh:immb
321 immh <shift>
322 0000 SEE AdvSIMD modified immediate
323 0001 (UInt(immh:immb)-8)
324 001x (UInt(immh:immb)-16)
325 01xx (UInt(immh:immb)-32)
326 1xxx (UInt(immh:immb)-64) */
327 imm = info->imm.value + (8 << (unsigned)val);
328 insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
329
330 return NULL;
331}
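/* Worked example: for SSHR <Vd>.2S, <Vn>.2S, #3 the function above encodes
   Q as 0 and writes 64 - 3 = 61 into immh:immb (immh = 0111, immb = 101),
   matching the 01xx row for S-sized elements in the table.  */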
332
333/* Insert fields for e.g. the immediate operands in
334 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
335const char *
336aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
337 aarch64_insn *code,
338 const aarch64_inst *inst ATTRIBUTE_UNUSED)
339{
340 int64_t imm;
341
342 imm = info->imm.value;
343 if (operand_need_shift_by_two (self))
344 imm >>= 2;
 345 insert_all_fields (self, code, imm);
346 return NULL;
347}
348
349/* Insert immediate and its shift amount for e.g. the last operand in
350 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
351const char *
352aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
 353 aarch64_insn *code, const aarch64_inst *inst)
354{
355 /* imm16 */
356 aarch64_ins_imm (self, info, code, inst);
357 /* hw */
358 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
359 return NULL;
360}
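/* Worked example: for MOVZ <Wd>, #0x1234, LSL #16 the shifter amount is 16,
   so hw is encoded as 16 >> 4 = 1 and imm16 as 0x1234.  */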
361
362/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
363 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
364const char *
365aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
366 const aarch64_opnd_info *info,
367 aarch64_insn *code,
368 const aarch64_inst *inst ATTRIBUTE_UNUSED)
369{
370 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
371 uint64_t imm = info->imm.value;
372 enum aarch64_modifier_kind kind = info->shifter.kind;
373 int amount = info->shifter.amount;
374 aarch64_field field = {0, 0};
375
376 /* a:b:c:d:e:f:g:h */
377 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
378 {
379 /* Either MOVI <Dd>, #<imm>
380 or MOVI <Vd>.2D, #<imm>.
381 <imm> is a 64-bit immediate
382 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
383 encoded in "a:b:c:d:e:f:g:h". */
384 imm = aarch64_shrink_expanded_imm8 (imm);
385 assert ((int)imm >= 0);
386 }
387 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
388
389 if (kind == AARCH64_MOD_NONE)
390 return NULL;
391
392 /* shift amount partially in cmode */
393 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
394 if (kind == AARCH64_MOD_LSL)
395 {
396 /* AARCH64_MOD_LSL: shift zeros. */
397 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
398 assert (esize == 4 || esize == 2 || esize == 1);
399 /* For 8-bit move immediate, the optional LSL #0 does not require
400 encoding. */
401 if (esize == 1)
402 return NULL;
403 amount >>= 3;
404 if (esize == 4)
405 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
406 else
407 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
408 }
409 else
410 {
411 /* AARCH64_MOD_MSL: shift ones. */
412 amount >>= 4;
413 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
414 }
415 insert_field_2 (&field, code, amount, 0);
416
417 return NULL;
418}
419
420/* Insert fields for an 8-bit floating-point immediate. */
421const char *
422aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
423 aarch64_insn *code,
424 const aarch64_inst *inst ATTRIBUTE_UNUSED)
425{
426 insert_all_fields (self, code, info->imm.value);
427 return NULL;
428}
429
 430/* Insert #<fbits> for the immediate operand in fp fixed-point instructions,
431 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
432const char *
433aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
434 aarch64_insn *code,
435 const aarch64_inst *inst ATTRIBUTE_UNUSED)
436{
437 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
438 return NULL;
439}
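/* Worked example: for SCVTF <Dd>, <Wn>, #10 the field selected by
   self->fields[0] is encoded as 64 - 10 = 54.  */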
440
441/* Insert arithmetic immediate for e.g. the last operand in
442 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
443const char *
444aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
445 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
446{
447 /* shift */
448 aarch64_insn value = info->shifter.amount ? 1 : 0;
449 insert_field (self->fields[0], code, value, 0);
450 /* imm12 (unsigned) */
451 insert_field (self->fields[1], code, info->imm.value, 0);
452 return NULL;
453}
454
455/* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
456 the operand should be inverted before encoding. */
457static const char *
458aarch64_ins_limm_1 (const aarch64_operand *self,
459 const aarch64_opnd_info *info, aarch64_insn *code,
460 const aarch64_inst *inst, bfd_boolean invert_p)
461{
462 aarch64_insn value;
463 uint64_t imm = info->imm.value;
 464 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
 465
 466 if (invert_p)
 467 imm = ~imm;
 468 if (aarch64_logical_immediate_p (imm, esize, &value) == FALSE)
469 /* The constraint check should have guaranteed this wouldn't happen. */
470 assert (0);
471
472 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
473 self->fields[0]);
474 return NULL;
475}
476
477/* Insert logical/bitmask immediate for e.g. the last operand in
478 ORR <Wd|WSP>, <Wn>, #<imm>. */
479const char *
480aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
481 aarch64_insn *code, const aarch64_inst *inst)
482{
483 return aarch64_ins_limm_1 (self, info, code, inst,
484 inst->opcode->op == OP_BIC);
485}
486
487/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
488const char *
489aarch64_ins_inv_limm (const aarch64_operand *self,
490 const aarch64_opnd_info *info, aarch64_insn *code,
491 const aarch64_inst *inst)
492{
493 return aarch64_ins_limm_1 (self, info, code, inst, TRUE);
494}
495
496/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
497 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
498const char *
499aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
500 aarch64_insn *code, const aarch64_inst *inst)
501{
 502 aarch64_insn value = 0;
503
504 assert (info->idx == 0);
505
506 /* Rt */
507 aarch64_ins_regno (self, info, code, inst);
508 if (inst->opcode->iclass == ldstpair_indexed
509 || inst->opcode->iclass == ldstnapair_offs
510 || inst->opcode->iclass == ldstpair_off
511 || inst->opcode->iclass == loadlit)
512 {
513 /* size */
514 switch (info->qualifier)
515 {
516 case AARCH64_OPND_QLF_S_S: value = 0; break;
517 case AARCH64_OPND_QLF_S_D: value = 1; break;
518 case AARCH64_OPND_QLF_S_Q: value = 2; break;
519 default: assert (0);
520 }
521 insert_field (FLD_ldst_size, code, value, 0);
522 }
523 else
524 {
525 /* opc[1]:size */
526 value = aarch64_get_qualifier_standard_value (info->qualifier);
527 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
528 }
529
530 return NULL;
531}
532
533/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
534const char *
535aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
536 const aarch64_opnd_info *info, aarch64_insn *code,
537 const aarch64_inst *inst ATTRIBUTE_UNUSED)
538{
539 /* Rn */
540 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
541 return NULL;
542}
543
544/* Encode the address operand for e.g.
545 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
546const char *
547aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
548 const aarch64_opnd_info *info, aarch64_insn *code,
549 const aarch64_inst *inst ATTRIBUTE_UNUSED)
550{
551 aarch64_insn S;
552 enum aarch64_modifier_kind kind = info->shifter.kind;
553
554 /* Rn */
555 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
556 /* Rm */
557 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
558 /* option */
559 if (kind == AARCH64_MOD_LSL)
 560 kind = AARCH64_MOD_UXTX; /* Trick to enable table-driven lookup. */
561 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
562 /* S */
563 if (info->qualifier != AARCH64_OPND_QLF_S_B)
564 S = info->shifter.amount != 0;
565 else
566 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
567 S <amount>
568 0 [absent]
569 1 #0
570 Must be #0 if <extend> is explicitly LSL. */
571 S = info->shifter.operator_present && info->shifter.amount_present;
572 insert_field (FLD_S, code, S, 0);
573
574 return NULL;
575}
576
577/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
578const char *
579aarch64_ins_addr_simm (const aarch64_operand *self,
580 const aarch64_opnd_info *info,
581 aarch64_insn *code,
582 const aarch64_inst *inst ATTRIBUTE_UNUSED)
583{
584 int imm;
585
586 /* Rn */
587 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
588 /* simm (imm9 or imm7) */
589 imm = info->addr.offset.imm;
590 if (self->fields[0] == FLD_imm7)
 591 /* Scaled immediate in ld/st pair instructions. */
592 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
593 insert_field (self->fields[0], code, imm, 0);
594 /* pre/post- index */
595 if (info->addr.writeback)
596 {
597 assert (inst->opcode->iclass != ldst_unscaled
598 && inst->opcode->iclass != ldstnapair_offs
599 && inst->opcode->iclass != ldstpair_off
600 && inst->opcode->iclass != ldst_unpriv);
601 assert (info->addr.preind != info->addr.postind);
602 if (info->addr.preind)
603 insert_field (self->fields[1], code, 1, 0);
604 }
605
606 return NULL;
607}
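/* Worked example: assuming an address qualifier with an 8-byte element size
   (e.g. a pair of X or D registers), an offset of #-16 is scaled down to -2
   before being stored in the imm7 field.  */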
608
609/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
610const char *
611aarch64_ins_addr_uimm12 (const aarch64_operand *self,
612 const aarch64_opnd_info *info,
613 aarch64_insn *code,
614 const aarch64_inst *inst ATTRIBUTE_UNUSED)
615{
616 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
617
618 /* Rn */
619 insert_field (self->fields[0], code, info->addr.base_regno, 0);
620 /* uimm12 */
 621 insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
622 return NULL;
623}
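/* Worked example: for LDR <Xt>, [<Xn|SP>, #24] the element size is 8, so
   the shift is 3 and uimm12 is encoded as 24 >> 3 = 3.  */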
624
625/* Encode the address operand for e.g.
626 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
627const char *
628aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
629 const aarch64_opnd_info *info, aarch64_insn *code,
630 const aarch64_inst *inst ATTRIBUTE_UNUSED)
631{
632 /* Rn */
633 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
634 /* Rm | #<amount> */
635 if (info->addr.offset.is_reg)
636 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
637 else
638 insert_field (FLD_Rm, code, 0x1f, 0);
639 return NULL;
640}
641
642/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
643const char *
644aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
645 const aarch64_opnd_info *info, aarch64_insn *code,
646 const aarch64_inst *inst ATTRIBUTE_UNUSED)
647{
648 /* cond */
649 insert_field (FLD_cond, code, info->cond->value, 0);
650 return NULL;
651}
652
653/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
654const char *
655aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
656 const aarch64_opnd_info *info, aarch64_insn *code,
657 const aarch64_inst *inst ATTRIBUTE_UNUSED)
658{
659 /* op0:op1:CRn:CRm:op2 */
660 insert_fields (code, info->sysreg, inst->opcode->mask, 5,
661 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
662 return NULL;
663}
664
665/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
666const char *
667aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
668 const aarch64_opnd_info *info, aarch64_insn *code,
669 const aarch64_inst *inst ATTRIBUTE_UNUSED)
670{
671 /* op1:op2 */
672 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
673 FLD_op2, FLD_op1);
674 return NULL;
675}
676
677/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
678const char *
679aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
680 const aarch64_opnd_info *info, aarch64_insn *code,
681 const aarch64_inst *inst ATTRIBUTE_UNUSED)
682{
683 /* op1:CRn:CRm:op2 */
684 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
685 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
686 return NULL;
687}
688
689/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
690
691const char *
692aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
693 const aarch64_opnd_info *info, aarch64_insn *code,
694 const aarch64_inst *inst ATTRIBUTE_UNUSED)
695{
696 /* CRm */
697 insert_field (FLD_CRm, code, info->barrier->value, 0);
698 return NULL;
699}
700
701/* Encode the prefetch operation option operand for e.g.
702 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
703
704const char *
705aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
706 const aarch64_opnd_info *info, aarch64_insn *code,
707 const aarch64_inst *inst ATTRIBUTE_UNUSED)
708{
709 /* prfop in Rt */
710 insert_field (FLD_Rt, code, info->prfop->value, 0);
711 return NULL;
712}
713
714/* Encode the hint number for instructions that alias HINT but take an
715 operand. */
716
717const char *
718aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
719 const aarch64_opnd_info *info, aarch64_insn *code,
720 const aarch64_inst *inst ATTRIBUTE_UNUSED)
721{
722 /* CRm:op2. */
723 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
724 return NULL;
725}
726
727/* Encode the extended register operand for e.g.
728 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
729const char *
730aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
731 const aarch64_opnd_info *info, aarch64_insn *code,
732 const aarch64_inst *inst ATTRIBUTE_UNUSED)
733{
734 enum aarch64_modifier_kind kind;
735
736 /* Rm */
737 insert_field (FLD_Rm, code, info->reg.regno, 0);
738 /* option */
739 kind = info->shifter.kind;
740 if (kind == AARCH64_MOD_LSL)
741 kind = info->qualifier == AARCH64_OPND_QLF_W
742 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
743 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
744 /* imm3 */
745 insert_field (FLD_imm3, code, info->shifter.amount, 0);
746
747 return NULL;
748}
749
750/* Encode the shifted register operand for e.g.
751 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
752const char *
753aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
754 const aarch64_opnd_info *info, aarch64_insn *code,
755 const aarch64_inst *inst ATTRIBUTE_UNUSED)
756{
757 /* Rm */
758 insert_field (FLD_Rm, code, info->reg.regno, 0);
759 /* shift */
760 insert_field (FLD_shift, code,
761 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
762 /* imm6 */
763 insert_field (FLD_imm6, code, info->shifter.amount, 0);
764
765 return NULL;
766}
767
768/* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
769 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
770 SELF's operand-dependent value. fields[0] specifies the field that
771 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
772const char *
773aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
774 const aarch64_opnd_info *info,
775 aarch64_insn *code,
776 const aarch64_inst *inst ATTRIBUTE_UNUSED)
777{
778 int factor = 1 + get_operand_specific_data (self);
779 insert_field (self->fields[0], code, info->addr.base_regno, 0);
780 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
781 return NULL;
782}
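/* Worked example: with an operand-dependent value of 1 the factor is 2, so
   an address written as [<base>, #6, MUL VL] stores 6 / 2 = 3 in SVE_imm4.  */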
783
784/* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
785 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
786 SELF's operand-dependent value. fields[0] specifies the field that
787 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
788const char *
789aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
790 const aarch64_opnd_info *info,
791 aarch64_insn *code,
792 const aarch64_inst *inst ATTRIBUTE_UNUSED)
793{
794 int factor = 1 + get_operand_specific_data (self);
795 insert_field (self->fields[0], code, info->addr.base_regno, 0);
796 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
797 return NULL;
798}
799
800/* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
801 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
802 SELF's operand-dependent value. fields[0] specifies the field that
803 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
804 and imm3 fields, with imm3 being the less-significant part. */
805const char *
806aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
807 const aarch64_opnd_info *info,
808 aarch64_insn *code,
809 const aarch64_inst *inst ATTRIBUTE_UNUSED)
810{
811 int factor = 1 + get_operand_specific_data (self);
812 insert_field (self->fields[0], code, info->addr.base_regno, 0);
813 insert_fields (code, info->addr.offset.imm / factor, 0,
814 2, FLD_imm3, FLD_SVE_imm6);
815 return NULL;
816}
817
818/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
819 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
820 value. fields[0] specifies the base register field. */
821const char *
822aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
823 const aarch64_opnd_info *info, aarch64_insn *code,
824 const aarch64_inst *inst ATTRIBUTE_UNUSED)
825{
826 int factor = 1 << get_operand_specific_data (self);
827 insert_field (self->fields[0], code, info->addr.base_regno, 0);
828 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
829 return NULL;
830}
831
832/* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
833 is SELF's operand-dependent value. fields[0] specifies the base
834 register field and fields[1] specifies the offset register field. */
835const char *
836aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
837 const aarch64_opnd_info *info, aarch64_insn *code,
838 const aarch64_inst *inst ATTRIBUTE_UNUSED)
839{
840 insert_field (self->fields[0], code, info->addr.base_regno, 0);
841 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
842 return NULL;
843}
844
845/* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
846 <shift> is SELF's operand-dependent value. fields[0] specifies the
847 base register field, fields[1] specifies the offset register field and
848 fields[2] is a single-bit field that selects SXTW over UXTW. */
849const char *
850aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
851 const aarch64_opnd_info *info, aarch64_insn *code,
852 const aarch64_inst *inst ATTRIBUTE_UNUSED)
853{
854 insert_field (self->fields[0], code, info->addr.base_regno, 0);
855 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
856 if (info->shifter.kind == AARCH64_MOD_UXTW)
857 insert_field (self->fields[2], code, 0, 0);
858 else
859 insert_field (self->fields[2], code, 1, 0);
860 return NULL;
861}
862
863/* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
864 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
865 fields[0] specifies the base register field. */
866const char *
867aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
868 const aarch64_opnd_info *info, aarch64_insn *code,
869 const aarch64_inst *inst ATTRIBUTE_UNUSED)
870{
871 int factor = 1 << get_operand_specific_data (self);
872 insert_field (self->fields[0], code, info->addr.base_regno, 0);
873 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
874 return NULL;
875}
876
877/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
878 where <modifier> is fixed by the instruction and where <msz> is a
879 2-bit unsigned number. fields[0] specifies the base register field
880 and fields[1] specifies the offset register field. */
881static const char *
882aarch64_ext_sve_addr_zz (const aarch64_operand *self,
883 const aarch64_opnd_info *info, aarch64_insn *code)
884{
885 insert_field (self->fields[0], code, info->addr.base_regno, 0);
886 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
887 insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
888 return NULL;
889}
890
891/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
892 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
893 field and fields[1] specifies the offset register field. */
894const char *
895aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
896 const aarch64_opnd_info *info, aarch64_insn *code,
897 const aarch64_inst *inst ATTRIBUTE_UNUSED)
898{
899 return aarch64_ext_sve_addr_zz (self, info, code);
900}
901
902/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
903 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
904 field and fields[1] specifies the offset register field. */
905const char *
906aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
907 const aarch64_opnd_info *info,
908 aarch64_insn *code,
909 const aarch64_inst *inst ATTRIBUTE_UNUSED)
910{
911 return aarch64_ext_sve_addr_zz (self, info, code);
912}
913
914/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
915 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
916 field and fields[1] specifies the offset register field. */
917const char *
918aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
919 const aarch64_opnd_info *info,
920 aarch64_insn *code,
921 const aarch64_inst *inst ATTRIBUTE_UNUSED)
922{
923 return aarch64_ext_sve_addr_zz (self, info, code);
924}
925
926/* Encode an SVE ADD/SUB immediate. */
927const char *
928aarch64_ins_sve_aimm (const aarch64_operand *self,
929 const aarch64_opnd_info *info, aarch64_insn *code,
930 const aarch64_inst *inst ATTRIBUTE_UNUSED)
931{
932 if (info->shifter.amount == 8)
933 insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
934 else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
935 insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
936 else
937 insert_all_fields (self, code, info->imm.value & 0xff);
938 return NULL;
939}
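/* Worked example: an immediate of #768 (3 << 8) written without an explicit
   shift takes the second branch above and is encoded as the 9-bit value
   3 | 256, i.e. an imm8 of 3 with the shift bit set; #7 takes the final
   branch and is encoded as 7.  */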
940
941/* Encode an SVE CPY/DUP immediate. */
942const char *
943aarch64_ins_sve_asimm (const aarch64_operand *self,
944 const aarch64_opnd_info *info, aarch64_insn *code,
945 const aarch64_inst *inst)
946{
947 return aarch64_ins_sve_aimm (self, info, code, inst);
948}
949
950/* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
951 array specifies which field to use for Zn. MM is encoded in the
952 concatenation of imm5 and SVE_tszh, with imm5 being the less
953 significant part. */
954const char *
955aarch64_ins_sve_index (const aarch64_operand *self,
956 const aarch64_opnd_info *info, aarch64_insn *code,
957 const aarch64_inst *inst ATTRIBUTE_UNUSED)
958{
959 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
960 insert_field (self->fields[0], code, info->reglane.regno, 0);
961 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
962 2, FLD_imm5, FLD_SVE_tszh);
963 return NULL;
964}
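/* Worked example: for a .H element (esize 2) at index 3 the function above
   encodes (3 * 2 + 1) * 2 = 14 (binary 0001110) in SVE_tszh:imm5; the lowest
   set bit marks the element size and the bits above it hold the index.  */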
965
966/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
967const char *
968aarch64_ins_sve_limm_mov (const aarch64_operand *self,
969 const aarch64_opnd_info *info, aarch64_insn *code,
970 const aarch64_inst *inst)
971{
972 return aarch64_ins_limm (self, info, code, inst);
973}
974
975/* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
976 to use for Zn. */
977const char *
978aarch64_ins_sve_reglist (const aarch64_operand *self,
979 const aarch64_opnd_info *info, aarch64_insn *code,
980 const aarch64_inst *inst ATTRIBUTE_UNUSED)
981{
982 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
983 return NULL;
984}
985
986/* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
987 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
988 field. */
989const char *
990aarch64_ins_sve_scale (const aarch64_operand *self,
991 const aarch64_opnd_info *info, aarch64_insn *code,
992 const aarch64_inst *inst ATTRIBUTE_UNUSED)
993{
994 insert_all_fields (self, code, info->imm.value);
995 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
996 return NULL;
997}
998
999/* Encode an SVE shift left immediate. */
1000const char *
1001aarch64_ins_sve_shlimm (const aarch64_operand *self,
1002 const aarch64_opnd_info *info, aarch64_insn *code,
1003 const aarch64_inst *inst)
1004{
1005 const aarch64_opnd_info *prev_operand;
1006 unsigned int esize;
1007
1008 assert (info->idx > 0);
1009 prev_operand = &inst->operands[info->idx - 1];
1010 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1011 insert_all_fields (self, code, 8 * esize + info->imm.value);
1012 return NULL;
1013}
1014
1015/* Encode an SVE shift right immediate. */
1016const char *
1017aarch64_ins_sve_shrimm (const aarch64_operand *self,
1018 const aarch64_opnd_info *info, aarch64_insn *code,
1019 const aarch64_inst *inst)
1020{
1021 const aarch64_opnd_info *prev_operand;
1022 unsigned int esize;
1023
1024 assert (info->idx > 0);
1025 prev_operand = &inst->operands[info->idx - 1];
1026 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1027 insert_all_fields (self, code, 16 * esize - info->imm.value);
1028 return NULL;
1029}
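/* Worked example: assuming a .H (esize 2) vector operand, a left shift of #3
   is encoded above as 8 * 2 + 3 = 19 and a right shift of #3 as
   16 * 2 - 3 = 29, both via insert_all_fields.  */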
1030
1031/* Encode a single-bit immediate that selects between #0.5 and #1.0.
1032 The fields array specifies which field to use. */
1033const char *
1034aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1035 const aarch64_opnd_info *info,
1036 aarch64_insn *code,
1037 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1038{
1039 if (info->imm.value == 0x3f000000)
1040 insert_field (self->fields[0], code, 0, 0);
1041 else
1042 insert_field (self->fields[0], code, 1, 0);
1043 return NULL;
1044}
1045
1046/* Encode a single-bit immediate that selects between #0.5 and #2.0.
1047 The fields array specifies which field to use. */
1048const char *
1049aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1050 const aarch64_opnd_info *info,
1051 aarch64_insn *code,
1052 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1053{
1054 if (info->imm.value == 0x3f000000)
1055 insert_field (self->fields[0], code, 0, 0);
1056 else
1057 insert_field (self->fields[0], code, 1, 0);
1058 return NULL;
1059}
1060
1061/* Encode a single-bit immediate that selects between #0.0 and #1.0.
1062 The fields array specifies which field to use. */
1063const char *
1064aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1065 const aarch64_opnd_info *info,
1066 aarch64_insn *code,
1067 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1068{
1069 if (info->imm.value == 0)
1070 insert_field (self->fields[0], code, 0, 0);
1071 else
1072 insert_field (self->fields[0], code, 1, 0);
1073 return NULL;
1074}
1075
1076/* Miscellaneous encoding functions. */
1077
1078/* Encode size[0], i.e. bit 22, for
1079 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1080
1081static void
1082encode_asimd_fcvt (aarch64_inst *inst)
1083{
1084 aarch64_insn value;
1085 aarch64_field field = {0, 0};
1086 enum aarch64_opnd_qualifier qualifier;
1087
1088 switch (inst->opcode->op)
1089 {
1090 case OP_FCVTN:
1091 case OP_FCVTN2:
1092 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1093 qualifier = inst->operands[1].qualifier;
1094 break;
1095 case OP_FCVTL:
1096 case OP_FCVTL2:
1097 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1098 qualifier = inst->operands[0].qualifier;
1099 break;
1100 default:
1101 assert (0);
1102 }
1103 assert (qualifier == AARCH64_OPND_QLF_V_4S
1104 || qualifier == AARCH64_OPND_QLF_V_2D);
1105 value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
1106 gen_sub_field (FLD_size, 0, 1, &field);
1107 insert_field_2 (&field, &inst->value, value, 0);
1108}
1109
1110/* Encode size[0], i.e. bit 22, for
1111 e.g. FCVTXN <Vb><d>, <Va><n>. */
1112
1113static void
1114encode_asisd_fcvtxn (aarch64_inst *inst)
1115{
1116 aarch64_insn val = 1;
1117 aarch64_field field = {0, 0};
1118 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1119 gen_sub_field (FLD_size, 0, 1, &field);
1120 insert_field_2 (&field, &inst->value, val, 0);
1121}
1122
1123/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1124static void
1125encode_fcvt (aarch64_inst *inst)
1126{
1127 aarch64_insn val;
1128 const aarch64_field field = {15, 2};
1129
1130 /* opc dstsize */
1131 switch (inst->operands[0].qualifier)
1132 {
1133 case AARCH64_OPND_QLF_S_S: val = 0; break;
1134 case AARCH64_OPND_QLF_S_D: val = 1; break;
1135 case AARCH64_OPND_QLF_S_H: val = 3; break;
1136 default: abort ();
1137 }
1138 insert_field_2 (&field, &inst->value, val, 0);
1139
1140 return;
1141}
1142
1143/* Do miscellaneous encodings that are not common enough to be driven by
1144 flags. */
1145
1146static void
1147do_misc_encoding (aarch64_inst *inst)
1148{
1149 switch (inst->opcode->op)
1150 {
1151 case OP_FCVT:
1152 encode_fcvt (inst);
1153 break;
1154 case OP_FCVTN:
1155 case OP_FCVTN2:
1156 case OP_FCVTL:
1157 case OP_FCVTL2:
1158 encode_asimd_fcvt (inst);
1159 break;
1160 case OP_FCVTXN_S:
1161 encode_asisd_fcvtxn (inst);
1162 break;
1163 default: break;
1164 }
1165}
1166
1167/* Encode the 'size' and 'Q' field for e.g. SHADD. */
1168static void
1169encode_sizeq (aarch64_inst *inst)
1170{
1171 aarch64_insn sizeq;
1172 enum aarch64_field_kind kind;
1173 int idx;
1174
1175 /* Get the index of the operand whose information we are going to use
1176 to encode the size and Q fields.
1177 This is deduced from the possible valid qualifier lists. */
1178 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1179 DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
1180 aarch64_get_qualifier_name (inst->operands[idx].qualifier));
1181 sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
1182 /* Q */
1183 insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
1184 /* size */
1185 if (inst->opcode->iclass == asisdlse
1186 || inst->opcode->iclass == asisdlsep
1187 || inst->opcode->iclass == asisdlso
1188 || inst->opcode->iclass == asisdlsop)
1189 kind = FLD_vldst_size;
1190 else
1191 kind = FLD_size;
1192 insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
1193}
1194
 1195/* Opcodes that have fields shared by multiple operands are usually flagged
 1196 with flags. In this function, we detect such flags and use the
 1197 information in one of the related operands to do the encoding. The chosen
 1198 operand is not an arbitrary one but one of the operands that has enough
 1199 information for such an encoding. */
1200
1201static void
1202do_special_encoding (struct aarch64_inst *inst)
1203{
1204 int idx;
 1205 aarch64_insn value = 0;
1206
1207 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
1208
1209 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1210 if (inst->opcode->flags & F_COND)
1211 {
1212 insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
1213 }
1214 if (inst->opcode->flags & F_SF)
1215 {
1216 idx = select_operand_for_sf_field_coding (inst->opcode);
1217 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
1218 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
1219 ? 1 : 0;
1220 insert_field (FLD_sf, &inst->value, value, 0);
1221 if (inst->opcode->flags & F_N)
1222 insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
1223 }
1224 if (inst->opcode->flags & F_LSE_SZ)
1225 {
1226 idx = select_operand_for_sf_field_coding (inst->opcode);
1227 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
1228 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
1229 ? 1 : 0;
1230 insert_field (FLD_lse_sz, &inst->value, value, 0);
1231 }
1232 if (inst->opcode->flags & F_SIZEQ)
1233 encode_sizeq (inst);
1234 if (inst->opcode->flags & F_FPTYPE)
1235 {
1236 idx = select_operand_for_fptype_field_coding (inst->opcode);
1237 switch (inst->operands[idx].qualifier)
1238 {
1239 case AARCH64_OPND_QLF_S_S: value = 0; break;
1240 case AARCH64_OPND_QLF_S_D: value = 1; break;
1241 case AARCH64_OPND_QLF_S_H: value = 3; break;
1242 default: assert (0);
1243 }
1244 insert_field (FLD_type, &inst->value, value, 0);
1245 }
1246 if (inst->opcode->flags & F_SSIZE)
1247 {
1248 enum aarch64_opnd_qualifier qualifier;
1249 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1250 qualifier = inst->operands[idx].qualifier;
1251 assert (qualifier >= AARCH64_OPND_QLF_S_B
1252 && qualifier <= AARCH64_OPND_QLF_S_Q);
1253 value = aarch64_get_qualifier_standard_value (qualifier);
1254 insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
1255 }
1256 if (inst->opcode->flags & F_T)
1257 {
1258 int num; /* num of consecutive '0's on the right side of imm5<3:0>. */
1259 aarch64_field field = {0, 0};
1260 enum aarch64_opnd_qualifier qualifier;
1261
1262 idx = 0;
1263 qualifier = inst->operands[idx].qualifier;
1264 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1265 == AARCH64_OPND_CLASS_SIMD_REG
1266 && qualifier >= AARCH64_OPND_QLF_V_8B
1267 && qualifier <= AARCH64_OPND_QLF_V_2D);
1268 /* imm5<3:0> q <t>
1269 0000 x reserved
1270 xxx1 0 8b
1271 xxx1 1 16b
1272 xx10 0 4h
1273 xx10 1 8h
1274 x100 0 2s
1275 x100 1 4s
1276 1000 0 reserved
1277 1000 1 2d */
1278 value = aarch64_get_qualifier_standard_value (qualifier);
1279 insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
1280 num = (int) value >> 1;
1281 assert (num >= 0 && num <= 3);
1282 gen_sub_field (FLD_imm5, 0, num + 1, &field);
1283 insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
1284 }
1285 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1286 {
1287 /* Use Rt to encode in the case of e.g.
1288 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1289 enum aarch64_opnd_qualifier qualifier;
1290 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1291 if (idx == -1)
 1292 /* Otherwise use the result operand, which has to be an integer
1293 register. */
1294 idx = 0;
1295 assert (idx == 0 || idx == 1);
1296 assert (aarch64_get_operand_class (inst->opcode->operands[idx])
1297 == AARCH64_OPND_CLASS_INT_REG);
1298 qualifier = inst->operands[idx].qualifier;
1299 insert_field (FLD_Q, &inst->value,
1300 aarch64_get_qualifier_standard_value (qualifier), 0);
1301 }
1302 if (inst->opcode->flags & F_LDS_SIZE)
1303 {
1304 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1305 enum aarch64_opnd_qualifier qualifier;
1306 aarch64_field field = {0, 0};
1307 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1308 == AARCH64_OPND_CLASS_INT_REG);
1309 gen_sub_field (FLD_opc, 0, 1, &field);
1310 qualifier = inst->operands[0].qualifier;
1311 insert_field_2 (&field, &inst->value,
1312 1 - aarch64_get_qualifier_standard_value (qualifier), 0);
1313 }
1314 /* Miscellaneous encoding as the last step. */
1315 if (inst->opcode->flags & F_MISC)
1316 do_misc_encoding (inst);
1317
1318 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
1319}
1320
1321/* Converters converting an alias opcode instruction to its real form. */
1322
1323/* ROR <Wd>, <Ws>, #<shift>
1324 is equivalent to:
1325 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1326static void
1327convert_ror_to_extr (aarch64_inst *inst)
1328{
1329 copy_operand_info (inst, 3, 2);
1330 copy_operand_info (inst, 2, 1);
1331}
1332
1333/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1334 is equivalent to:
1335 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1336static void
1337convert_xtl_to_shll (aarch64_inst *inst)
1338{
1339 inst->operands[2].qualifier = inst->operands[1].qualifier;
1340 inst->operands[2].imm.value = 0;
1341}
1342
1343/* Convert
1344 LSR <Xd>, <Xn>, #<shift>
1345 to
1346 UBFM <Xd>, <Xn>, #<shift>, #63. */
1347static void
1348convert_sr_to_bfm (aarch64_inst *inst)
1349{
1350 inst->operands[3].imm.value =
1351 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1352}
1353
1354/* Convert MOV to ORR. */
1355static void
1356convert_mov_to_orr (aarch64_inst *inst)
1357{
1358 /* MOV <Vd>.<T>, <Vn>.<T>
1359 is equivalent to:
1360 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1361 copy_operand_info (inst, 2, 1);
1362}
1363
1364/* When <imms> >= <immr>, the instruction written:
1365 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1366 is equivalent to:
1367 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1368
1369static void
1370convert_bfx_to_bfm (aarch64_inst *inst)
1371{
1372 int64_t lsb, width;
1373
1374 /* Convert the operand. */
1375 lsb = inst->operands[2].imm.value;
1376 width = inst->operands[3].imm.value;
1377 inst->operands[2].imm.value = lsb;
1378 inst->operands[3].imm.value = lsb + width - 1;
1379}
1380
1381/* When <imms> < <immr>, the instruction written:
1382 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1383 is equivalent to:
1384 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1385
1386static void
1387convert_bfi_to_bfm (aarch64_inst *inst)
1388{
1389 int64_t lsb, width;
1390
1391 /* Convert the operand. */
1392 lsb = inst->operands[2].imm.value;
1393 width = inst->operands[3].imm.value;
1394 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1395 {
1396 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1397 inst->operands[3].imm.value = width - 1;
1398 }
1399 else
1400 {
1401 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1402 inst->operands[3].imm.value = width - 1;
1403 }
1404}
1405
1406/* The instruction written:
1407 BFC <Xd>, #<lsb>, #<width>
1408 is equivalent to:
1409 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1410
1411static void
1412convert_bfc_to_bfm (aarch64_inst *inst)
1413{
1414 int64_t lsb, width;
1415
1416 /* Insert XZR. */
1417 copy_operand_info (inst, 3, 2);
1418 copy_operand_info (inst, 2, 1);
1419 copy_operand_info (inst, 2, 0);
1420 inst->operands[1].reg.regno = 0x1f;
1421
 1422 /* Convert the immediate operand. */
1423 lsb = inst->operands[2].imm.value;
1424 width = inst->operands[3].imm.value;
1425 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1426 {
1427 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1428 inst->operands[3].imm.value = width - 1;
1429 }
1430 else
1431 {
1432 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1433 inst->operands[3].imm.value = width - 1;
1434 }
1435}
1436
1437/* The instruction written:
1438 LSL <Xd>, <Xn>, #<shift>
1439 is equivalent to:
1440 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1441
1442static void
1443convert_lsl_to_ubfm (aarch64_inst *inst)
1444{
1445 int64_t shift = inst->operands[2].imm.value;
1446
1447 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1448 {
1449 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1450 inst->operands[3].imm.value = 31 - shift;
1451 }
1452 else
1453 {
1454 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1455 inst->operands[3].imm.value = 63 - shift;
1456 }
1457}
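/* Worked example: LSL X0, X1, #4 is rewritten by the function above as
   UBFM X0, X1, #60, #59, since (64 - 4) & 0x3f = 60 and 63 - 4 = 59.  */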
1458
1459/* CINC <Wd>, <Wn>, <cond>
1460 is equivalent to:
1461 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1462
1463static void
1464convert_to_csel (aarch64_inst *inst)
1465{
1466 copy_operand_info (inst, 3, 2);
1467 copy_operand_info (inst, 2, 1);
1468 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1469}
1470
1471/* CSET <Wd>, <cond>
1472 is equivalent to:
1473 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1474
1475static void
1476convert_cset_to_csinc (aarch64_inst *inst)
1477{
1478 copy_operand_info (inst, 3, 1);
1479 copy_operand_info (inst, 2, 0);
1480 copy_operand_info (inst, 1, 0);
1481 inst->operands[1].reg.regno = 0x1f;
1482 inst->operands[2].reg.regno = 0x1f;
1483 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1484}
1485
1486/* MOV <Wd>, #<imm>
1487 is equivalent to:
1488 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1489
1490static void
1491convert_mov_to_movewide (aarch64_inst *inst)
1492{
1493 int is32;
1494 uint32_t shift_amount;
1495 uint64_t value;
1496
1497 switch (inst->opcode->op)
1498 {
1499 case OP_MOV_IMM_WIDE:
1500 value = inst->operands[1].imm.value;
1501 break;
1502 case OP_MOV_IMM_WIDEN:
1503 value = ~inst->operands[1].imm.value;
1504 break;
1505 default:
1506 assert (0);
1507 }
1508 inst->operands[1].type = AARCH64_OPND_HALF;
1509 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1510 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1511 /* The constraint check should have guaranteed this wouldn't happen. */
1512 assert (0);
1513 value >>= shift_amount;
1514 value &= 0xffff;
1515 inst->operands[1].imm.value = value;
1516 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1517 inst->operands[1].shifter.amount = shift_amount;
1518}
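/* Worked example: assuming the wide-constant check succeeds, MOV X0, #0x20000
   becomes MOVZ X0, #0x2, LSL #16: aarch64_wide_constant_p reports a shift of
   16 and the value is reduced to 0x20000 >> 16 = 2.  */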
1519
1520/* MOV <Wd>, #<imm>
1521 is equivalent to:
1522 ORR <Wd>, WZR, #<imm>. */
1523
1524static void
1525convert_mov_to_movebitmask (aarch64_inst *inst)
1526{
1527 copy_operand_info (inst, 2, 1);
1528 inst->operands[1].reg.regno = 0x1f;
1529 inst->operands[1].skip = 0;
1530}
1531
 1532/* Some alias opcodes are assembled by being converted to their real form. */
1533
1534static void
1535convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1536{
1537 const aarch64_opcode *alias = inst->opcode;
1538
1539 if ((alias->flags & F_CONV) == 0)
1540 goto convert_to_real_return;
1541
1542 switch (alias->op)
1543 {
1544 case OP_ASR_IMM:
1545 case OP_LSR_IMM:
1546 convert_sr_to_bfm (inst);
1547 break;
1548 case OP_LSL_IMM:
1549 convert_lsl_to_ubfm (inst);
1550 break;
1551 case OP_CINC:
1552 case OP_CINV:
1553 case OP_CNEG:
1554 convert_to_csel (inst);
1555 break;
1556 case OP_CSET:
1557 case OP_CSETM:
1558 convert_cset_to_csinc (inst);
1559 break;
1560 case OP_UBFX:
1561 case OP_BFXIL:
1562 case OP_SBFX:
1563 convert_bfx_to_bfm (inst);
1564 break;
1565 case OP_SBFIZ:
1566 case OP_BFI:
1567 case OP_UBFIZ:
1568 convert_bfi_to_bfm (inst);
1569 break;
1570 case OP_BFC:
1571 convert_bfc_to_bfm (inst);
1572 break;
1573 case OP_MOV_V:
1574 convert_mov_to_orr (inst);
1575 break;
1576 case OP_MOV_IMM_WIDE:
1577 case OP_MOV_IMM_WIDEN:
1578 convert_mov_to_movewide (inst);
1579 break;
1580 case OP_MOV_IMM_LOG:
1581 convert_mov_to_movebitmask (inst);
1582 break;
1583 case OP_ROR_IMM:
1584 convert_ror_to_extr (inst);
1585 break;
1586 case OP_SXTL:
1587 case OP_SXTL2:
1588 case OP_UXTL:
1589 case OP_UXTL2:
1590 convert_xtl_to_shll (inst);
1591 break;
1592 default:
1593 break;
1594 }
1595
1596convert_to_real_return:
1597 aarch64_replace_opcode (inst, real);
1598}
1599
 1600/* Encode *INST_ORI of the opcode OPCODE.
1601 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1602 matched operand qualifier sequence in *QLF_SEQ. */
1603
1604int
1605aarch64_opcode_encode (const aarch64_opcode *opcode,
1606 const aarch64_inst *inst_ori, aarch64_insn *code,
1607 aarch64_opnd_qualifier_t *qlf_seq,
1608 aarch64_operand_error *mismatch_detail)
1609{
1610 int i;
1611 const aarch64_opcode *aliased;
1612 aarch64_inst copy, *inst;
1613
1614 DEBUG_TRACE ("enter with %s", opcode->name);
1615
1616 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1617 copy = *inst_ori;
1618 inst = &copy;
1619
1620 assert (inst->opcode == NULL || inst->opcode == opcode);
1621 if (inst->opcode == NULL)
1622 inst->opcode = opcode;
1623
1624 /* Constrain the operands.
1625 After passing this, the encoding is guaranteed to succeed. */
1626 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
1627 {
1628 DEBUG_TRACE ("FAIL since operand constraint not met");
1629 return 0;
1630 }
1631
1632 /* Get the base value.
1633 Note: this has to be before the aliasing handling below in order to
1634 get the base value from the alias opcode before we move on to the
1635 aliased opcode for encoding. */
1636 inst->value = opcode->opcode;
1637
1638 /* No need to do anything else if the opcode does not have any operand. */
1639 if (aarch64_num_of_operands (opcode) == 0)
1640 goto encoding_exit;
1641
1642 /* Assign operand indexes and check types. Also put the matched
1643 operand qualifiers in *QLF_SEQ to return. */
1644 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1645 {
1646 assert (opcode->operands[i] == inst->operands[i].type);
1647 inst->operands[i].idx = i;
1648 if (qlf_seq != NULL)
1649 *qlf_seq = inst->operands[i].qualifier;
1650 }
1651
1652 aliased = aarch64_find_real_opcode (opcode);
1653 /* If the opcode is an alias and it does not ask for direct encoding by
1654 itself, the instruction will be transformed to the form of real opcode
1655 and the encoding will be carried out using the rules for the aliased
1656 opcode. */
1657 if (aliased != NULL && (opcode->flags & F_CONV))
1658 {
1659 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1660 aliased->name, opcode->name);
1661 /* Convert the operands to the form of the real opcode. */
1662 convert_to_real (inst, aliased);
1663 opcode = aliased;
1664 }
1665
1666 aarch64_opnd_info *info = inst->operands;
1667
1668 /* Call the inserter of each operand. */
1669 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
1670 {
1671 const aarch64_operand *opnd;
1672 enum aarch64_opnd type = opcode->operands[i];
1673 if (type == AARCH64_OPND_NIL)
1674 break;
1675 if (info->skip)
1676 {
1677 DEBUG_TRACE ("skip the incomplete operand %d", i);
1678 continue;
1679 }
1680 opnd = &aarch64_operands[type];
1681 if (operand_has_inserter (opnd))
1682 aarch64_insert_operand (opnd, info, &inst->value, inst);
1683 }
1684
1685 /* Call opcode encoders indicated by flags. */
1686 if (opcode_has_special_coder (opcode))
1687 do_special_encoding (inst);
1688
1689encoding_exit:
1690 DEBUG_TRACE ("exit with %s", opcode->name);
1691
1692 *code = inst->value;
1693
1694 return 1;
1695}