[AArch64] Add ARMv8.3 FCMLA and FCADD instructions
[deliverable/binutils-gdb.git] / opcodes / aarch64-asm.c
1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
25
26 /* Utilities. */
27
28 /* The unnamed arguments consist of the number of fields and information about
29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
30 the base mask of the opcode.
31
32 N.B. the fields are required to be in such an order than the least signficant
33 field for VALUE comes the first, e.g. the <index> in
34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
35 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
36 the order of M, L, H. */
37
38 static inline void
39 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
40 {
41 uint32_t num;
42 const aarch64_field *field;
43 enum aarch64_field_kind kind;
44 va_list va;
45
46 va_start (va, mask);
47 num = va_arg (va, uint32_t);
48 assert (num <= 5);
49 while (num--)
50 {
51 kind = va_arg (va, enum aarch64_field_kind);
52 field = &fields[kind];
53 insert_field (kind, code, value, mask);
54 value >>= field->width;
55 }
56 va_end (va);
57 }
58
59 /* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
61
62 static void
63 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
64 aarch64_insn value)
65 {
66 unsigned int i;
67 enum aarch64_field_kind kind;
68
69 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
70 if (self->fields[i] != FLD_NIL)
71 {
72 kind = self->fields[i];
73 insert_field (kind, code, value, 0);
74 value >>= fields[kind].width;
75 }
76 }
77
78 /* Operand inserters. */
79
80 /* Insert register number. */
81 const char *
82 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
83 aarch64_insn *code,
84 const aarch64_inst *inst ATTRIBUTE_UNUSED)
85 {
86 insert_field (self->fields[0], code, info->reg.regno, 0);
87 return NULL;
88 }
89
/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].
   Returns NULL on success (no inserter in this file reports errors).  */
const char *
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      /* POS is the number of low bits that the element size occupies in
	 the imm5/imm4 encoding; S_B => 0, S_H => 1, S_S => 2, S_D => 3
	 (relies on the QLF_S_* enumerators being consecutive).  */
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D
	     The single set bit below the index marks the element size.  */
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
	 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      unsigned index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
	/* Complex operand takes two elements, so the effective index is
	   doubled before encoding.  */
	index *= 2;

      /* The index is spread across the H, L and M bits according to the
	 element size.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* H:L:M */
	  assert (index < 8);
	  insert_fields (code, index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* H:L */
	  assert (index < 4);
	  insert_fields (code, index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  assert (index < 2);
	  insert_field (FLD_H, code, index, 0);
	  break;
	default:
	  assert (0);
	}
    }
  return NULL;
}
157
158 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
159 const char *
160 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
161 aarch64_insn *code,
162 const aarch64_inst *inst ATTRIBUTE_UNUSED)
163 {
164 /* R */
165 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
166 /* len */
167 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
168 return NULL;
169 }
170
171 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
172 in AdvSIMD load/store instructions. */
173 const char *
174 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
175 const aarch64_opnd_info *info, aarch64_insn *code,
176 const aarch64_inst *inst)
177 {
178 aarch64_insn value = 0;
179 /* Number of elements in each structure to be loaded/stored. */
180 unsigned num = get_opcode_dependent_value (inst->opcode);
181
182 /* Rt */
183 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
184 /* opcode */
185 switch (num)
186 {
187 case 1:
188 switch (info->reglist.num_regs)
189 {
190 case 1: value = 0x7; break;
191 case 2: value = 0xa; break;
192 case 3: value = 0x6; break;
193 case 4: value = 0x2; break;
194 default: assert (0);
195 }
196 break;
197 case 2:
198 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
199 break;
200 case 3:
201 value = 0x4;
202 break;
203 case 4:
204 value = 0x0;
205 break;
206 default:
207 assert (0);
208 }
209 insert_field (FLD_opcode, code, value, 0);
210
211 return NULL;
212 }
213
214 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
215 single structure to all lanes instructions. */
216 const char *
217 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
218 const aarch64_opnd_info *info, aarch64_insn *code,
219 const aarch64_inst *inst)
220 {
221 aarch64_insn value;
222 /* The opcode dependent area stores the number of elements in
223 each structure to be loaded/stored. */
224 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
225
226 /* Rt */
227 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
228 /* S */
229 value = (aarch64_insn) 0;
230 if (is_ld1r && info->reglist.num_regs == 2)
231 /* OP_LD1R does not have alternating variant, but have "two consecutive"
232 instead. */
233 value = (aarch64_insn) 1;
234 insert_field (FLD_S, code, value, 0);
235
236 return NULL;
237 }
238
/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand e.g. Vt in AdvSIMD load/store single element instructions.
   The element index is packed into the Q:S:size bits, with the element
   size selecting how many of the low bits are repurposed.  */
const char *
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q"; size<0> is fixed to 1 for doublewords.  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      assert (0);
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  /* opcode<2:1> is a two-bit sub-field carved out of the full
     asisdlso opcode field starting at bit 1.  */
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return NULL;
}
286
/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
   SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.
   The shift amount and the element size are jointly encoded in
   immh:immb; for the vector (asimdshf) class the Q bit is also set
   from the qualifier.  */
const char *
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst)
{
  /* VAL is the standard value of the qualifier; bit 0 distinguishes the
     128-bit from the 64-bit vector form, the remaining bits give the
     log2 element size used for the immh arithmetic below.  */
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* Right shifts: encoded as (2 * esize_in_bits) - shift.
       immh:immb
	 immh	<shift>
	 0000	SEE AdvSIMD modified immediate
	 0001	(16-UInt(immh:immb))
	 001x	(32-UInt(immh:immb))
	 01xx	(64-UInt(immh:immb))
	 1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* Left shifts: encoded as esize_in_bits + shift.
       immh:immb
	 immh	<shift>
	 0000	SEE AdvSIMD modified immediate
	 0001	(UInt(immh:immb)-8)
	 001x	(UInt(immh:immb)-16)
	 01xx	(UInt(immh:immb)-32)
	 1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return NULL;
}
341
342 /* Insert fields for e.g. the immediate operands in
343 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
344 const char *
345 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
346 aarch64_insn *code,
347 const aarch64_inst *inst ATTRIBUTE_UNUSED)
348 {
349 int64_t imm;
350
351 imm = info->imm.value;
352 if (operand_need_shift_by_two (self))
353 imm >>= 2;
354 insert_all_fields (self, code, imm);
355 return NULL;
356 }
357
358 /* Insert immediate and its shift amount for e.g. the last operand in
359 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
360 const char *
361 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
362 aarch64_insn *code, const aarch64_inst *inst)
363 {
364 /* imm16 */
365 aarch64_ins_imm (self, info, code, inst);
366 /* hw */
367 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
368 return NULL;
369 }
370
/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
   MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.
   The shift amount (when present) is folded into part of the cmode
   field; which cmode bits are used depends on the element size and on
   whether the shifter is LSL or MSL.  */
const char *
aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
				  const aarch64_opnd_info *info,
				  aarch64_insn *code,
				  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* NOTE(review): INST is marked ATTRIBUTE_UNUSED yet is dereferenced on
     the next line; the attribute is merely misleading, not harmful.  */
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  uint64_t imm = info->imm.value;
  enum aarch64_modifier_kind kind = info->shifter.kind;
  int amount = info->shifter.amount;
  aarch64_field field = {0, 0};

  /* a:b:c:d:e:f:g:h */
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    {
      /* Either MOVI <Dd>, #<imm>
	 or     MOVI <Vd>.2D, #<imm>.
	 <imm> is a 64-bit immediate
	 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
	 encoded in "a:b:c:d:e:f:g:h".  Shrink it to the 8 selector bits;
	 a negative result would mean the immediate was not expandable,
	 which earlier constraint checking rules out.  */
      imm = aarch64_shrink_expanded_imm8 (imm);
      assert ((int)imm >= 0);
    }
  insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);

  if (kind == AARCH64_MOD_NONE)
    return NULL;

  /* shift amount partially in cmode */
  assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
  if (kind == AARCH64_MOD_LSL)
    {
      /* AARCH64_MOD_LSL: shift zeros.  */
      int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
      assert (esize == 4 || esize == 2 || esize == 1);
      /* For 8-bit move immediate, the optional LSL #0 does not require
	 encoding.  */
      if (esize == 1)
	return NULL;
      /* The amount is a multiple of 8; encode amount / 8.  */
      amount >>= 3;
      if (esize == 4)
	gen_sub_field (FLD_cmode, 1, 2, &field);	/* per word */
      else
	gen_sub_field (FLD_cmode, 1, 1, &field);	/* per halfword */
    }
  else
    {
      /* AARCH64_MOD_MSL: shift ones.  Amount is 8 or 16; encode in one
	 bit as amount / 16.  */
      amount >>= 4;
      gen_sub_field (FLD_cmode, 0, 1, &field);		/* per word */
    }
  insert_field_2 (&field, code, amount, 0);

  return NULL;
}
428
429 /* Insert fields for an 8-bit floating-point immediate. */
430 const char *
431 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
432 aarch64_insn *code,
433 const aarch64_inst *inst ATTRIBUTE_UNUSED)
434 {
435 insert_all_fields (self, code, info->imm.value);
436 return NULL;
437 }
438
/* Insert field rot for the rotate immediate in
   FCMLA <Vd>.<T>, <Vn>.<T>, <Vm>.<T>, #<rotate>.
   The operand type selects between the two encodings: ROT1/ROT2 allow
   any multiple of 90 degrees, ROT3 (used by FCADD) allows only 90 and
   270.  */
const char *
aarch64_ins_imm_rotate (const aarch64_operand *self,
			const aarch64_opnd_info *info,
			aarch64_insn *code, const aarch64_inst *inst)
{
  /* The immediate is a multiple of 90 degrees; reduce to 0-3.  */
  uint64_t rot = info->imm.value / 90;

  switch (info->type)
    {
    case AARCH64_OPND_IMM_ROT1:
    case AARCH64_OPND_IMM_ROT2:
      /* value	rot
	 0	0
	 90	1
	 180	2
	 270	3  */
      assert (rot < 4U);
      break;
    case AARCH64_OPND_IMM_ROT3:
      /* Only 90 and 270 are valid, encoded in a single bit:
	 value	rot
	 90	0
	 270	1  */
      rot = (rot - 1) / 2;
      assert (rot < 2U);
      break;
    default:
      assert (0);
    }
  insert_field (self->fields[0], code, rot, inst->opcode->mask);

  return NULL;
}
473
474 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
475 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
476 const char *
477 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
478 aarch64_insn *code,
479 const aarch64_inst *inst ATTRIBUTE_UNUSED)
480 {
481 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
482 return NULL;
483 }
484
485 /* Insert arithmetic immediate for e.g. the last operand in
486 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
487 const char *
488 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
489 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
490 {
491 /* shift */
492 aarch64_insn value = info->shifter.amount ? 1 : 0;
493 insert_field (self->fields[0], code, value, 0);
494 /* imm12 (unsigned) */
495 insert_field (self->fields[1], code, info->imm.value, 0);
496 return NULL;
497 }
498
499 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
500 the operand should be inverted before encoding. */
501 static const char *
502 aarch64_ins_limm_1 (const aarch64_operand *self,
503 const aarch64_opnd_info *info, aarch64_insn *code,
504 const aarch64_inst *inst, bfd_boolean invert_p)
505 {
506 aarch64_insn value;
507 uint64_t imm = info->imm.value;
508 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
509
510 if (invert_p)
511 imm = ~imm;
512 if (aarch64_logical_immediate_p (imm, esize, &value) == FALSE)
513 /* The constraint check should have guaranteed this wouldn't happen. */
514 assert (0);
515
516 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
517 self->fields[0]);
518 return NULL;
519 }
520
521 /* Insert logical/bitmask immediate for e.g. the last operand in
522 ORR <Wd|WSP>, <Wn>, #<imm>. */
523 const char *
524 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
525 aarch64_insn *code, const aarch64_inst *inst)
526 {
527 return aarch64_ins_limm_1 (self, info, code, inst,
528 inst->opcode->op == OP_BIC);
529 }
530
531 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
532 const char *
533 aarch64_ins_inv_limm (const aarch64_operand *self,
534 const aarch64_opnd_info *info, aarch64_insn *code,
535 const aarch64_inst *inst)
536 {
537 return aarch64_ins_limm_1 (self, info, code, inst, TRUE);
538 }
539
/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.
   The register number goes in Rt; how the size is encoded depends on
   the instruction class.  */
const char *
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
		aarch64_insn *code, const aarch64_inst *inst)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size: pair/literal forms use a dedicated 2-bit size encoding.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_Q: value = 2; break;
	default: assert (0);
	}
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size: other classes take the qualifier's standard value
	 split across the opc1 and size fields.  */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }

  return NULL;
}
576
577 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
578 const char *
579 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
580 const aarch64_opnd_info *info, aarch64_insn *code,
581 const aarch64_inst *inst ATTRIBUTE_UNUSED)
582 {
583 /* Rn */
584 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
585 return NULL;
586 }
587
/* Encode the address operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].
   Encodes base register, offset register, the extend/shift kind and
   the S (scaling) bit.  */
const char *
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    /* For wider transfers S is simply "shift amount is non-zero".  */
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
       S	<amount>
       0	[absent]
       1	#0
       Must be #0 if <extend> is explicitly LSL.
       I.e. S records whether an explicit "#0" was written, not the
       (always-zero) shift value itself.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return NULL;
}
620
/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.
   Handles both the imm9 (single register) and imm7 (register pair)
   forms, plus the pre/post-index writeback bit.  */
const char *
aarch64_ins_addr_simm (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7)
    /* Scaled immediate in ld/st pair instructions: the byte offset is
       divided by the transfer size before encoding.  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post- index */
  if (info->addr.writeback)
    {
      /* These classes have no writeback forms, so writeback must not
	 have been parsed for them.  */
      assert (inst->opcode->iclass != ldst_unscaled
	      && inst->opcode->iclass != ldstnapair_offs
	      && inst->opcode->iclass != ldstpair_off
	      && inst->opcode->iclass != ldst_unpriv);
      /* Exactly one of pre-index and post-index.  Only pre-index needs
	 a bit set; post-index is the encoding with the bit clear.  */
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
	insert_field (self->fields[1], code, 1, 0);
    }

  return NULL;
}
652
653 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
654 const char *
655 aarch64_ins_addr_simm10 (const aarch64_operand *self,
656 const aarch64_opnd_info *info,
657 aarch64_insn *code,
658 const aarch64_inst *inst ATTRIBUTE_UNUSED)
659 {
660 int imm;
661
662 /* Rn */
663 insert_field (self->fields[0], code, info->addr.base_regno, 0);
664 /* simm10 */
665 imm = info->addr.offset.imm >> 3;
666 insert_field (self->fields[1], code, imm >> 9, 0);
667 insert_field (self->fields[2], code, imm, 0);
668 /* writeback */
669 if (info->addr.writeback)
670 {
671 assert (info->addr.preind == 1 && info->addr.postind == 0);
672 insert_field (self->fields[3], code, 1, 0);
673 }
674 return NULL;
675 }
676
677 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
678 const char *
679 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
680 const aarch64_opnd_info *info,
681 aarch64_insn *code,
682 const aarch64_inst *inst ATTRIBUTE_UNUSED)
683 {
684 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
685
686 /* Rn */
687 insert_field (self->fields[0], code, info->addr.base_regno, 0);
688 /* uimm12 */
689 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
690 return NULL;
691 }
692
693 /* Encode the address operand for e.g.
694 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
695 const char *
696 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
697 const aarch64_opnd_info *info, aarch64_insn *code,
698 const aarch64_inst *inst ATTRIBUTE_UNUSED)
699 {
700 /* Rn */
701 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
702 /* Rm | #<amount> */
703 if (info->addr.offset.is_reg)
704 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
705 else
706 insert_field (FLD_Rm, code, 0x1f, 0);
707 return NULL;
708 }
709
710 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
711 const char *
712 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
713 const aarch64_opnd_info *info, aarch64_insn *code,
714 const aarch64_inst *inst ATTRIBUTE_UNUSED)
715 {
716 /* cond */
717 insert_field (FLD_cond, code, info->cond->value, 0);
718 return NULL;
719 }
720
721 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
722 const char *
723 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
724 const aarch64_opnd_info *info, aarch64_insn *code,
725 const aarch64_inst *inst ATTRIBUTE_UNUSED)
726 {
727 /* op0:op1:CRn:CRm:op2 */
728 insert_fields (code, info->sysreg, inst->opcode->mask, 5,
729 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
730 return NULL;
731 }
732
733 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
734 const char *
735 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
736 const aarch64_opnd_info *info, aarch64_insn *code,
737 const aarch64_inst *inst ATTRIBUTE_UNUSED)
738 {
739 /* op1:op2 */
740 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
741 FLD_op2, FLD_op1);
742 return NULL;
743 }
744
745 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
746 const char *
747 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
748 const aarch64_opnd_info *info, aarch64_insn *code,
749 const aarch64_inst *inst ATTRIBUTE_UNUSED)
750 {
751 /* op1:CRn:CRm:op2 */
752 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
753 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
754 return NULL;
755 }
756
757 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
758
759 const char *
760 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
761 const aarch64_opnd_info *info, aarch64_insn *code,
762 const aarch64_inst *inst ATTRIBUTE_UNUSED)
763 {
764 /* CRm */
765 insert_field (FLD_CRm, code, info->barrier->value, 0);
766 return NULL;
767 }
768
769 /* Encode the prefetch operation option operand for e.g.
770 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
771
772 const char *
773 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
774 const aarch64_opnd_info *info, aarch64_insn *code,
775 const aarch64_inst *inst ATTRIBUTE_UNUSED)
776 {
777 /* prfop in Rt */
778 insert_field (FLD_Rt, code, info->prfop->value, 0);
779 return NULL;
780 }
781
782 /* Encode the hint number for instructions that alias HINT but take an
783 operand. */
784
785 const char *
786 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
787 const aarch64_opnd_info *info, aarch64_insn *code,
788 const aarch64_inst *inst ATTRIBUTE_UNUSED)
789 {
790 /* CRm:op2. */
791 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
792 return NULL;
793 }
794
795 /* Encode the extended register operand for e.g.
796 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
797 const char *
798 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
799 const aarch64_opnd_info *info, aarch64_insn *code,
800 const aarch64_inst *inst ATTRIBUTE_UNUSED)
801 {
802 enum aarch64_modifier_kind kind;
803
804 /* Rm */
805 insert_field (FLD_Rm, code, info->reg.regno, 0);
806 /* option */
807 kind = info->shifter.kind;
808 if (kind == AARCH64_MOD_LSL)
809 kind = info->qualifier == AARCH64_OPND_QLF_W
810 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
811 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
812 /* imm3 */
813 insert_field (FLD_imm3, code, info->shifter.amount, 0);
814
815 return NULL;
816 }
817
818 /* Encode the shifted register operand for e.g.
819 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
820 const char *
821 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
822 const aarch64_opnd_info *info, aarch64_insn *code,
823 const aarch64_inst *inst ATTRIBUTE_UNUSED)
824 {
825 /* Rm */
826 insert_field (FLD_Rm, code, info->reg.regno, 0);
827 /* shift */
828 insert_field (FLD_shift, code,
829 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
830 /* imm6 */
831 insert_field (FLD_imm6, code, info->shifter.amount, 0);
832
833 return NULL;
834 }
835
836 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
837 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
838 SELF's operand-dependent value. fields[0] specifies the field that
839 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
840 const char *
841 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
842 const aarch64_opnd_info *info,
843 aarch64_insn *code,
844 const aarch64_inst *inst ATTRIBUTE_UNUSED)
845 {
846 int factor = 1 + get_operand_specific_data (self);
847 insert_field (self->fields[0], code, info->addr.base_regno, 0);
848 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
849 return NULL;
850 }
851
852 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
853 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
854 SELF's operand-dependent value. fields[0] specifies the field that
855 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
856 const char *
857 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
858 const aarch64_opnd_info *info,
859 aarch64_insn *code,
860 const aarch64_inst *inst ATTRIBUTE_UNUSED)
861 {
862 int factor = 1 + get_operand_specific_data (self);
863 insert_field (self->fields[0], code, info->addr.base_regno, 0);
864 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
865 return NULL;
866 }
867
868 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
869 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
870 SELF's operand-dependent value. fields[0] specifies the field that
871 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
872 and imm3 fields, with imm3 being the less-significant part. */
873 const char *
874 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
875 const aarch64_opnd_info *info,
876 aarch64_insn *code,
877 const aarch64_inst *inst ATTRIBUTE_UNUSED)
878 {
879 int factor = 1 + get_operand_specific_data (self);
880 insert_field (self->fields[0], code, info->addr.base_regno, 0);
881 insert_fields (code, info->addr.offset.imm / factor, 0,
882 2, FLD_imm3, FLD_SVE_imm6);
883 return NULL;
884 }
885
886 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
887 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
888 value. fields[0] specifies the base register field. */
889 const char *
890 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
891 const aarch64_opnd_info *info, aarch64_insn *code,
892 const aarch64_inst *inst ATTRIBUTE_UNUSED)
893 {
894 int factor = 1 << get_operand_specific_data (self);
895 insert_field (self->fields[0], code, info->addr.base_regno, 0);
896 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
897 return NULL;
898 }
899
900 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
901 is SELF's operand-dependent value. fields[0] specifies the base
902 register field and fields[1] specifies the offset register field. */
903 const char *
904 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
905 const aarch64_opnd_info *info, aarch64_insn *code,
906 const aarch64_inst *inst ATTRIBUTE_UNUSED)
907 {
908 insert_field (self->fields[0], code, info->addr.base_regno, 0);
909 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
910 return NULL;
911 }
912
913 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
914 <shift> is SELF's operand-dependent value. fields[0] specifies the
915 base register field, fields[1] specifies the offset register field and
916 fields[2] is a single-bit field that selects SXTW over UXTW. */
917 const char *
918 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
919 const aarch64_opnd_info *info, aarch64_insn *code,
920 const aarch64_inst *inst ATTRIBUTE_UNUSED)
921 {
922 insert_field (self->fields[0], code, info->addr.base_regno, 0);
923 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
924 if (info->shifter.kind == AARCH64_MOD_UXTW)
925 insert_field (self->fields[2], code, 0, 0);
926 else
927 insert_field (self->fields[2], code, 1, 0);
928 return NULL;
929 }
930
931 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
932 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
933 fields[0] specifies the base register field. */
934 const char *
935 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
936 const aarch64_opnd_info *info, aarch64_insn *code,
937 const aarch64_inst *inst ATTRIBUTE_UNUSED)
938 {
939 int factor = 1 << get_operand_specific_data (self);
940 insert_field (self->fields[0], code, info->addr.base_regno, 0);
941 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
942 return NULL;
943 }
944
945 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
946 where <modifier> is fixed by the instruction and where <msz> is a
947 2-bit unsigned number. fields[0] specifies the base register field
948 and fields[1] specifies the offset register field. */
949 static const char *
950 aarch64_ext_sve_addr_zz (const aarch64_operand *self,
951 const aarch64_opnd_info *info, aarch64_insn *code)
952 {
953 insert_field (self->fields[0], code, info->addr.base_regno, 0);
954 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
955 insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
956 return NULL;
957 }
958
959 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
960 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
961 field and fields[1] specifies the offset register field. */
962 const char *
963 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
964 const aarch64_opnd_info *info, aarch64_insn *code,
965 const aarch64_inst *inst ATTRIBUTE_UNUSED)
966 {
967 return aarch64_ext_sve_addr_zz (self, info, code);
968 }
969
970 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
971 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
972 field and fields[1] specifies the offset register field. */
973 const char *
974 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
975 const aarch64_opnd_info *info,
976 aarch64_insn *code,
977 const aarch64_inst *inst ATTRIBUTE_UNUSED)
978 {
979 return aarch64_ext_sve_addr_zz (self, info, code);
980 }
981
982 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
983 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
984 field and fields[1] specifies the offset register field. */
985 const char *
986 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
987 const aarch64_opnd_info *info,
988 aarch64_insn *code,
989 const aarch64_inst *inst ATTRIBUTE_UNUSED)
990 {
991 return aarch64_ext_sve_addr_zz (self, info, code);
992 }
993
994 /* Encode an SVE ADD/SUB immediate. */
995 const char *
996 aarch64_ins_sve_aimm (const aarch64_operand *self,
997 const aarch64_opnd_info *info, aarch64_insn *code,
998 const aarch64_inst *inst ATTRIBUTE_UNUSED)
999 {
1000 if (info->shifter.amount == 8)
1001 insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
1002 else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
1003 insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
1004 else
1005 insert_all_fields (self, code, info->imm.value & 0xff);
1006 return NULL;
1007 }
1008
/* Encode an SVE CPY/DUP immediate.  */
const char *
aarch64_ins_sve_asimm (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst)
{
  /* CPY/DUP immediates use the same 8-bit-value-plus-shift-flag layout
     as the ADD/SUB immediates, so reuse that inserter.  */
  return aarch64_ins_sve_aimm (self, info, code, inst);
}
1017
1018 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1019 array specifies which field to use for Zn. MM is encoded in the
1020 concatenation of imm5 and SVE_tszh, with imm5 being the less
1021 significant part. */
1022 const char *
1023 aarch64_ins_sve_index (const aarch64_operand *self,
1024 const aarch64_opnd_info *info, aarch64_insn *code,
1025 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1026 {
1027 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1028 insert_field (self->fields[0], code, info->reglane.regno, 0);
1029 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1030 2, FLD_imm5, FLD_SVE_tszh);
1031 return NULL;
1032 }
1033
/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
const char *
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst)
{
  /* The operand is a standard logical immediate; the generic limm
     inserter does all the work.  */
  return aarch64_ins_limm (self, info, code, inst);
}
1042
/* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
   to use for Zn.  */
const char *
aarch64_ins_sve_reglist (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* Only the first register is encoded; the list length is implied by
     the opcode, so the remaining registers need no bits.  */
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  return NULL;
}
1053
1054 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1055 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1056 field. */
1057 const char *
1058 aarch64_ins_sve_scale (const aarch64_operand *self,
1059 const aarch64_opnd_info *info, aarch64_insn *code,
1060 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1061 {
1062 insert_all_fields (self, code, info->imm.value);
1063 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
1064 return NULL;
1065 }
1066
1067 /* Encode an SVE shift left immediate. */
1068 const char *
1069 aarch64_ins_sve_shlimm (const aarch64_operand *self,
1070 const aarch64_opnd_info *info, aarch64_insn *code,
1071 const aarch64_inst *inst)
1072 {
1073 const aarch64_opnd_info *prev_operand;
1074 unsigned int esize;
1075
1076 assert (info->idx > 0);
1077 prev_operand = &inst->operands[info->idx - 1];
1078 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1079 insert_all_fields (self, code, 8 * esize + info->imm.value);
1080 return NULL;
1081 }
1082
1083 /* Encode an SVE shift right immediate. */
1084 const char *
1085 aarch64_ins_sve_shrimm (const aarch64_operand *self,
1086 const aarch64_opnd_info *info, aarch64_insn *code,
1087 const aarch64_inst *inst)
1088 {
1089 const aarch64_opnd_info *prev_operand;
1090 unsigned int esize;
1091
1092 assert (info->idx > 0);
1093 prev_operand = &inst->operands[info->idx - 1];
1094 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1095 insert_all_fields (self, code, 16 * esize - info->imm.value);
1096 return NULL;
1097 }
1098
1099 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1100 The fields array specifies which field to use. */
1101 const char *
1102 aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1103 const aarch64_opnd_info *info,
1104 aarch64_insn *code,
1105 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1106 {
1107 if (info->imm.value == 0x3f000000)
1108 insert_field (self->fields[0], code, 0, 0);
1109 else
1110 insert_field (self->fields[0], code, 1, 0);
1111 return NULL;
1112 }
1113
1114 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1115 The fields array specifies which field to use. */
1116 const char *
1117 aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1118 const aarch64_opnd_info *info,
1119 aarch64_insn *code,
1120 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1121 {
1122 if (info->imm.value == 0x3f000000)
1123 insert_field (self->fields[0], code, 0, 0);
1124 else
1125 insert_field (self->fields[0], code, 1, 0);
1126 return NULL;
1127 }
1128
1129 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1130 The fields array specifies which field to use. */
1131 const char *
1132 aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1133 const aarch64_opnd_info *info,
1134 aarch64_insn *code,
1135 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1136 {
1137 if (info->imm.value == 0)
1138 insert_field (self->fields[0], code, 0, 0);
1139 else
1140 insert_field (self->fields[0], code, 1, 0);
1141 return NULL;
1142 }
1143
1144 /* Miscellaneous encoding functions. */
1145
/* Encode size[0], i.e. bit 22, for
   e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier;

  /* size[0] is derived from the operand that carries the wider
     arrangement (<Ta>): the source for the narrowing FCVTN forms, the
     destination for the widening FCVTL forms.  */
  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      assert (0);
    }
  /* <Ta> is 4S (half<->single) or 2D (single<->double); size[0] is 0 for
     the former and 1 for the latter.  */
  assert (qualifier == AARCH64_OPND_QLF_V_4S
	  || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}
1177
1178 /* Encode size[0], i.e. bit 22, for
1179 e.g. FCVTXN <Vb><d>, <Va><n>. */
1180
1181 static void
1182 encode_asisd_fcvtxn (aarch64_inst *inst)
1183 {
1184 aarch64_insn val = 1;
1185 aarch64_field field = {0, 0};
1186 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1187 gen_sub_field (FLD_size, 0, 1, &field);
1188 insert_field_2 (&field, &inst->value, val, 0);
1189 }
1190
1191 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1192 static void
1193 encode_fcvt (aarch64_inst *inst)
1194 {
1195 aarch64_insn val;
1196 const aarch64_field field = {15, 2};
1197
1198 /* opc dstsize */
1199 switch (inst->operands[0].qualifier)
1200 {
1201 case AARCH64_OPND_QLF_S_S: val = 0; break;
1202 case AARCH64_OPND_QLF_S_D: val = 1; break;
1203 case AARCH64_OPND_QLF_S_H: val = 3; break;
1204 default: abort ();
1205 }
1206 insert_field_2 (&field, &inst->value, val, 0);
1207
1208 return;
1209 }
1210
1211 /* Return the index in qualifiers_list that INST is using. Should only
1212 be called once the qualifiers are known to be valid. */
1213
1214 static int
1215 aarch64_get_variant (struct aarch64_inst *inst)
1216 {
1217 int i, nops, variant;
1218
1219 nops = aarch64_num_of_operands (inst->opcode);
1220 for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
1221 {
1222 for (i = 0; i < nops; ++i)
1223 if (inst->opcode->qualifiers_list[variant][i]
1224 != inst->operands[i].qualifier)
1225 break;
1226 if (i == nops)
1227 return variant;
1228 }
1229 abort ();
1230 }
1231
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    /* The SVE MOV/NOT aliases below are assembled as two-source real
       opcodes with both sources equal; replicate the already-encoded
       source register field into the second slot.  */
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_field (FLD_SVE_tsz, &inst->value,
		    1 << aarch64_get_variant (inst), 0);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      /* Nothing extra to do for the indexed MOV alias.  */
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}
1298
/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  /* The qualifier's standard value packs Q in bit 0 and size in
     bits [2:1].  */
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size */
  /* The AdvSIMD load/store iclasses put 'size' in a different bit
     position from everything else.  */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
1326
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has the enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  /* 'sf' (64-bit operation) is 1 for X/SP-qualified operands.  */
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  /* LSE atomics encode the operand width in their own 'size' bit.  */
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  /* Floating-point 'type' field from the scalar qualifier.  */
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: assert (0);
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  /* AdvSIMD scalar 'size' field.  */
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  /* Vector arrangement <T> encoded in Q and the imm5 one-hot marker.  */
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be a integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      /* opc[0] is the inverse of the standard W/X value.  */
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
1452
/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  switch (inst->opcode->iclass)
    {
    case sve_cpy:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
      /* For indices and shift amounts, the variant is encoded as
	 part of the immediate.  */
      break;

    case sve_limm:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
	 and depend on the immediate.  They don't have a separate
	 encoding.  */
      break;

    case sve_misc:
      /* sve_misc instructions have only a single variant.  */
      break;

    case sve_movprfx:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      /* The H variant is size 1, hence the +1 bias.  */
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_sd:
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    default:
      break;
    }
}
1511
1512 /* Converters converting an alias opcode instruction to its real form. */
1513
/* ROR <Wd>, <Ws>, #<shift>
   is equivalent to:
     EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  /* Shift the operands up one slot from the end first, so the shift
     immediate moves to slot 3 and <Ws> is duplicated into slot 2.
     The copy order is significant.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}
1523
/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
   is equivalent to:
     USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
static void
convert_xtl_to_shll (aarch64_inst *inst)
{
  /* Synthesise the #0 shift operand; its qualifier follows the source
     arrangement.  */
  inst->operands[2].qualifier = inst->operands[1].qualifier;
  inst->operands[2].imm.value = 0;
}
1533
1534 /* Convert
1535 LSR <Xd>, <Xn>, #<shift>
1536 to
1537 UBFM <Xd>, <Xn>, #<shift>, #63. */
1538 static void
1539 convert_sr_to_bfm (aarch64_inst *inst)
1540 {
1541 inst->operands[3].imm.value =
1542 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1543 }
1544
/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
     is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  /* Duplicate the source register into the second source slot.  */
  copy_operand_info (inst, 2, 1);
}
1554
1555 /* When <imms> >= <immr>, the instruction written:
1556 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1557 is equivalent to:
1558 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1559
1560 static void
1561 convert_bfx_to_bfm (aarch64_inst *inst)
1562 {
1563 int64_t lsb, width;
1564
1565 /* Convert the operand. */
1566 lsb = inst->operands[2].imm.value;
1567 width = inst->operands[3].imm.value;
1568 inst->operands[2].imm.value = lsb;
1569 inst->operands[3].imm.value = lsb + width - 1;
1570 }
1571
1572 /* When <imms> < <immr>, the instruction written:
1573 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1574 is equivalent to:
1575 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1576
1577 static void
1578 convert_bfi_to_bfm (aarch64_inst *inst)
1579 {
1580 int64_t lsb, width;
1581
1582 /* Convert the operand. */
1583 lsb = inst->operands[2].imm.value;
1584 width = inst->operands[3].imm.value;
1585 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1586 {
1587 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1588 inst->operands[3].imm.value = width - 1;
1589 }
1590 else
1591 {
1592 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1593 inst->operands[3].imm.value = width - 1;
1594 }
1595 }
1596
1597 /* The instruction written:
1598 BFC <Xd>, #<lsb>, #<width>
1599 is equivalent to:
1600 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1601
1602 static void
1603 convert_bfc_to_bfm (aarch64_inst *inst)
1604 {
1605 int64_t lsb, width;
1606
1607 /* Insert XZR. */
1608 copy_operand_info (inst, 3, 2);
1609 copy_operand_info (inst, 2, 1);
1610 copy_operand_info (inst, 0, 0);
1611 inst->operands[1].reg.regno = 0x1f;
1612
1613 /* Convert the immedate operand. */
1614 lsb = inst->operands[2].imm.value;
1615 width = inst->operands[3].imm.value;
1616 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1617 {
1618 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1619 inst->operands[3].imm.value = width - 1;
1620 }
1621 else
1622 {
1623 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1624 inst->operands[3].imm.value = width - 1;
1625 }
1626 }
1627
1628 /* The instruction written:
1629 LSL <Xd>, <Xn>, #<shift>
1630 is equivalent to:
1631 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1632
1633 static void
1634 convert_lsl_to_ubfm (aarch64_inst *inst)
1635 {
1636 int64_t shift = inst->operands[2].imm.value;
1637
1638 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1639 {
1640 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1641 inst->operands[3].imm.value = 31 - shift;
1642 }
1643 else
1644 {
1645 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1646 inst->operands[3].imm.value = 63 - shift;
1647 }
1648 }
1649
/* CINC <Wd>, <Wn>, <cond>
   is equivalent to:
     CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */

static void
convert_to_csel (aarch64_inst *inst)
{
  /* Shift the condition to slot 3 and duplicate <Wn> into slot 2;
     the copy order is significant.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  /* The alias is written with the condition under which the increment
     happens; CSINC takes the inverse.  */
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1661
/* CSET <Wd>, <cond>
   is equivalent to:
     CSINC <Wd>, WZR, WZR, invert(<cond>).  */

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  /* Move the condition to slot 3, then seed slots 1 and 2 from the
     destination so they carry register type/qualifier info before
     overriding the register numbers with 31 (WZR/XZR).  */
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  /* The alias sets the register under <cond>; CSINC takes the inverse.  */
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1676
/* MOV <Wd>, #<imm>
   is equivalent to:
     MOVZ <Wd>, #<imm16>, LSL #<shift>.  */

static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value;

  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      /* The MOVN form encodes the bitwise inverse of the written
	 immediate.  */
      value = ~inst->operands[1].imm.value;
      break;
    default:
      assert (0);
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);
  /* Split the constant into a 16-bit payload and an LSL amount.  */
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
}
1710
/* MOV <Wd>, #<imm>
   is equivalent to:
     ORR <Wd>, WZR, #<imm>.  */

static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  /* Move the immediate to slot 2 and turn slot 1 into WZR/XZR.  */
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  /* The register operand was previously marked skipped while parsing
     the alias; it is now real.  */
  inst->operands[1].skip = 0;
}
1722
/* Some alias opcodes are assembled by being converted to their real-form.  */

static void
convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
{
  const aarch64_opcode *alias = inst->opcode;

  /* Aliases without F_CONV encode directly; only the opcode needs
     replacing.  */
  if ((alias->flags & F_CONV) == 0)
    goto convert_to_real_return;

  switch (alias->op)
    {
    case OP_ASR_IMM:
    case OP_LSR_IMM:
      convert_sr_to_bfm (inst);
      break;
    case OP_LSL_IMM:
      convert_lsl_to_ubfm (inst);
      break;
    case OP_CINC:
    case OP_CINV:
    case OP_CNEG:
      convert_to_csel (inst);
      break;
    case OP_CSET:
    case OP_CSETM:
      convert_cset_to_csinc (inst);
      break;
    case OP_UBFX:
    case OP_BFXIL:
    case OP_SBFX:
      convert_bfx_to_bfm (inst);
      break;
    case OP_SBFIZ:
    case OP_BFI:
    case OP_UBFIZ:
      convert_bfi_to_bfm (inst);
      break;
    case OP_BFC:
      convert_bfc_to_bfm (inst);
      break;
    case OP_MOV_V:
      convert_mov_to_orr (inst);
      break;
    case OP_MOV_IMM_WIDE:
    case OP_MOV_IMM_WIDEN:
      convert_mov_to_movewide (inst);
      break;
    case OP_MOV_IMM_LOG:
      convert_mov_to_movebitmask (inst);
      break;
    case OP_ROR_IMM:
      convert_ror_to_extr (inst);
      break;
    case OP_SXTL:
    case OP_SXTL2:
    case OP_UXTL:
    case OP_UXTL2:
      convert_xtl_to_shll (inst);
      break;
    default:
      break;
    }

 convert_to_real_return:
  /* Switch INST over to the real opcode so encoding uses its rules.  */
  aarch64_replace_opcode (inst, real);
}
1790
/* Encode *INST_ORI of the opcode code OPCODE.
   Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.
   Returns 1 on success, 0 if the operands fail their constraints (with
   details in *MISMATCH_DETAIL when non-NULL).  */

int
aarch64_opcode_encode (const aarch64_opcode *opcode,
		       const aarch64_inst *inst_ori, aarch64_insn *code,
		       aarch64_opnd_qualifier_t *qlf_seq,
		       aarch64_operand_error *mismatch_detail)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return 0;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      /* NOTE(review): every iteration writes the same slot, so only the
	 last operand's qualifier survives — looks like qlf_seq[i] (or
	 *qlf_seq++) was intended; confirm against callers.  */
      if (qlf_seq != NULL)
	*qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of real opcode
     and the encoding will be carried out using the rules for the aliased
     opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
		   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
      opcode = aliased;
    }

  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (info->skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd))
	aarch64_insert_operand (opnd, info, &inst->value, inst);
    }

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

  /* Possibly use the instruction class to encode the chosen qualifier
     variant.  */
  aarch64_encode_variant_using_iclass (inst);

 encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  *code = inst->value;

  return 1;
}
This page took 0.09766 seconds and 4 git commands to generate.