x86: use template for SSE floating point comparison insns
[deliverable/binutils-gdb.git] / opcodes / aarch64-asm.c
1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2020 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
25 #include "opintl.h"
26
/* Utilities.  */

/* The unnamed arguments consist of the number of fields and information about
   these fields where the VALUE will be inserted into CODE.  MASK can be zero or
   the base mask of the opcode.

   N.B. the fields are required to be in such an order that the least
   significant field for VALUE comes first, e.g. the <index> in
    SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
   is encoded in H:L:M in some cases, the fields H:L:M should be passed in
   the order of M, L, H.  */

static inline void
insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
{
  uint32_t num;
  const aarch64_field *field;
  enum aarch64_field_kind kind;
  va_list va;

  va_start (va, mask);
  /* The first variadic argument is the number of field kinds that follow.  */
  num = va_arg (va, uint32_t);
  assert (num <= 5);
  while (num--)
    {
      /* Insert the current least significant slice of VALUE into the next
	 field, then shift that slice out of VALUE.  */
      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      insert_field (kind, code, value, mask);
      value >>= field->width;
    }
  va_end (va);
}
59
60 /* Insert a raw field value VALUE into all fields in SELF->fields.
61 The least significant bit goes in the final field. */
62
63 static void
64 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
65 aarch64_insn value)
66 {
67 unsigned int i;
68 enum aarch64_field_kind kind;
69
70 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
71 if (self->fields[i] != FLD_NIL)
72 {
73 kind = self->fields[i];
74 insert_field (kind, code, value, 0);
75 value >>= fields[kind].width;
76 }
77 }
78
79 /* Operand inserters. */
80
81 /* Insert register number. */
82 bfd_boolean
83 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
84 aarch64_insn *code,
85 const aarch64_inst *inst ATTRIBUTE_UNUSED,
86 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
87 {
88 insert_field (self->fields[0], code, info->reg.regno, 0);
89 return TRUE;
90 }
91
/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].
   How the index is encoded depends on the instruction class.  */
bfd_boolean
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      /* POS is how far the index is shifted within imm4/imm5: 0 for byte
	 elements, 1 for halfword, 2 for word, 3 for doubleword.  */
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D  */
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      unsigned reglane_index = info->reglane.index;
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_4B:
	case AARCH64_OPND_QLF_S_2H:
	  /* L:H */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	default:
	  assert (0);
	}
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>].  */
      unsigned reglane_index = info->reglane.index;
      assert (reglane_index < 4);
      insert_field (FLD_SM3_imm2, code, reglane_index, 0);
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
	 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
	/* Complex operand takes two elements.  */
	reglane_index *= 2;

      /* The wider the element, the fewer H:L:M bits the index needs.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* H:L:M */
	  assert (reglane_index < 8);
	  insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* H:L */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  assert (reglane_index < 2);
	  insert_field (FLD_H, code, reglane_index, 0);
	  break;
	default:
	  assert (0);
	}
    }
  return TRUE;
}
182
183 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
184 bfd_boolean
185 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
186 aarch64_insn *code,
187 const aarch64_inst *inst ATTRIBUTE_UNUSED,
188 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
189 {
190 /* R */
191 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
192 /* len */
193 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
194 return TRUE;
195 }
196
/* Insert Rt and opcode fields for a register list operand, e.g. Vt
   in AdvSIMD load/store instructions.  */
bfd_boolean
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value = 0;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* opcode: selected by the number of structure elements and, for the
     one-element (LD1/ST1) forms, by the number of listed registers.  */
  switch (num)
    {
    case 1:
      /* LD1/ST1 with a list of 1, 2, 3 or 4 registers.  */
      switch (info->reglist.num_regs)
	{
	case 1: value = 0x7; break;
	case 2: value = 0xa; break;
	case 3: value = 0x6; break;
	case 4: value = 0x2; break;
	default: assert (0);
	}
      break;
    case 2:
      /* Two elements: the four-register list selects the alternate form.  */
      value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
      break;
    case 3:
      value = 0x4;
      break;
    case 4:
      value = 0x0;
      break;
    default:
      assert (0);
    }
  insert_field (FLD_opcode, code, value, 0);

  return TRUE;
}
240
241 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
242 single structure to all lanes instructions. */
243 bfd_boolean
244 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
245 const aarch64_opnd_info *info, aarch64_insn *code,
246 const aarch64_inst *inst,
247 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
248 {
249 aarch64_insn value;
250 /* The opcode dependent area stores the number of elements in
251 each structure to be loaded/stored. */
252 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
253
254 /* Rt */
255 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
256 /* S */
257 value = (aarch64_insn) 0;
258 if (is_ld1r && info->reglist.num_regs == 2)
259 /* OP_LD1R does not have alternating variant, but have "two consecutive"
260 instead. */
261 value = (aarch64_insn) 1;
262 insert_field (FLD_S, code, value, 0);
263
264 return TRUE;
265 }
266
/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand e.g. Vt in AdvSIMD load/store single element instructions.  */
bfd_boolean
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  The wider the element, the
     higher up Q:S:size the index starts and the more low bits are fixed.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q".  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      assert (0);
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  /* opcode<2:1> occupies bits 1..2 of the asisdlso opcode field.  */
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return TRUE;
}
315
/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
   SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.  */
bfd_boolean
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* For the vector (asimdshf) forms the qualifier's standard value carries
     Q in its lowest bit; the remaining bits select the element size.  */
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return TRUE;
}
371
372 /* Insert fields for e.g. the immediate operands in
373 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
374 bfd_boolean
375 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
376 aarch64_insn *code,
377 const aarch64_inst *inst ATTRIBUTE_UNUSED,
378 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
379 {
380 int64_t imm;
381
382 imm = info->imm.value;
383 if (operand_need_shift_by_two (self))
384 imm >>= 2;
385 if (operand_need_shift_by_four (self))
386 imm >>= 4;
387 insert_all_fields (self, code, imm);
388 return TRUE;
389 }
390
391 /* Insert immediate and its shift amount for e.g. the last operand in
392 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
393 bfd_boolean
394 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
395 aarch64_insn *code, const aarch64_inst *inst,
396 aarch64_operand_error *errors)
397 {
398 /* imm16 */
399 aarch64_ins_imm (self, info, code, inst, errors);
400 /* hw */
401 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
402 return TRUE;
403 }
404
405 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
406 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
407 bfd_boolean
408 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
409 const aarch64_opnd_info *info,
410 aarch64_insn *code,
411 const aarch64_inst *inst ATTRIBUTE_UNUSED,
412 aarch64_operand_error *errors
413 ATTRIBUTE_UNUSED)
414 {
415 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
416 uint64_t imm = info->imm.value;
417 enum aarch64_modifier_kind kind = info->shifter.kind;
418 int amount = info->shifter.amount;
419 aarch64_field field = {0, 0};
420
421 /* a:b:c:d:e:f:g:h */
422 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
423 {
424 /* Either MOVI <Dd>, #<imm>
425 or MOVI <Vd>.2D, #<imm>.
426 <imm> is a 64-bit immediate
427 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
428 encoded in "a:b:c:d:e:f:g:h". */
429 imm = aarch64_shrink_expanded_imm8 (imm);
430 assert ((int)imm >= 0);
431 }
432 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
433
434 if (kind == AARCH64_MOD_NONE)
435 return TRUE;
436
437 /* shift amount partially in cmode */
438 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
439 if (kind == AARCH64_MOD_LSL)
440 {
441 /* AARCH64_MOD_LSL: shift zeros. */
442 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
443 assert (esize == 4 || esize == 2 || esize == 1);
444 /* For 8-bit move immediate, the optional LSL #0 does not require
445 encoding. */
446 if (esize == 1)
447 return TRUE;
448 amount >>= 3;
449 if (esize == 4)
450 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
451 else
452 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
453 }
454 else
455 {
456 /* AARCH64_MOD_MSL: shift ones. */
457 amount >>= 4;
458 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
459 }
460 insert_field_2 (&field, code, amount, 0);
461
462 return TRUE;
463 }
464
465 /* Insert fields for an 8-bit floating-point immediate. */
466 bfd_boolean
467 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
468 aarch64_insn *code,
469 const aarch64_inst *inst ATTRIBUTE_UNUSED,
470 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
471 {
472 insert_all_fields (self, code, info->imm.value);
473 return TRUE;
474 }
475
476 /* Insert 1-bit rotation immediate (#90 or #270). */
477 bfd_boolean
478 aarch64_ins_imm_rotate1 (const aarch64_operand *self,
479 const aarch64_opnd_info *info,
480 aarch64_insn *code, const aarch64_inst *inst,
481 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
482 {
483 uint64_t rot = (info->imm.value - 90) / 180;
484 assert (rot < 2U);
485 insert_field (self->fields[0], code, rot, inst->opcode->mask);
486 return TRUE;
487 }
488
489 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
490 bfd_boolean
491 aarch64_ins_imm_rotate2 (const aarch64_operand *self,
492 const aarch64_opnd_info *info,
493 aarch64_insn *code, const aarch64_inst *inst,
494 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
495 {
496 uint64_t rot = info->imm.value / 90;
497 assert (rot < 4U);
498 insert_field (self->fields[0], code, rot, inst->opcode->mask);
499 return TRUE;
500 }
501
502 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
503 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
504 bfd_boolean
505 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
506 aarch64_insn *code,
507 const aarch64_inst *inst ATTRIBUTE_UNUSED,
508 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
509 {
510 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
511 return TRUE;
512 }
513
514 /* Insert arithmetic immediate for e.g. the last operand in
515 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
516 bfd_boolean
517 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
518 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
519 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
520 {
521 /* shift */
522 aarch64_insn value = info->shifter.amount ? 1 : 0;
523 insert_field (self->fields[0], code, value, 0);
524 /* imm12 (unsigned) */
525 insert_field (self->fields[1], code, info->imm.value, 0);
526 return TRUE;
527 }
528
529 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
530 the operand should be inverted before encoding. */
531 static bfd_boolean
532 aarch64_ins_limm_1 (const aarch64_operand *self,
533 const aarch64_opnd_info *info, aarch64_insn *code,
534 const aarch64_inst *inst, bfd_boolean invert_p,
535 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
536 {
537 aarch64_insn value;
538 uint64_t imm = info->imm.value;
539 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
540
541 if (invert_p)
542 imm = ~imm;
543 /* The constraint check should have guaranteed this wouldn't happen. */
544 assert (aarch64_logical_immediate_p (imm, esize, &value));
545
546 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
547 self->fields[0]);
548 return TRUE;
549 }
550
551 /* Insert logical/bitmask immediate for e.g. the last operand in
552 ORR <Wd|WSP>, <Wn>, #<imm>. */
553 bfd_boolean
554 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
555 aarch64_insn *code, const aarch64_inst *inst,
556 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
557 {
558 return aarch64_ins_limm_1 (self, info, code, inst,
559 inst->opcode->op == OP_BIC, errors);
560 }
561
/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.).  */
bfd_boolean
aarch64_ins_inv_limm (const aarch64_operand *self,
		      const aarch64_opnd_info *info, aarch64_insn *code,
		      const aarch64_inst *inst,
		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* These aliases always encode the bitwise NOT of the written value, so
     unconditionally request inversion from the shared helper.  */
  return aarch64_ins_limm_1 (self, info, code, inst, TRUE, errors);
}
571
/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
bfd_boolean
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
		aarch64_insn *code, const aarch64_inst *inst,
		aarch64_operand_error *errors)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst, errors);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size: the pair/literal classes encode only S/D/Q in a 2-bit
	 size field.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_Q: value = 2; break;
	default: assert (0);
	}
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size, taken from the qualifier's standard encoding.  */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }

  return TRUE;
}
609
610 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
611 bfd_boolean
612 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
613 const aarch64_opnd_info *info, aarch64_insn *code,
614 const aarch64_inst *inst ATTRIBUTE_UNUSED,
615 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
616 {
617 /* Rn */
618 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
619 return TRUE;
620 }
621
/* Encode the address operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
bfd_boolean
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S: whether the optional shift amount is present/non-zero.  */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
       S	<amount>
       0	[absent]
       1	#0
       Must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return TRUE;
}
655
656 /* Encode the address operand for e.g.
657 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
658 bfd_boolean
659 aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
660 const aarch64_opnd_info *info, aarch64_insn *code,
661 const aarch64_inst *inst ATTRIBUTE_UNUSED,
662 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
663 {
664 /* Rn */
665 insert_field (self->fields[0], code, info->addr.base_regno, 0);
666
667 /* simm9 */
668 int imm = info->addr.offset.imm;
669 insert_field (self->fields[1], code, imm, 0);
670
671 /* writeback */
672 if (info->addr.writeback)
673 {
674 assert (info->addr.preind == 1 && info->addr.postind == 0);
675 insert_field (self->fields[2], code, 1, 0);
676 }
677 return TRUE;
678 }
679
/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.  */
bfd_boolean
aarch64_ins_addr_simm (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7
      || info->qualifier == AARCH64_OPND_QLF_imm_tag)
    /* scaled immediate in ld/st pair instructions..  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post- index */
  if (info->addr.writeback)
    {
      /* These classes have no writeback forms at all.  */
      assert (inst->opcode->iclass != ldst_unscaled
	      && inst->opcode->iclass != ldstnapair_offs
	      && inst->opcode->iclass != ldstpair_off
	      && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      /* NOTE(review): only the pre-index form sets this extra bit; the
	 post-index form appears to be distinguished by the opcode itself
	 — confirm against the opcode table.  */
      if (info->addr.preind)
	insert_field (self->fields[1], code, 1, 0);
    }

  return TRUE;
}
713
714 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
715 bfd_boolean
716 aarch64_ins_addr_simm10 (const aarch64_operand *self,
717 const aarch64_opnd_info *info,
718 aarch64_insn *code,
719 const aarch64_inst *inst ATTRIBUTE_UNUSED,
720 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
721 {
722 int imm;
723
724 /* Rn */
725 insert_field (self->fields[0], code, info->addr.base_regno, 0);
726 /* simm10 */
727 imm = info->addr.offset.imm >> 3;
728 insert_field (self->fields[1], code, imm >> 9, 0);
729 insert_field (self->fields[2], code, imm, 0);
730 /* writeback */
731 if (info->addr.writeback)
732 {
733 assert (info->addr.preind == 1 && info->addr.postind == 0);
734 insert_field (self->fields[3], code, 1, 0);
735 }
736 return TRUE;
737 }
738
739 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
740 bfd_boolean
741 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
742 const aarch64_opnd_info *info,
743 aarch64_insn *code,
744 const aarch64_inst *inst ATTRIBUTE_UNUSED,
745 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
746 {
747 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
748
749 /* Rn */
750 insert_field (self->fields[0], code, info->addr.base_regno, 0);
751 /* uimm12 */
752 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
753 return TRUE;
754 }
755
756 /* Encode the address operand for e.g.
757 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
758 bfd_boolean
759 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
760 const aarch64_opnd_info *info, aarch64_insn *code,
761 const aarch64_inst *inst ATTRIBUTE_UNUSED,
762 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
763 {
764 /* Rn */
765 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
766 /* Rm | #<amount> */
767 if (info->addr.offset.is_reg)
768 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
769 else
770 insert_field (FLD_Rm, code, 0x1f, 0);
771 return TRUE;
772 }
773
774 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
775 bfd_boolean
776 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
777 const aarch64_opnd_info *info, aarch64_insn *code,
778 const aarch64_inst *inst ATTRIBUTE_UNUSED,
779 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
780 {
781 /* cond */
782 insert_field (FLD_cond, code, info->cond->value, 0);
783 return TRUE;
784 }
785
786 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
787 bfd_boolean
788 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
789 const aarch64_opnd_info *info, aarch64_insn *code,
790 const aarch64_inst *inst,
791 aarch64_operand_error *detail ATTRIBUTE_UNUSED)
792 {
793 /* If a system instruction check if we have any restrictions on which
794 registers it can use. */
795 if (inst->opcode->iclass == ic_system)
796 {
797 uint64_t opcode_flags
798 = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
799 uint32_t sysreg_flags
800 = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);
801
802 /* Check to see if it's read-only, else check if it's write only.
803 if it's both or unspecified don't care. */
804 if (opcode_flags == F_SYS_READ
805 && sysreg_flags
806 && sysreg_flags != F_REG_READ)
807 {
808 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
809 detail->error = _("specified register cannot be read from");
810 detail->index = info->idx;
811 detail->non_fatal = TRUE;
812 }
813 else if (opcode_flags == F_SYS_WRITE
814 && sysreg_flags
815 && sysreg_flags != F_REG_WRITE)
816 {
817 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
818 detail->error = _("specified register cannot be written to");
819 detail->index = info->idx;
820 detail->non_fatal = TRUE;
821 }
822 }
823 /* op0:op1:CRn:CRm:op2 */
824 insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
825 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
826 return TRUE;
827 }
828
829 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
830 bfd_boolean
831 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
832 const aarch64_opnd_info *info, aarch64_insn *code,
833 const aarch64_inst *inst ATTRIBUTE_UNUSED,
834 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
835 {
836 /* op1:op2 */
837 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
838 FLD_op2, FLD_op1);
839 return TRUE;
840 }
841
842 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
843 bfd_boolean
844 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
845 const aarch64_opnd_info *info, aarch64_insn *code,
846 const aarch64_inst *inst ATTRIBUTE_UNUSED,
847 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
848 {
849 /* op1:CRn:CRm:op2 */
850 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
851 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
852 return TRUE;
853 }
854
855 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
856
857 bfd_boolean
858 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
859 const aarch64_opnd_info *info, aarch64_insn *code,
860 const aarch64_inst *inst ATTRIBUTE_UNUSED,
861 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
862 {
863 /* CRm */
864 insert_field (FLD_CRm, code, info->barrier->value, 0);
865 return TRUE;
866 }
867
868 /* Encode the prefetch operation option operand for e.g.
869 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
870
871 bfd_boolean
872 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
873 const aarch64_opnd_info *info, aarch64_insn *code,
874 const aarch64_inst *inst ATTRIBUTE_UNUSED,
875 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
876 {
877 /* prfop in Rt */
878 insert_field (FLD_Rt, code, info->prfop->value, 0);
879 return TRUE;
880 }
881
882 /* Encode the hint number for instructions that alias HINT but take an
883 operand. */
884
885 bfd_boolean
886 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
887 const aarch64_opnd_info *info, aarch64_insn *code,
888 const aarch64_inst *inst ATTRIBUTE_UNUSED,
889 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
890 {
891 /* CRm:op2. */
892 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
893 return TRUE;
894 }
895
896 /* Encode the extended register operand for e.g.
897 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
898 bfd_boolean
899 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
900 const aarch64_opnd_info *info, aarch64_insn *code,
901 const aarch64_inst *inst ATTRIBUTE_UNUSED,
902 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
903 {
904 enum aarch64_modifier_kind kind;
905
906 /* Rm */
907 insert_field (FLD_Rm, code, info->reg.regno, 0);
908 /* option */
909 kind = info->shifter.kind;
910 if (kind == AARCH64_MOD_LSL)
911 kind = info->qualifier == AARCH64_OPND_QLF_W
912 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
913 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
914 /* imm3 */
915 insert_field (FLD_imm3, code, info->shifter.amount, 0);
916
917 return TRUE;
918 }
919
920 /* Encode the shifted register operand for e.g.
921 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
922 bfd_boolean
923 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
924 const aarch64_opnd_info *info, aarch64_insn *code,
925 const aarch64_inst *inst ATTRIBUTE_UNUSED,
926 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
927 {
928 /* Rm */
929 insert_field (FLD_Rm, code, info->reg.regno, 0);
930 /* shift */
931 insert_field (FLD_shift, code,
932 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
933 /* imm6 */
934 insert_field (FLD_imm6, code, info->shifter.amount, 0);
935
936 return TRUE;
937 }
938
939 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
940 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
941 SELF's operand-dependent value. fields[0] specifies the field that
942 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
943 bfd_boolean
944 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
945 const aarch64_opnd_info *info,
946 aarch64_insn *code,
947 const aarch64_inst *inst ATTRIBUTE_UNUSED,
948 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
949 {
950 int factor = 1 + get_operand_specific_data (self);
951 insert_field (self->fields[0], code, info->addr.base_regno, 0);
952 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
953 return TRUE;
954 }
955
956 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
957 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
958 SELF's operand-dependent value. fields[0] specifies the field that
959 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
960 bfd_boolean
961 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
962 const aarch64_opnd_info *info,
963 aarch64_insn *code,
964 const aarch64_inst *inst ATTRIBUTE_UNUSED,
965 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
966 {
967 int factor = 1 + get_operand_specific_data (self);
968 insert_field (self->fields[0], code, info->addr.base_regno, 0);
969 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
970 return TRUE;
971 }
972
973 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
974 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
975 SELF's operand-dependent value. fields[0] specifies the field that
976 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
977 and imm3 fields, with imm3 being the less-significant part. */
978 bfd_boolean
979 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
980 const aarch64_opnd_info *info,
981 aarch64_insn *code,
982 const aarch64_inst *inst ATTRIBUTE_UNUSED,
983 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
984 {
985 int factor = 1 + get_operand_specific_data (self);
986 insert_field (self->fields[0], code, info->addr.base_regno, 0);
987 insert_fields (code, info->addr.offset.imm / factor, 0,
988 2, FLD_imm3, FLD_SVE_imm6);
989 return TRUE;
990 }
991
992 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
993 is a 4-bit signed number and where <shift> is SELF's operand-dependent
994 value. fields[0] specifies the base register field. */
995 bfd_boolean
996 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
997 const aarch64_opnd_info *info, aarch64_insn *code,
998 const aarch64_inst *inst ATTRIBUTE_UNUSED,
999 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1000 {
1001 int factor = 1 << get_operand_specific_data (self);
1002 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1003 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
1004 return TRUE;
1005 }
1006
1007 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1008 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1009 value. fields[0] specifies the base register field. */
1010 bfd_boolean
1011 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
1012 const aarch64_opnd_info *info, aarch64_insn *code,
1013 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1014 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1015 {
1016 int factor = 1 << get_operand_specific_data (self);
1017 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1018 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
1019 return TRUE;
1020 }
1021
1022 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1023 is SELF's operand-dependent value. fields[0] specifies the base
1024 register field and fields[1] specifies the offset register field. */
1025 bfd_boolean
1026 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
1027 const aarch64_opnd_info *info, aarch64_insn *code,
1028 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1029 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1030 {
1031 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1032 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1033 return TRUE;
1034 }
1035
1036 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1037 <shift> is SELF's operand-dependent value. fields[0] specifies the
1038 base register field, fields[1] specifies the offset register field and
1039 fields[2] is a single-bit field that selects SXTW over UXTW. */
1040 bfd_boolean
1041 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
1042 const aarch64_opnd_info *info, aarch64_insn *code,
1043 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1044 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1045 {
1046 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1047 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1048 if (info->shifter.kind == AARCH64_MOD_UXTW)
1049 insert_field (self->fields[2], code, 0, 0);
1050 else
1051 insert_field (self->fields[2], code, 1, 0);
1052 return TRUE;
1053 }
1054
1055 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1056 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1057 fields[0] specifies the base register field. */
1058 bfd_boolean
1059 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
1060 const aarch64_opnd_info *info, aarch64_insn *code,
1061 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1062 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1063 {
1064 int factor = 1 << get_operand_specific_data (self);
1065 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1066 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
1067 return TRUE;
1068 }
1069
1070 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1071 where <modifier> is fixed by the instruction and where <msz> is a
1072 2-bit unsigned number. fields[0] specifies the base register field
1073 and fields[1] specifies the offset register field. */
1074 static bfd_boolean
1075 aarch64_ext_sve_addr_zz (const aarch64_operand *self,
1076 const aarch64_opnd_info *info, aarch64_insn *code,
1077 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1078 {
1079 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1080 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1081 insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
1082 return TRUE;
1083 }
1084
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors)
{
  /* The LSL/SXTW/UXTW forms share one encoding routine; the modifier
     itself is implied by the opcode, not encoded here.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
1096
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  /* The LSL/SXTW/UXTW forms share one encoding routine; the modifier
     itself is implied by the opcode, not encoded here.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
1109
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  /* The LSL/SXTW/UXTW forms share one encoding routine; the modifier
     itself is implied by the opcode, not encoded here.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
1122
1123 /* Encode an SVE ADD/SUB immediate. */
1124 bfd_boolean
1125 aarch64_ins_sve_aimm (const aarch64_operand *self,
1126 const aarch64_opnd_info *info, aarch64_insn *code,
1127 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1128 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1129 {
1130 if (info->shifter.amount == 8)
1131 insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
1132 else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
1133 insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
1134 else
1135 insert_all_fields (self, code, info->imm.value & 0xff);
1136 return TRUE;
1137 }
1138
/* Encode an SVE CPY/DUP immediate.  */
bfd_boolean
aarch64_ins_sve_asimm (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst,
		       aarch64_operand_error *errors)
{
  /* The CPY/DUP immediate uses the same imm8 + optional "LSL #8"
     encoding as the ADD/SUB immediate, so reuse that routine.  */
  return aarch64_ins_sve_aimm (self, info, code, inst, errors);
}
1148
1149 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1150 array specifies which field to use for Zn. MM is encoded in the
1151 concatenation of imm5 and SVE_tszh, with imm5 being the less
1152 significant part. */
1153 bfd_boolean
1154 aarch64_ins_sve_index (const aarch64_operand *self,
1155 const aarch64_opnd_info *info, aarch64_insn *code,
1156 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1157 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1158 {
1159 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1160 insert_field (self->fields[0], code, info->reglane.regno, 0);
1161 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1162 2, FLD_imm5, FLD_SVE_tszh);
1163 return TRUE;
1164 }
1165
/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
bfd_boolean
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors)
{
  /* Same bitmask-immediate encoding as the base-ISA logical
     immediate; delegate to the common routine.  */
  return aarch64_ins_limm (self, info, code, inst, errors);
}
1175
1176 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1177 and where MM occupies the most-significant part. The operand-dependent
1178 value specifies the number of bits in Zn. */
1179 bfd_boolean
1180 aarch64_ins_sve_quad_index (const aarch64_operand *self,
1181 const aarch64_opnd_info *info, aarch64_insn *code,
1182 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1183 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1184 {
1185 unsigned int reg_bits = get_operand_specific_data (self);
1186 assert (info->reglane.regno < (1U << reg_bits));
1187 unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1188 insert_all_fields (self, code, val);
1189 return TRUE;
1190 }
1191
/* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
   to use for Zn.  */
bfd_boolean
aarch64_ins_sve_reglist (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Only the first register is encoded; the list length is implied
     by the opcode.  */
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  return TRUE;
}
1203
1204 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1205 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1206 field. */
1207 bfd_boolean
1208 aarch64_ins_sve_scale (const aarch64_operand *self,
1209 const aarch64_opnd_info *info, aarch64_insn *code,
1210 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1211 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1212 {
1213 insert_all_fields (self, code, info->imm.value);
1214 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
1215 return TRUE;
1216 }
1217
1218 /* Encode an SVE shift left immediate. */
1219 bfd_boolean
1220 aarch64_ins_sve_shlimm (const aarch64_operand *self,
1221 const aarch64_opnd_info *info, aarch64_insn *code,
1222 const aarch64_inst *inst,
1223 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1224 {
1225 const aarch64_opnd_info *prev_operand;
1226 unsigned int esize;
1227
1228 assert (info->idx > 0);
1229 prev_operand = &inst->operands[info->idx - 1];
1230 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1231 insert_all_fields (self, code, 8 * esize + info->imm.value);
1232 return TRUE;
1233 }
1234
1235 /* Encode an SVE shift right immediate. */
1236 bfd_boolean
1237 aarch64_ins_sve_shrimm (const aarch64_operand *self,
1238 const aarch64_opnd_info *info, aarch64_insn *code,
1239 const aarch64_inst *inst,
1240 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1241 {
1242 const aarch64_opnd_info *prev_operand;
1243 unsigned int esize;
1244
1245 unsigned int opnd_backshift = get_operand_specific_data (self);
1246 assert (info->idx >= (int)opnd_backshift);
1247 prev_operand = &inst->operands[info->idx - opnd_backshift];
1248 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1249 insert_all_fields (self, code, 16 * esize - info->imm.value);
1250 return TRUE;
1251 }
1252
1253 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1254 The fields array specifies which field to use. */
1255 bfd_boolean
1256 aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1257 const aarch64_opnd_info *info,
1258 aarch64_insn *code,
1259 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1260 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1261 {
1262 if (info->imm.value == 0x3f000000)
1263 insert_field (self->fields[0], code, 0, 0);
1264 else
1265 insert_field (self->fields[0], code, 1, 0);
1266 return TRUE;
1267 }
1268
1269 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1270 The fields array specifies which field to use. */
1271 bfd_boolean
1272 aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1273 const aarch64_opnd_info *info,
1274 aarch64_insn *code,
1275 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1276 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1277 {
1278 if (info->imm.value == 0x3f000000)
1279 insert_field (self->fields[0], code, 0, 0);
1280 else
1281 insert_field (self->fields[0], code, 1, 0);
1282 return TRUE;
1283 }
1284
1285 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1286 The fields array specifies which field to use. */
1287 bfd_boolean
1288 aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1289 const aarch64_opnd_info *info,
1290 aarch64_insn *code,
1291 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1292 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1293 {
1294 if (info->imm.value == 0)
1295 insert_field (self->fields[0], code, 0, 0);
1296 else
1297 insert_field (self->fields[0], code, 1, 0);
1298 return TRUE;
1299 }
1300
1301 /* Miscellaneous encoding functions. */
1302
/* Encode size[0], i.e. bit 22, for
   e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier;

  /* Bit 22 is derived from whichever operand carries the wider
     (.4S/.2D) arrangement: the source for FCVTN, the destination for
     FCVTL.  */
  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      assert (0);
    }
  assert (qualifier == AARCH64_OPND_QLF_V_4S
	  || qualifier == AARCH64_OPND_QLF_V_2D);
  /* .4S -> 0, .2D -> 1.  */
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  /* Write only bit 0 of the size field, i.e. instruction bit 22.  */
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}
1334
1335 /* Encode size[0], i.e. bit 22, for
1336 e.g. FCVTXN <Vb><d>, <Va><n>. */
1337
1338 static void
1339 encode_asisd_fcvtxn (aarch64_inst *inst)
1340 {
1341 aarch64_insn val = 1;
1342 aarch64_field field = {0, 0};
1343 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1344 gen_sub_field (FLD_size, 0, 1, &field);
1345 insert_field_2 (&field, &inst->value, val, 0);
1346 }
1347
1348 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1349 static void
1350 encode_fcvt (aarch64_inst *inst)
1351 {
1352 aarch64_insn val;
1353 const aarch64_field field = {15, 2};
1354
1355 /* opc dstsize */
1356 switch (inst->operands[0].qualifier)
1357 {
1358 case AARCH64_OPND_QLF_S_S: val = 0; break;
1359 case AARCH64_OPND_QLF_S_D: val = 1; break;
1360 case AARCH64_OPND_QLF_S_H: val = 3; break;
1361 default: abort ();
1362 }
1363 insert_field_2 (&field, &inst->value, val, 0);
1364
1365 return;
1366 }
1367
1368 /* Return the index in qualifiers_list that INST is using. Should only
1369 be called once the qualifiers are known to be valid. */
1370
1371 static int
1372 aarch64_get_variant (struct aarch64_inst *inst)
1373 {
1374 int i, nops, variant;
1375
1376 nops = aarch64_num_of_operands (inst->opcode);
1377 for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
1378 {
1379 for (i = 0; i < nops; ++i)
1380 if (inst->opcode->qualifiers_list[variant][i]
1381 != inst->operands[i].qualifier)
1382 break;
1383 if (i == nops)
1384 return variant;
1385 }
1386 abort ();
1387 }
1388
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  Dispatches on the opcode enum: FCVT variants get their
   size/opc bits fixed up, and the SVE MOV/NOT aliases duplicate one
   register field into another.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
		     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      /* Nothing extra to encode for this alias.  */
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}
1455
/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  /* The standard value packs Q in bit 0 and size in bits [2:1].  */
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size */
  /* The AdvSIMD load/store classes keep their size bits in a different
     position from everything else.  */
  if (inst->opcode->iclass == asisdlse
     || inst->opcode->iclass == asisdlsep
     || inst->opcode->iclass == asisdlso
     || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
1483
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has the enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  if (inst->opcode->flags & F_SF)
    {
      /* sf bit: 1 for a 64-bit (X/SP) register operand, 0 for 32-bit.  */
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      /* Some opcodes mirror sf into the N bit as well.  */
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_LSE_SZ)
    {
      /* LSE atomics encode the 32/64-bit choice in the lse_sz field.  */
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  if (inst->opcode->flags & F_FPTYPE)
    {
      /* Scalar FP 'type' field from the selected operand's qualifier.  */
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: assert (0);
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SSIZE)
    {
      /* AdvSIMD scalar 'size' field from the selected operand's
	 qualifier.  */
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_T)
    {
      /* Arrangement encoded in Q and a one-hot bit within imm5.  */
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be a integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      /* opc<0> is the inverse of the standard qualifier value.  */
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
1609
/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  int variant = 0;
  switch (inst->opcode->iclass)
    {
    case sve_cpy:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
    case sve_shift_tsz_hsd:
    case sve_shift_tsz_bhsd:
      /* For indices and shift amounts, the variant is encoded as
	 part of the immediate.  */
      break;

    case sve_limm:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
	 and depend on the immediate.  They don't have a separate
	 encoding.  */
      break;

    case sve_misc:
      /* sve_misc instructions have only a single variant.  */
      break;

    case sve_movprfx:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      /* Variant 0 (.H) corresponds to size value 1, hence the bias.  */
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_bh:
    case sve_size_sd:
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_sd2:
      insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd2:
      insert_field (FLD_SVE_size, &inst->value,
		    aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_tsz_bhs:
      /* One-hot encoding of the variant in tszl:sz.  */
      insert_fields (&inst->value,
		     (1 << aarch64_get_variant (inst)),
		     0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
      break;

    case sve_size_13:
      /* Maps the two variants onto size values 1 and 3 (value 2 is
	 skipped).  */
      variant = aarch64_get_variant (inst) + 1;
      if (variant == 2)
	  variant = 3;
      insert_field (FLD_size, &inst->value, variant, 0);
      break;

    default:
      break;
    }
}
1694
1695 /* Converters converting an alias opcode instruction to its real form. */
1696
/* ROR <Wd>, <Ws>, #<shift>
   is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  /* Shift operands up to make room for the duplicated <Ws>, working
     from the highest slot down so nothing is clobbered: the immediate
     moves from slot 2 to slot 3, then <Ws> is copied into slot 2.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}
1706
1707 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1708 is equivalent to:
1709 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1710 static void
1711 convert_xtl_to_shll (aarch64_inst *inst)
1712 {
1713 inst->operands[2].qualifier = inst->operands[1].qualifier;
1714 inst->operands[2].imm.value = 0;
1715 }
1716
1717 /* Convert
1718 LSR <Xd>, <Xn>, #<shift>
1719 to
1720 UBFM <Xd>, <Xn>, #<shift>, #63. */
1721 static void
1722 convert_sr_to_bfm (aarch64_inst *inst)
1723 {
1724 inst->operands[3].imm.value =
1725 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1726 }
1727
/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
     is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.
     Duplicate the source register into the second ORR source slot.  */
  copy_operand_info (inst, 2, 1);
}
1737
1738 /* When <imms> >= <immr>, the instruction written:
1739 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1740 is equivalent to:
1741 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1742
1743 static void
1744 convert_bfx_to_bfm (aarch64_inst *inst)
1745 {
1746 int64_t lsb, width;
1747
1748 /* Convert the operand. */
1749 lsb = inst->operands[2].imm.value;
1750 width = inst->operands[3].imm.value;
1751 inst->operands[2].imm.value = lsb;
1752 inst->operands[3].imm.value = lsb + width - 1;
1753 }
1754
1755 /* When <imms> < <immr>, the instruction written:
1756 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1757 is equivalent to:
1758 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1759
1760 static void
1761 convert_bfi_to_bfm (aarch64_inst *inst)
1762 {
1763 int64_t lsb, width;
1764
1765 /* Convert the operand. */
1766 lsb = inst->operands[2].imm.value;
1767 width = inst->operands[3].imm.value;
1768 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1769 {
1770 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1771 inst->operands[3].imm.value = width - 1;
1772 }
1773 else
1774 {
1775 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1776 inst->operands[3].imm.value = width - 1;
1777 }
1778 }
1779
1780 /* The instruction written:
1781 BFC <Xd>, #<lsb>, #<width>
1782 is equivalent to:
1783 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1784
1785 static void
1786 convert_bfc_to_bfm (aarch64_inst *inst)
1787 {
1788 int64_t lsb, width;
1789
1790 /* Insert XZR. */
1791 copy_operand_info (inst, 3, 2);
1792 copy_operand_info (inst, 2, 1);
1793 copy_operand_info (inst, 1, 0);
1794 inst->operands[1].reg.regno = 0x1f;
1795
1796 /* Convert the immediate operand. */
1797 lsb = inst->operands[2].imm.value;
1798 width = inst->operands[3].imm.value;
1799 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1800 {
1801 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1802 inst->operands[3].imm.value = width - 1;
1803 }
1804 else
1805 {
1806 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1807 inst->operands[3].imm.value = width - 1;
1808 }
1809 }
1810
1811 /* The instruction written:
1812 LSL <Xd>, <Xn>, #<shift>
1813 is equivalent to:
1814 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1815
1816 static void
1817 convert_lsl_to_ubfm (aarch64_inst *inst)
1818 {
1819 int64_t shift = inst->operands[2].imm.value;
1820
1821 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1822 {
1823 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1824 inst->operands[3].imm.value = 31 - shift;
1825 }
1826 else
1827 {
1828 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1829 inst->operands[3].imm.value = 63 - shift;
1830 }
1831 }
1832
/* CINC <Wd>, <Wn>, <cond>
   is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */

static void
convert_to_csel (aarch64_inst *inst)
{
  /* Move the condition to slot 3 and duplicate <Wn> into slot 2,
     working from the highest slot down so nothing is clobbered.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  /* The alias tests the opposite condition from the real opcode.  */
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1844
/* CSET <Wd>, <cond>
   is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  /* Move the condition to slot 3, then materialise WZR/XZR (regno
     0x1f) in the two source slots; copy from slot 0 first so the
     register qualifier is preserved.  */
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  /* The alias tests the opposite condition from the real opcode.  */
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1859
/* MOV <Wd>, #<imm>
   is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>.  */

static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value;

  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      /* MOVZ: use the immediate as-is.  */
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      /* MOVN: the encoded halfword is the bitwise NOT of the
	 immediate.  */
      value = ~inst->operands[1].imm.value;
      break;
    default:
      assert (0);
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);
  /* Reduce the value to the 16-bit halfword that, shifted left by
     SHIFT_AMOUNT, reproduces it.  */
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
}
1893
/* MOV <Wd>, #<imm>
   is equivalent to:
   ORR <Wd>, WZR, #<imm>.  */

static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  /* Move the immediate to slot 2 first, then turn slot 1 into
     WZR/XZR (regno 0x1f) and make sure it is not skipped when
     encoding.  */
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
}
1905
1906 /* Some alias opcodes are assembled by being converted to their real-form. */
1907
1908 static void
1909 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1910 {
1911 const aarch64_opcode *alias = inst->opcode;
1912
1913 if ((alias->flags & F_CONV) == 0)
1914 goto convert_to_real_return;
1915
1916 switch (alias->op)
1917 {
1918 case OP_ASR_IMM:
1919 case OP_LSR_IMM:
1920 convert_sr_to_bfm (inst);
1921 break;
1922 case OP_LSL_IMM:
1923 convert_lsl_to_ubfm (inst);
1924 break;
1925 case OP_CINC:
1926 case OP_CINV:
1927 case OP_CNEG:
1928 convert_to_csel (inst);
1929 break;
1930 case OP_CSET:
1931 case OP_CSETM:
1932 convert_cset_to_csinc (inst);
1933 break;
1934 case OP_UBFX:
1935 case OP_BFXIL:
1936 case OP_SBFX:
1937 convert_bfx_to_bfm (inst);
1938 break;
1939 case OP_SBFIZ:
1940 case OP_BFI:
1941 case OP_UBFIZ:
1942 convert_bfi_to_bfm (inst);
1943 break;
1944 case OP_BFC:
1945 convert_bfc_to_bfm (inst);
1946 break;
1947 case OP_MOV_V:
1948 convert_mov_to_orr (inst);
1949 break;
1950 case OP_MOV_IMM_WIDE:
1951 case OP_MOV_IMM_WIDEN:
1952 convert_mov_to_movewide (inst);
1953 break;
1954 case OP_MOV_IMM_LOG:
1955 convert_mov_to_movebitmask (inst);
1956 break;
1957 case OP_ROR_IMM:
1958 convert_ror_to_extr (inst);
1959 break;
1960 case OP_SXTL:
1961 case OP_SXTL2:
1962 case OP_UXTL:
1963 case OP_UXTL2:
1964 convert_xtl_to_shll (inst);
1965 break;
1966 default:
1967 break;
1968 }
1969
1970 convert_to_real_return:
1971 aarch64_replace_opcode (inst, real);
1972 }
1973
/* Encode *INST_ORI of the opcode code OPCODE.
   Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.  */

bfd_boolean
aarch64_opcode_encode (const aarch64_opcode *opcode,
		       const aarch64_inst *inst_ori, aarch64_insn *code,
		       aarch64_opnd_qualifier_t *qlf_seq,
		       aarch64_operand_error *mismatch_detail,
		       aarch64_instr_sequence* insn_sequence)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return 0;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.
     NOTE(review): this path also skips the verifier calls further down —
     confirm no zero-operand opcode relies on verification.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      /* NOTE(review): this writes *qlf_seq, not qlf_seq[i], so each
	 iteration overwrites the previous qualifier and only the last
	 one survives — verify against callers whether the full sequence
	 was intended.  */
      if (qlf_seq != NULL)
	*qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of real opcode
     and the encoding will be carried out using the rules for the aliased
     opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
		   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
      /* From here on, encode using the real opcode's rules.  */
      opcode = aliased;
    }

  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      /* Operand lists are NIL-terminated.  */
      if (type == AARCH64_OPND_NIL)
	break;
      /* Operands marked skip were folded into another operand during
	 alias conversion.  */
      if (info->skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd)
	  && !aarch64_insert_operand (opnd, info, &inst->value, inst,
				      mismatch_detail))
	return FALSE;
    }

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

  /* Possibly use the instruction class to encode the chosen qualifier
     variant.  */
  aarch64_encode_variant_using_iclass (inst);

  /* Run a verifier if the instruction has one set.
     NOTE(review): *CODE has not been written yet at this point, so the
     verifier sees the caller's incoming value — confirm verifiers only
     inspect INST when invoked for encoding.  */
  if (opcode->verifier)
    {
      enum err_type result = opcode->verifier (inst, *code, 0, TRUE,
					       mismatch_detail, insn_sequence);
      switch (result)
	{
	case ERR_UND:
	case ERR_UNP:
	case ERR_NYI:
	  return FALSE;
	default:
	  break;
	}
    }

  /* Always run constrain verifiers, this is needed because constrains need to
     maintain a global state.  Regardless if the instruction has the flag set
     or not.  */
  enum err_type result = verify_constraints (inst, *code, 0, TRUE,
					     mismatch_detail, insn_sequence);
  switch (result)
    {
    case ERR_UND:
    case ERR_UNP:
    case ERR_NYI:
      return FALSE;
    default:
      break;
    }


encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  /* Hand the accumulated encoding back to the caller.  */
  *code = inst->value;

  return TRUE;
}
This page took 0.075222 seconds and 4 git commands to generate.