b865d50d01ad8d1a5deb718289d6b3261b464f2d
[deliverable/binutils-gdb.git] / opcodes / aarch64-asm.c
1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
25 #include "opintl.h"
26
27 /* Utilities. */
28
29 /* The unnamed arguments consist of the number of fields and information about
30 these fields where the VALUE will be inserted into CODE. MASK can be zero or
31 the base mask of the opcode.
32
33 N.B. the fields are required to be in such an order than the least signficant
34 field for VALUE comes the first, e.g. the <index> in
35 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37 the order of M, L, H. */
38
39 static inline void
40 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
41 {
42 uint32_t num;
43 const aarch64_field *field;
44 enum aarch64_field_kind kind;
45 va_list va;
46
47 va_start (va, mask);
48 num = va_arg (va, uint32_t);
49 assert (num <= 5);
50 while (num--)
51 {
52 kind = va_arg (va, enum aarch64_field_kind);
53 field = &fields[kind];
54 insert_field (kind, code, value, mask);
55 value >>= field->width;
56 }
57 va_end (va);
58 }
59
60 /* Insert a raw field value VALUE into all fields in SELF->fields.
61 The least significant bit goes in the final field. */
62
63 static void
64 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
65 aarch64_insn value)
66 {
67 unsigned int i;
68 enum aarch64_field_kind kind;
69
70 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
71 if (self->fields[i] != FLD_NIL)
72 {
73 kind = self->fields[i];
74 insert_field (kind, code, value, 0);
75 value >>= fields[kind].width;
76 }
77 }
78
79 /* Operand inserters. */
80
81 /* Insert register number. */
82 bfd_boolean
83 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
84 aarch64_insn *code,
85 const aarch64_inst *inst ATTRIBUTE_UNUSED,
86 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
87 {
88 insert_field (self->fields[0], code, info->reg.regno, 0);
89 return TRUE;
90 }
91
92 /* Insert register number, index and/or other data for SIMD register element
93 operand, e.g. the last source operand in
94 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
95 bfd_boolean
96 aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
97 aarch64_insn *code, const aarch64_inst *inst,
98 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
99 {
100 /* regno */
101 insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
102 /* index and/or type */
103 if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
104 {
105 int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
106 if (info->type == AARCH64_OPND_En
107 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
108 {
109 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
110 assert (info->idx == 1); /* Vn */
111 aarch64_insn value = info->reglane.index << pos;
112 insert_field (FLD_imm4, code, value, 0);
113 }
114 else
115 {
116 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
117 imm5<3:0> <V>
118 0000 RESERVED
119 xxx1 B
120 xx10 H
121 x100 S
122 1000 D */
123 aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
124 insert_field (FLD_imm5, code, value, 0);
125 }
126 }
127 else if (inst->opcode->iclass == dotproduct)
128 {
129 unsigned reglane_index = info->reglane.index;
130 switch (info->qualifier)
131 {
132 case AARCH64_OPND_QLF_S_4B:
133 /* L:H */
134 assert (reglane_index < 4);
135 insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
136 break;
137 default:
138 assert (0);
139 }
140 }
141 else if (inst->opcode->iclass == cryptosm3)
142 {
143 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
144 unsigned reglane_index = info->reglane.index;
145 assert (reglane_index < 4);
146 insert_field (FLD_SM3_imm2, code, reglane_index, 0);
147 }
148 else
149 {
150 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
151 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
152 unsigned reglane_index = info->reglane.index;
153
154 if (inst->opcode->op == OP_FCMLA_ELEM)
155 /* Complex operand takes two elements. */
156 reglane_index *= 2;
157
158 switch (info->qualifier)
159 {
160 case AARCH64_OPND_QLF_S_H:
161 /* H:L:M */
162 assert (reglane_index < 8);
163 insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
164 break;
165 case AARCH64_OPND_QLF_S_S:
166 /* H:L */
167 assert (reglane_index < 4);
168 insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
169 break;
170 case AARCH64_OPND_QLF_S_D:
171 /* H */
172 assert (reglane_index < 2);
173 insert_field (FLD_H, code, reglane_index, 0);
174 break;
175 default:
176 assert (0);
177 }
178 }
179 return TRUE;
180 }
181
182 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
183 bfd_boolean
184 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
185 aarch64_insn *code,
186 const aarch64_inst *inst ATTRIBUTE_UNUSED,
187 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
188 {
189 /* R */
190 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
191 /* len */
192 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
193 return TRUE;
194 }
195
196 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
197 in AdvSIMD load/store instructions. */
198 bfd_boolean
199 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
200 const aarch64_opnd_info *info, aarch64_insn *code,
201 const aarch64_inst *inst,
202 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
203 {
204 aarch64_insn value = 0;
205 /* Number of elements in each structure to be loaded/stored. */
206 unsigned num = get_opcode_dependent_value (inst->opcode);
207
208 /* Rt */
209 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
210 /* opcode */
211 switch (num)
212 {
213 case 1:
214 switch (info->reglist.num_regs)
215 {
216 case 1: value = 0x7; break;
217 case 2: value = 0xa; break;
218 case 3: value = 0x6; break;
219 case 4: value = 0x2; break;
220 default: assert (0);
221 }
222 break;
223 case 2:
224 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
225 break;
226 case 3:
227 value = 0x4;
228 break;
229 case 4:
230 value = 0x0;
231 break;
232 default:
233 assert (0);
234 }
235 insert_field (FLD_opcode, code, value, 0);
236
237 return TRUE;
238 }
239
240 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
241 single structure to all lanes instructions. */
242 bfd_boolean
243 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
244 const aarch64_opnd_info *info, aarch64_insn *code,
245 const aarch64_inst *inst,
246 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
247 {
248 aarch64_insn value;
249 /* The opcode dependent area stores the number of elements in
250 each structure to be loaded/stored. */
251 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
252
253 /* Rt */
254 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
255 /* S */
256 value = (aarch64_insn) 0;
257 if (is_ld1r && info->reglist.num_regs == 2)
258 /* OP_LD1R does not have alternating variant, but have "two consecutive"
259 instead. */
260 value = (aarch64_insn) 1;
261 insert_field (FLD_S, code, value, 0);
262
263 return TRUE;
264 }
265
266 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
267 operand e.g. Vt in AdvSIMD load/store single element instructions. */
268 bfd_boolean
269 aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
270 const aarch64_opnd_info *info, aarch64_insn *code,
271 const aarch64_inst *inst ATTRIBUTE_UNUSED,
272 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
273 {
274 aarch64_field field = {0, 0};
275 aarch64_insn QSsize = 0; /* fields Q:S:size. */
276 aarch64_insn opcodeh2 = 0; /* opcode<2:1> */
277
278 assert (info->reglist.has_index);
279
280 /* Rt */
281 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
282 /* Encode the index, opcode<2:1> and size. */
283 switch (info->qualifier)
284 {
285 case AARCH64_OPND_QLF_S_B:
286 /* Index encoded in "Q:S:size". */
287 QSsize = info->reglist.index;
288 opcodeh2 = 0x0;
289 break;
290 case AARCH64_OPND_QLF_S_H:
291 /* Index encoded in "Q:S:size<1>". */
292 QSsize = info->reglist.index << 1;
293 opcodeh2 = 0x1;
294 break;
295 case AARCH64_OPND_QLF_S_S:
296 /* Index encoded in "Q:S". */
297 QSsize = info->reglist.index << 2;
298 opcodeh2 = 0x2;
299 break;
300 case AARCH64_OPND_QLF_S_D:
301 /* Index encoded in "Q". */
302 QSsize = info->reglist.index << 3 | 0x1;
303 opcodeh2 = 0x2;
304 break;
305 default:
306 assert (0);
307 }
308 insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
309 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
310 insert_field_2 (&field, code, opcodeh2, 0);
311
312 return TRUE;
313 }
314
315 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
316 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
317 or SSHR <V><d>, <V><n>, #<shift>. */
318 bfd_boolean
319 aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
320 const aarch64_opnd_info *info,
321 aarch64_insn *code, const aarch64_inst *inst,
322 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
323 {
324 unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
325 aarch64_insn Q, imm;
326
327 if (inst->opcode->iclass == asimdshf)
328 {
329 /* Q
330 immh Q <T>
331 0000 x SEE AdvSIMD modified immediate
332 0001 0 8B
333 0001 1 16B
334 001x 0 4H
335 001x 1 8H
336 01xx 0 2S
337 01xx 1 4S
338 1xxx 0 RESERVED
339 1xxx 1 2D */
340 Q = (val & 0x1) ? 1 : 0;
341 insert_field (FLD_Q, code, Q, inst->opcode->mask);
342 val >>= 1;
343 }
344
345 assert (info->type == AARCH64_OPND_IMM_VLSR
346 || info->type == AARCH64_OPND_IMM_VLSL);
347
348 if (info->type == AARCH64_OPND_IMM_VLSR)
349 /* immh:immb
350 immh <shift>
351 0000 SEE AdvSIMD modified immediate
352 0001 (16-UInt(immh:immb))
353 001x (32-UInt(immh:immb))
354 01xx (64-UInt(immh:immb))
355 1xxx (128-UInt(immh:immb)) */
356 imm = (16 << (unsigned)val) - info->imm.value;
357 else
358 /* immh:immb
359 immh <shift>
360 0000 SEE AdvSIMD modified immediate
361 0001 (UInt(immh:immb)-8)
362 001x (UInt(immh:immb)-16)
363 01xx (UInt(immh:immb)-32)
364 1xxx (UInt(immh:immb)-64) */
365 imm = info->imm.value + (8 << (unsigned)val);
366 insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
367
368 return TRUE;
369 }
370
371 /* Insert fields for e.g. the immediate operands in
372 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
373 bfd_boolean
374 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
375 aarch64_insn *code,
376 const aarch64_inst *inst ATTRIBUTE_UNUSED,
377 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
378 {
379 int64_t imm;
380
381 imm = info->imm.value;
382 if (operand_need_shift_by_two (self))
383 imm >>= 2;
384 insert_all_fields (self, code, imm);
385 return TRUE;
386 }
387
388 /* Insert immediate and its shift amount for e.g. the last operand in
389 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
390 bfd_boolean
391 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
392 aarch64_insn *code, const aarch64_inst *inst,
393 aarch64_operand_error *errors)
394 {
395 /* imm16 */
396 aarch64_ins_imm (self, info, code, inst, errors);
397 /* hw */
398 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
399 return TRUE;
400 }
401
402 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
403 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
404 bfd_boolean
405 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
406 const aarch64_opnd_info *info,
407 aarch64_insn *code,
408 const aarch64_inst *inst ATTRIBUTE_UNUSED,
409 aarch64_operand_error *errors
410 ATTRIBUTE_UNUSED)
411 {
412 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
413 uint64_t imm = info->imm.value;
414 enum aarch64_modifier_kind kind = info->shifter.kind;
415 int amount = info->shifter.amount;
416 aarch64_field field = {0, 0};
417
418 /* a:b:c:d:e:f:g:h */
419 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
420 {
421 /* Either MOVI <Dd>, #<imm>
422 or MOVI <Vd>.2D, #<imm>.
423 <imm> is a 64-bit immediate
424 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
425 encoded in "a:b:c:d:e:f:g:h". */
426 imm = aarch64_shrink_expanded_imm8 (imm);
427 assert ((int)imm >= 0);
428 }
429 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
430
431 if (kind == AARCH64_MOD_NONE)
432 return TRUE;
433
434 /* shift amount partially in cmode */
435 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
436 if (kind == AARCH64_MOD_LSL)
437 {
438 /* AARCH64_MOD_LSL: shift zeros. */
439 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
440 assert (esize == 4 || esize == 2 || esize == 1);
441 /* For 8-bit move immediate, the optional LSL #0 does not require
442 encoding. */
443 if (esize == 1)
444 return TRUE;
445 amount >>= 3;
446 if (esize == 4)
447 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
448 else
449 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
450 }
451 else
452 {
453 /* AARCH64_MOD_MSL: shift ones. */
454 amount >>= 4;
455 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
456 }
457 insert_field_2 (&field, code, amount, 0);
458
459 return TRUE;
460 }
461
462 /* Insert fields for an 8-bit floating-point immediate. */
463 bfd_boolean
464 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
465 aarch64_insn *code,
466 const aarch64_inst *inst ATTRIBUTE_UNUSED,
467 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
468 {
469 insert_all_fields (self, code, info->imm.value);
470 return TRUE;
471 }
472
473 /* Insert 1-bit rotation immediate (#90 or #270). */
474 bfd_boolean
475 aarch64_ins_imm_rotate1 (const aarch64_operand *self,
476 const aarch64_opnd_info *info,
477 aarch64_insn *code, const aarch64_inst *inst,
478 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
479 {
480 uint64_t rot = (info->imm.value - 90) / 180;
481 assert (rot < 2U);
482 insert_field (self->fields[0], code, rot, inst->opcode->mask);
483 return TRUE;
484 }
485
486 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
487 bfd_boolean
488 aarch64_ins_imm_rotate2 (const aarch64_operand *self,
489 const aarch64_opnd_info *info,
490 aarch64_insn *code, const aarch64_inst *inst,
491 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
492 {
493 uint64_t rot = info->imm.value / 90;
494 assert (rot < 4U);
495 insert_field (self->fields[0], code, rot, inst->opcode->mask);
496 return TRUE;
497 }
498
499 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
500 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
501 bfd_boolean
502 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
503 aarch64_insn *code,
504 const aarch64_inst *inst ATTRIBUTE_UNUSED,
505 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
506 {
507 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
508 return TRUE;
509 }
510
511 /* Insert arithmetic immediate for e.g. the last operand in
512 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
513 bfd_boolean
514 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
515 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
516 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
517 {
518 /* shift */
519 aarch64_insn value = info->shifter.amount ? 1 : 0;
520 insert_field (self->fields[0], code, value, 0);
521 /* imm12 (unsigned) */
522 insert_field (self->fields[1], code, info->imm.value, 0);
523 return TRUE;
524 }
525
526 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
527 the operand should be inverted before encoding. */
528 static bfd_boolean
529 aarch64_ins_limm_1 (const aarch64_operand *self,
530 const aarch64_opnd_info *info, aarch64_insn *code,
531 const aarch64_inst *inst, bfd_boolean invert_p,
532 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
533 {
534 aarch64_insn value;
535 uint64_t imm = info->imm.value;
536 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
537
538 if (invert_p)
539 imm = ~imm;
540 /* The constraint check should have guaranteed this wouldn't happen. */
541 assert (aarch64_logical_immediate_p (imm, esize, &value));
542
543 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
544 self->fields[0]);
545 return TRUE;
546 }
547
548 /* Insert logical/bitmask immediate for e.g. the last operand in
549 ORR <Wd|WSP>, <Wn>, #<imm>. */
550 bfd_boolean
551 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
552 aarch64_insn *code, const aarch64_inst *inst,
553 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
554 {
555 return aarch64_ins_limm_1 (self, info, code, inst,
556 inst->opcode->op == OP_BIC, errors);
557 }
558
559 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
560 bfd_boolean
561 aarch64_ins_inv_limm (const aarch64_operand *self,
562 const aarch64_opnd_info *info, aarch64_insn *code,
563 const aarch64_inst *inst,
564 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
565 {
566 return aarch64_ins_limm_1 (self, info, code, inst, TRUE, errors);
567 }
568
569 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
570 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
571 bfd_boolean
572 aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
573 aarch64_insn *code, const aarch64_inst *inst,
574 aarch64_operand_error *errors)
575 {
576 aarch64_insn value = 0;
577
578 assert (info->idx == 0);
579
580 /* Rt */
581 aarch64_ins_regno (self, info, code, inst, errors);
582 if (inst->opcode->iclass == ldstpair_indexed
583 || inst->opcode->iclass == ldstnapair_offs
584 || inst->opcode->iclass == ldstpair_off
585 || inst->opcode->iclass == loadlit)
586 {
587 /* size */
588 switch (info->qualifier)
589 {
590 case AARCH64_OPND_QLF_S_S: value = 0; break;
591 case AARCH64_OPND_QLF_S_D: value = 1; break;
592 case AARCH64_OPND_QLF_S_Q: value = 2; break;
593 default: assert (0);
594 }
595 insert_field (FLD_ldst_size, code, value, 0);
596 }
597 else
598 {
599 /* opc[1]:size */
600 value = aarch64_get_qualifier_standard_value (info->qualifier);
601 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
602 }
603
604 return TRUE;
605 }
606
607 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
608 bfd_boolean
609 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
610 const aarch64_opnd_info *info, aarch64_insn *code,
611 const aarch64_inst *inst ATTRIBUTE_UNUSED,
612 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
613 {
614 /* Rn */
615 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
616 return TRUE;
617 }
618
619 /* Encode the address operand for e.g.
620 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
621 bfd_boolean
622 aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
623 const aarch64_opnd_info *info, aarch64_insn *code,
624 const aarch64_inst *inst ATTRIBUTE_UNUSED,
625 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
626 {
627 aarch64_insn S;
628 enum aarch64_modifier_kind kind = info->shifter.kind;
629
630 /* Rn */
631 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
632 /* Rm */
633 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
634 /* option */
635 if (kind == AARCH64_MOD_LSL)
636 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven. */
637 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
638 /* S */
639 if (info->qualifier != AARCH64_OPND_QLF_S_B)
640 S = info->shifter.amount != 0;
641 else
642 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
643 S <amount>
644 0 [absent]
645 1 #0
646 Must be #0 if <extend> is explicitly LSL. */
647 S = info->shifter.operator_present && info->shifter.amount_present;
648 insert_field (FLD_S, code, S, 0);
649
650 return TRUE;
651 }
652
653 /* Encode the address operand for e.g.
654 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
655 bfd_boolean
656 aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
657 const aarch64_opnd_info *info, aarch64_insn *code,
658 const aarch64_inst *inst ATTRIBUTE_UNUSED,
659 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
660 {
661 /* Rn */
662 insert_field (self->fields[0], code, info->addr.base_regno, 0);
663
664 /* simm9 */
665 int imm = info->addr.offset.imm;
666 insert_field (self->fields[1], code, imm, 0);
667
668 /* writeback */
669 if (info->addr.writeback)
670 {
671 assert (info->addr.preind == 1 && info->addr.postind == 0);
672 insert_field (self->fields[2], code, 1, 0);
673 }
674 return TRUE;
675 }
676
677 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
678 bfd_boolean
679 aarch64_ins_addr_simm (const aarch64_operand *self,
680 const aarch64_opnd_info *info,
681 aarch64_insn *code,
682 const aarch64_inst *inst ATTRIBUTE_UNUSED,
683 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
684 {
685 int imm;
686
687 /* Rn */
688 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
689 /* simm (imm9 or imm7) */
690 imm = info->addr.offset.imm;
691 if (self->fields[0] == FLD_imm7)
692 /* scaled immediate in ld/st pair instructions.. */
693 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
694 insert_field (self->fields[0], code, imm, 0);
695 /* pre/post- index */
696 if (info->addr.writeback)
697 {
698 assert (inst->opcode->iclass != ldst_unscaled
699 && inst->opcode->iclass != ldstnapair_offs
700 && inst->opcode->iclass != ldstpair_off
701 && inst->opcode->iclass != ldst_unpriv);
702 assert (info->addr.preind != info->addr.postind);
703 if (info->addr.preind)
704 insert_field (self->fields[1], code, 1, 0);
705 }
706
707 return TRUE;
708 }
709
710 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
711 bfd_boolean
712 aarch64_ins_addr_simm10 (const aarch64_operand *self,
713 const aarch64_opnd_info *info,
714 aarch64_insn *code,
715 const aarch64_inst *inst ATTRIBUTE_UNUSED,
716 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
717 {
718 int imm;
719
720 /* Rn */
721 insert_field (self->fields[0], code, info->addr.base_regno, 0);
722 /* simm10 */
723 imm = info->addr.offset.imm >> 3;
724 insert_field (self->fields[1], code, imm >> 9, 0);
725 insert_field (self->fields[2], code, imm, 0);
726 /* writeback */
727 if (info->addr.writeback)
728 {
729 assert (info->addr.preind == 1 && info->addr.postind == 0);
730 insert_field (self->fields[3], code, 1, 0);
731 }
732 return TRUE;
733 }
734
735 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
736 bfd_boolean
737 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
738 const aarch64_opnd_info *info,
739 aarch64_insn *code,
740 const aarch64_inst *inst ATTRIBUTE_UNUSED,
741 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
742 {
743 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
744
745 /* Rn */
746 insert_field (self->fields[0], code, info->addr.base_regno, 0);
747 /* uimm12 */
748 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
749 return TRUE;
750 }
751
752 /* Encode the address operand for e.g.
753 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
754 bfd_boolean
755 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
756 const aarch64_opnd_info *info, aarch64_insn *code,
757 const aarch64_inst *inst ATTRIBUTE_UNUSED,
758 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
759 {
760 /* Rn */
761 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
762 /* Rm | #<amount> */
763 if (info->addr.offset.is_reg)
764 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
765 else
766 insert_field (FLD_Rm, code, 0x1f, 0);
767 return TRUE;
768 }
769
770 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
771 bfd_boolean
772 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
773 const aarch64_opnd_info *info, aarch64_insn *code,
774 const aarch64_inst *inst ATTRIBUTE_UNUSED,
775 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
776 {
777 /* cond */
778 insert_field (FLD_cond, code, info->cond->value, 0);
779 return TRUE;
780 }
781
782 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
783 bfd_boolean
784 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
785 const aarch64_opnd_info *info, aarch64_insn *code,
786 const aarch64_inst *inst,
787 aarch64_operand_error *detail ATTRIBUTE_UNUSED)
788 {
789 /* If a system instruction check if we have any restrictions on which
790 registers it can use. */
791 if (inst->opcode->iclass == ic_system)
792 {
793 uint64_t opcode_flags
794 = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
795 uint32_t sysreg_flags
796 = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);
797
798 /* Check to see if it's read-only, else check if it's write only.
799 if it's both or unspecified don't care. */
800 if (opcode_flags == F_SYS_READ
801 && sysreg_flags
802 && sysreg_flags != F_REG_READ)
803 {
804 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
805 detail->error = _("specified register cannot be read from");
806 detail->index = info->idx;
807 detail->non_fatal = TRUE;
808 }
809 else if (opcode_flags == F_SYS_WRITE
810 && sysreg_flags
811 && sysreg_flags != F_REG_WRITE)
812 {
813 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
814 detail->error = _("specified register cannot be written to");
815 detail->index = info->idx;
816 detail->non_fatal = TRUE;
817 }
818 }
819 /* op0:op1:CRn:CRm:op2 */
820 insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
821 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
822 return TRUE;
823 }
824
825 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
826 bfd_boolean
827 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
828 const aarch64_opnd_info *info, aarch64_insn *code,
829 const aarch64_inst *inst ATTRIBUTE_UNUSED,
830 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
831 {
832 /* op1:op2 */
833 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
834 FLD_op2, FLD_op1);
835 return TRUE;
836 }
837
838 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
839 bfd_boolean
840 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
841 const aarch64_opnd_info *info, aarch64_insn *code,
842 const aarch64_inst *inst ATTRIBUTE_UNUSED,
843 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
844 {
845 /* op1:CRn:CRm:op2 */
846 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
847 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
848 return TRUE;
849 }
850
851 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
852
853 bfd_boolean
854 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
855 const aarch64_opnd_info *info, aarch64_insn *code,
856 const aarch64_inst *inst ATTRIBUTE_UNUSED,
857 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
858 {
859 /* CRm */
860 insert_field (FLD_CRm, code, info->barrier->value, 0);
861 return TRUE;
862 }
863
864 /* Encode the prefetch operation option operand for e.g.
865 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
866
867 bfd_boolean
868 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
869 const aarch64_opnd_info *info, aarch64_insn *code,
870 const aarch64_inst *inst ATTRIBUTE_UNUSED,
871 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
872 {
873 /* prfop in Rt */
874 insert_field (FLD_Rt, code, info->prfop->value, 0);
875 return TRUE;
876 }
877
878 /* Encode the hint number for instructions that alias HINT but take an
879 operand. */
880
881 bfd_boolean
882 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
883 const aarch64_opnd_info *info, aarch64_insn *code,
884 const aarch64_inst *inst ATTRIBUTE_UNUSED,
885 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
886 {
887 /* CRm:op2. */
888 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
889 return TRUE;
890 }
891
892 /* Encode the extended register operand for e.g.
893 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
894 bfd_boolean
895 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
896 const aarch64_opnd_info *info, aarch64_insn *code,
897 const aarch64_inst *inst ATTRIBUTE_UNUSED,
898 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
899 {
900 enum aarch64_modifier_kind kind;
901
902 /* Rm */
903 insert_field (FLD_Rm, code, info->reg.regno, 0);
904 /* option */
905 kind = info->shifter.kind;
906 if (kind == AARCH64_MOD_LSL)
907 kind = info->qualifier == AARCH64_OPND_QLF_W
908 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
909 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
910 /* imm3 */
911 insert_field (FLD_imm3, code, info->shifter.amount, 0);
912
913 return TRUE;
914 }
915
916 /* Encode the shifted register operand for e.g.
917 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
918 bfd_boolean
919 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
920 const aarch64_opnd_info *info, aarch64_insn *code,
921 const aarch64_inst *inst ATTRIBUTE_UNUSED,
922 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
923 {
924 /* Rm */
925 insert_field (FLD_Rm, code, info->reg.regno, 0);
926 /* shift */
927 insert_field (FLD_shift, code,
928 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
929 /* imm6 */
930 insert_field (FLD_imm6, code, info->shifter.amount, 0);
931
932 return TRUE;
933 }
934
935 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
936 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
937 SELF's operand-dependent value. fields[0] specifies the field that
938 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
939 bfd_boolean
940 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
941 const aarch64_opnd_info *info,
942 aarch64_insn *code,
943 const aarch64_inst *inst ATTRIBUTE_UNUSED,
944 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
945 {
946 int factor = 1 + get_operand_specific_data (self);
947 insert_field (self->fields[0], code, info->addr.base_regno, 0);
948 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
949 return TRUE;
950 }
951
952 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
953 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
954 SELF's operand-dependent value. fields[0] specifies the field that
955 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
956 bfd_boolean
957 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
958 const aarch64_opnd_info *info,
959 aarch64_insn *code,
960 const aarch64_inst *inst ATTRIBUTE_UNUSED,
961 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
962 {
963 int factor = 1 + get_operand_specific_data (self);
964 insert_field (self->fields[0], code, info->addr.base_regno, 0);
965 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
966 return TRUE;
967 }
968
969 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
970 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
971 SELF's operand-dependent value. fields[0] specifies the field that
972 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
973 and imm3 fields, with imm3 being the less-significant part. */
974 bfd_boolean
975 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
976 const aarch64_opnd_info *info,
977 aarch64_insn *code,
978 const aarch64_inst *inst ATTRIBUTE_UNUSED,
979 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
980 {
981 int factor = 1 + get_operand_specific_data (self);
982 insert_field (self->fields[0], code, info->addr.base_regno, 0);
983 insert_fields (code, info->addr.offset.imm / factor, 0,
984 2, FLD_imm3, FLD_SVE_imm6);
985 return TRUE;
986 }
987
988 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
989 is a 4-bit signed number and where <shift> is SELF's operand-dependent
990 value. fields[0] specifies the base register field. */
991 bfd_boolean
992 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
993 const aarch64_opnd_info *info, aarch64_insn *code,
994 const aarch64_inst *inst ATTRIBUTE_UNUSED,
995 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
996 {
997 int factor = 1 << get_operand_specific_data (self);
998 insert_field (self->fields[0], code, info->addr.base_regno, 0);
999 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
1000 return TRUE;
1001 }
1002
1003 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1004 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1005 value. fields[0] specifies the base register field. */
1006 bfd_boolean
1007 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
1008 const aarch64_opnd_info *info, aarch64_insn *code,
1009 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1010 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1011 {
1012 int factor = 1 << get_operand_specific_data (self);
1013 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1014 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
1015 return TRUE;
1016 }
1017
1018 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1019 is SELF's operand-dependent value. fields[0] specifies the base
1020 register field and fields[1] specifies the offset register field. */
1021 bfd_boolean
1022 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
1023 const aarch64_opnd_info *info, aarch64_insn *code,
1024 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1025 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1026 {
1027 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1028 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1029 return TRUE;
1030 }
1031
1032 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1033 <shift> is SELF's operand-dependent value. fields[0] specifies the
1034 base register field, fields[1] specifies the offset register field and
1035 fields[2] is a single-bit field that selects SXTW over UXTW. */
1036 bfd_boolean
1037 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
1038 const aarch64_opnd_info *info, aarch64_insn *code,
1039 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1040 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1041 {
1042 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1043 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1044 if (info->shifter.kind == AARCH64_MOD_UXTW)
1045 insert_field (self->fields[2], code, 0, 0);
1046 else
1047 insert_field (self->fields[2], code, 1, 0);
1048 return TRUE;
1049 }
1050
1051 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1052 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1053 fields[0] specifies the base register field. */
1054 bfd_boolean
1055 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
1056 const aarch64_opnd_info *info, aarch64_insn *code,
1057 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1058 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1059 {
1060 int factor = 1 << get_operand_specific_data (self);
1061 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1062 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
1063 return TRUE;
1064 }
1065
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is fixed by the instruction and where <msz> is a
   2-bit unsigned number.  fields[0] specifies the base register field
   and fields[1] specifies the offset register field.

   NOTE(review): despite the "ext" prefix this is an inserter (shared
   helper for the aarch64_ins_sve_addr_zz_* wrappers below); the name
   looks like a copy-paste from aarch64-dis.c.  */
static bfd_boolean
aarch64_ext_sve_addr_zz (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  /* The modifier itself is implied by the opcode; only its shift
     amount <msz> is encoded.  */
  insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
  return TRUE;
}
1080
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.

   The LSL modifier is implied by the opcode, so this defers entirely to
   the shared ZZ-address helper.  */
bfd_boolean
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
1092
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.

   The SXTW modifier is implied by the opcode, so this defers entirely to
   the shared ZZ-address helper.  */
bfd_boolean
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
1105
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.

   The UXTW modifier is implied by the opcode, so this defers entirely to
   the shared ZZ-address helper.  */
bfd_boolean
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
1118
1119 /* Encode an SVE ADD/SUB immediate. */
1120 bfd_boolean
1121 aarch64_ins_sve_aimm (const aarch64_operand *self,
1122 const aarch64_opnd_info *info, aarch64_insn *code,
1123 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1124 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1125 {
1126 if (info->shifter.amount == 8)
1127 insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
1128 else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
1129 insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
1130 else
1131 insert_all_fields (self, code, info->imm.value & 0xff);
1132 return TRUE;
1133 }
1134
/* Encode an SVE CPY/DUP immediate.

   The encoding scheme (8-bit value with optional LSL #8) is the same as
   for ADD/SUB immediates, so this defers to aarch64_ins_sve_aimm.  */
bfd_boolean
aarch64_ins_sve_asimm (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst,
		       aarch64_operand_error *errors)
{
  return aarch64_ins_sve_aimm (self, info, code, inst, errors);
}
1144
1145 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1146 array specifies which field to use for Zn. MM is encoded in the
1147 concatenation of imm5 and SVE_tszh, with imm5 being the less
1148 significant part. */
1149 bfd_boolean
1150 aarch64_ins_sve_index (const aarch64_operand *self,
1151 const aarch64_opnd_info *info, aarch64_insn *code,
1152 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1153 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1154 {
1155 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1156 insert_field (self->fields[0], code, info->reglane.regno, 0);
1157 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1158 2, FLD_imm5, FLD_SVE_tszh);
1159 return TRUE;
1160 }
1161
/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.

   The bitmask encoding itself is identical to the ordinary logical
   immediate encoding, so this defers to aarch64_ins_limm.  */
bfd_boolean
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors)
{
  return aarch64_ins_limm (self, info, code, inst, errors);
}
1171
1172 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1173 and where MM occupies the most-significant part. The operand-dependent
1174 value specifies the number of bits in Zn. */
1175 bfd_boolean
1176 aarch64_ins_sve_quad_index (const aarch64_operand *self,
1177 const aarch64_opnd_info *info, aarch64_insn *code,
1178 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1179 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1180 {
1181 unsigned int reg_bits = get_operand_specific_data (self);
1182 assert (info->reglane.regno < (1U << reg_bits));
1183 unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1184 insert_all_fields (self, code, val);
1185 return TRUE;
1186 }
1187
/* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
   to use for Zn.

   Only the first register of the list is encoded; the list length is
   implied by the opcode.  */
bfd_boolean
aarch64_ins_sve_reglist (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  return TRUE;
}
1199
1200 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1201 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1202 field. */
1203 bfd_boolean
1204 aarch64_ins_sve_scale (const aarch64_operand *self,
1205 const aarch64_opnd_info *info, aarch64_insn *code,
1206 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1207 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1208 {
1209 insert_all_fields (self, code, info->imm.value);
1210 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
1211 return TRUE;
1212 }
1213
1214 /* Encode an SVE shift left immediate. */
1215 bfd_boolean
1216 aarch64_ins_sve_shlimm (const aarch64_operand *self,
1217 const aarch64_opnd_info *info, aarch64_insn *code,
1218 const aarch64_inst *inst,
1219 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1220 {
1221 const aarch64_opnd_info *prev_operand;
1222 unsigned int esize;
1223
1224 assert (info->idx > 0);
1225 prev_operand = &inst->operands[info->idx - 1];
1226 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1227 insert_all_fields (self, code, 8 * esize + info->imm.value);
1228 return TRUE;
1229 }
1230
1231 /* Encode an SVE shift right immediate. */
1232 bfd_boolean
1233 aarch64_ins_sve_shrimm (const aarch64_operand *self,
1234 const aarch64_opnd_info *info, aarch64_insn *code,
1235 const aarch64_inst *inst,
1236 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1237 {
1238 const aarch64_opnd_info *prev_operand;
1239 unsigned int esize;
1240
1241 assert (info->idx > 0);
1242 prev_operand = &inst->operands[info->idx - 1];
1243 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1244 insert_all_fields (self, code, 16 * esize - info->imm.value);
1245 return TRUE;
1246 }
1247
1248 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1249 The fields array specifies which field to use. */
1250 bfd_boolean
1251 aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1252 const aarch64_opnd_info *info,
1253 aarch64_insn *code,
1254 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1255 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1256 {
1257 if (info->imm.value == 0x3f000000)
1258 insert_field (self->fields[0], code, 0, 0);
1259 else
1260 insert_field (self->fields[0], code, 1, 0);
1261 return TRUE;
1262 }
1263
1264 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1265 The fields array specifies which field to use. */
1266 bfd_boolean
1267 aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1268 const aarch64_opnd_info *info,
1269 aarch64_insn *code,
1270 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1271 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1272 {
1273 if (info->imm.value == 0x3f000000)
1274 insert_field (self->fields[0], code, 0, 0);
1275 else
1276 insert_field (self->fields[0], code, 1, 0);
1277 return TRUE;
1278 }
1279
1280 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1281 The fields array specifies which field to use. */
1282 bfd_boolean
1283 aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1284 const aarch64_opnd_info *info,
1285 aarch64_insn *code,
1286 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1287 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1288 {
1289 if (info->imm.value == 0)
1290 insert_field (self->fields[0], code, 0, 0);
1291 else
1292 insert_field (self->fields[0], code, 1, 0);
1293 return TRUE;
1294 }
1295
1296 /* Miscellaneous encoding functions. */
1297
1298 /* Encode size[0], i.e. bit 22, for
1299 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1300
1301 static void
1302 encode_asimd_fcvt (aarch64_inst *inst)
1303 {
1304 aarch64_insn value;
1305 aarch64_field field = {0, 0};
1306 enum aarch64_opnd_qualifier qualifier;
1307
1308 switch (inst->opcode->op)
1309 {
1310 case OP_FCVTN:
1311 case OP_FCVTN2:
1312 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1313 qualifier = inst->operands[1].qualifier;
1314 break;
1315 case OP_FCVTL:
1316 case OP_FCVTL2:
1317 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1318 qualifier = inst->operands[0].qualifier;
1319 break;
1320 default:
1321 assert (0);
1322 }
1323 assert (qualifier == AARCH64_OPND_QLF_V_4S
1324 || qualifier == AARCH64_OPND_QLF_V_2D);
1325 value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
1326 gen_sub_field (FLD_size, 0, 1, &field);
1327 insert_field_2 (&field, &inst->value, value, 0);
1328 }
1329
1330 /* Encode size[0], i.e. bit 22, for
1331 e.g. FCVTXN <Vb><d>, <Va><n>. */
1332
1333 static void
1334 encode_asisd_fcvtxn (aarch64_inst *inst)
1335 {
1336 aarch64_insn val = 1;
1337 aarch64_field field = {0, 0};
1338 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1339 gen_sub_field (FLD_size, 0, 1, &field);
1340 insert_field_2 (&field, &inst->value, val, 0);
1341 }
1342
1343 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1344 static void
1345 encode_fcvt (aarch64_inst *inst)
1346 {
1347 aarch64_insn val;
1348 const aarch64_field field = {15, 2};
1349
1350 /* opc dstsize */
1351 switch (inst->operands[0].qualifier)
1352 {
1353 case AARCH64_OPND_QLF_S_S: val = 0; break;
1354 case AARCH64_OPND_QLF_S_D: val = 1; break;
1355 case AARCH64_OPND_QLF_S_H: val = 3; break;
1356 default: abort ();
1357 }
1358 insert_field_2 (&field, &inst->value, val, 0);
1359
1360 return;
1361 }
1362
1363 /* Return the index in qualifiers_list that INST is using. Should only
1364 be called once the qualifiers are known to be valid. */
1365
1366 static int
1367 aarch64_get_variant (struct aarch64_inst *inst)
1368 {
1369 int i, nops, variant;
1370
1371 nops = aarch64_num_of_operands (inst->opcode);
1372 for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
1373 {
1374 for (i = 0; i < nops; ++i)
1375 if (inst->opcode->qualifiers_list[variant][i]
1376 != inst->operands[i].qualifier)
1377 break;
1378 if (i == nops)
1379 return variant;
1380 }
1381 abort ();
1382 }
1383
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.

   Called from do_special_encoding for opcodes flagged with F_MISC.
   Most cases here expand SVE MOV/NOT-style aliases by duplicating a
   register field that the alias syntax leaves implicit.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
		     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      /* Nothing extra to encode beyond the normal operand inserters.  */
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}
1450
/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q is bit 0 of the standard qualifier value.  */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size is bits [2:1]; the AdvSIMD load/store iclasses keep it in a
     different bit position, hence FLD_vldst_size.  */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
1478
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has the enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  /* sf bit: 1 for a 64-bit (X/SP) register operand, 0 for 32-bit.  */
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	      ? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      /* F_N: the N bit must be set to the same value as sf.  */
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  /* LSE atomics carry the equivalent width information in their own
     size field; selected the same way as sf.  */
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	      ? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  /* Floating-point 'type' field from the scalar qualifier.  */
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: assert (0);
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  /* Scalar size field taken directly from the standard qualifier value.  */
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  /* Vector arrangement encoded as a one-hot bit within imm5 plus Q.  */
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  /* Q encodes the GPR width for e.g. exclusive-pair instructions.  */
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be a integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Load-register-signed: opc<0> is the inverse of the standard
     qualifier value of the destination register.  */
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
1604
/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  switch (inst->opcode->iclass)
    {
    case sve_cpy:
      /* Variant is split across M and size (size listed as the more
	 significant field).  */
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
      /* For indices and shift amounts, the variant is encoded as
	 part of the immediate.  */
      break;

    case sve_limm:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
	 and depend on the immediate.  They don't have a separate
	 encoding.  */
      break;

    case sve_misc:
      /* sve_misc instructions have only a single variant.  */
      break;

    case sve_movprfx:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      /* Single-bit M field.  */
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      /* +1 skips the B entry of the size field.  */
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_sd:
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    default:
      break;
    }
}
1663
1664 /* Converters converting an alias opcode instruction to its real form. */
1665
/* ROR <Wd>, <Ws>, #<shift>
   is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  /* Shift #<shift> from slot 2 to slot 3, then duplicate <Ws> into the
     vacated slot 2.  The 3<-2 copy must happen before the 2<-1 copy.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}
1675
1676 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1677 is equivalent to:
1678 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1679 static void
1680 convert_xtl_to_shll (aarch64_inst *inst)
1681 {
1682 inst->operands[2].qualifier = inst->operands[1].qualifier;
1683 inst->operands[2].imm.value = 0;
1684 }
1685
1686 /* Convert
1687 LSR <Xd>, <Xn>, #<shift>
1688 to
1689 UBFM <Xd>, <Xn>, #<shift>, #63. */
1690 static void
1691 convert_sr_to_bfm (aarch64_inst *inst)
1692 {
1693 inst->operands[3].imm.value =
1694 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1695 }
1696
/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
     is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.

     Duplicate <Vn> into the third operand slot.  */
  copy_operand_info (inst, 2, 1);
}
1706
1707 /* When <imms> >= <immr>, the instruction written:
1708 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1709 is equivalent to:
1710 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1711
1712 static void
1713 convert_bfx_to_bfm (aarch64_inst *inst)
1714 {
1715 int64_t lsb, width;
1716
1717 /* Convert the operand. */
1718 lsb = inst->operands[2].imm.value;
1719 width = inst->operands[3].imm.value;
1720 inst->operands[2].imm.value = lsb;
1721 inst->operands[3].imm.value = lsb + width - 1;
1722 }
1723
1724 /* When <imms> < <immr>, the instruction written:
1725 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1726 is equivalent to:
1727 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1728
1729 static void
1730 convert_bfi_to_bfm (aarch64_inst *inst)
1731 {
1732 int64_t lsb, width;
1733
1734 /* Convert the operand. */
1735 lsb = inst->operands[2].imm.value;
1736 width = inst->operands[3].imm.value;
1737 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1738 {
1739 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1740 inst->operands[3].imm.value = width - 1;
1741 }
1742 else
1743 {
1744 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1745 inst->operands[3].imm.value = width - 1;
1746 }
1747 }
1748
1749 /* The instruction written:
1750 BFC <Xd>, #<lsb>, #<width>
1751 is equivalent to:
1752 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1753
1754 static void
1755 convert_bfc_to_bfm (aarch64_inst *inst)
1756 {
1757 int64_t lsb, width;
1758
1759 /* Insert XZR. */
1760 copy_operand_info (inst, 3, 2);
1761 copy_operand_info (inst, 2, 1);
1762 copy_operand_info (inst, 1, 0);
1763 inst->operands[1].reg.regno = 0x1f;
1764
1765 /* Convert the immediate operand. */
1766 lsb = inst->operands[2].imm.value;
1767 width = inst->operands[3].imm.value;
1768 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1769 {
1770 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1771 inst->operands[3].imm.value = width - 1;
1772 }
1773 else
1774 {
1775 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1776 inst->operands[3].imm.value = width - 1;
1777 }
1778 }
1779
1780 /* The instruction written:
1781 LSL <Xd>, <Xn>, #<shift>
1782 is equivalent to:
1783 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1784
1785 static void
1786 convert_lsl_to_ubfm (aarch64_inst *inst)
1787 {
1788 int64_t shift = inst->operands[2].imm.value;
1789
1790 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1791 {
1792 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1793 inst->operands[3].imm.value = 31 - shift;
1794 }
1795 else
1796 {
1797 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1798 inst->operands[3].imm.value = 63 - shift;
1799 }
1800 }
1801
/* CINC <Wd>, <Wn>, <cond>
   is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */

static void
convert_to_csel (aarch64_inst *inst)
{
  /* Move <cond> from slot 2 to slot 3, duplicate <Wn> into slot 2
     (3<-2 must precede 2<-1), then invert the condition in place.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1813
/* CSET <Wd>, <cond>
   is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  /* Move <cond> to slot 3 and fill slots 1 and 2 with copies of the
     destination operand, then rewrite both as WZR/XZR.  The copies are
     ordered so no slot is overwritten before it is read.  */
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1828
1829 /* MOV <Wd>, #<imm>
1830 is equivalent to:
1831 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1832
1833 static void
1834 convert_mov_to_movewide (aarch64_inst *inst)
1835 {
1836 int is32;
1837 uint32_t shift_amount;
1838 uint64_t value;
1839
1840 switch (inst->opcode->op)
1841 {
1842 case OP_MOV_IMM_WIDE:
1843 value = inst->operands[1].imm.value;
1844 break;
1845 case OP_MOV_IMM_WIDEN:
1846 value = ~inst->operands[1].imm.value;
1847 break;
1848 default:
1849 assert (0);
1850 }
1851 inst->operands[1].type = AARCH64_OPND_HALF;
1852 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1853 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1854 /* The constraint check should have guaranteed this wouldn't happen. */
1855 assert (0);
1856 value >>= shift_amount;
1857 value &= 0xffff;
1858 inst->operands[1].imm.value = value;
1859 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1860 inst->operands[1].shifter.amount = shift_amount;
1861 }
1862
1863 /* MOV <Wd>, #<imm>
1864 is equivalent to:
1865 ORR <Wd>, WZR, #<imm>. */
1866
1867 static void
1868 convert_mov_to_movebitmask (aarch64_inst *inst)
1869 {
1870 copy_operand_info (inst, 2, 1);
1871 inst->operands[1].reg.regno = 0x1f;
1872 inst->operands[1].skip = 0;
1873 }
1874
1875 /* Some alias opcodes are assembled by being converted to their real-form. */
1876
1877 static void
1878 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1879 {
1880 const aarch64_opcode *alias = inst->opcode;
1881
1882 if ((alias->flags & F_CONV) == 0)
1883 goto convert_to_real_return;
1884
1885 switch (alias->op)
1886 {
1887 case OP_ASR_IMM:
1888 case OP_LSR_IMM:
1889 convert_sr_to_bfm (inst);
1890 break;
1891 case OP_LSL_IMM:
1892 convert_lsl_to_ubfm (inst);
1893 break;
1894 case OP_CINC:
1895 case OP_CINV:
1896 case OP_CNEG:
1897 convert_to_csel (inst);
1898 break;
1899 case OP_CSET:
1900 case OP_CSETM:
1901 convert_cset_to_csinc (inst);
1902 break;
1903 case OP_UBFX:
1904 case OP_BFXIL:
1905 case OP_SBFX:
1906 convert_bfx_to_bfm (inst);
1907 break;
1908 case OP_SBFIZ:
1909 case OP_BFI:
1910 case OP_UBFIZ:
1911 convert_bfi_to_bfm (inst);
1912 break;
1913 case OP_BFC:
1914 convert_bfc_to_bfm (inst);
1915 break;
1916 case OP_MOV_V:
1917 convert_mov_to_orr (inst);
1918 break;
1919 case OP_MOV_IMM_WIDE:
1920 case OP_MOV_IMM_WIDEN:
1921 convert_mov_to_movewide (inst);
1922 break;
1923 case OP_MOV_IMM_LOG:
1924 convert_mov_to_movebitmask (inst);
1925 break;
1926 case OP_ROR_IMM:
1927 convert_ror_to_extr (inst);
1928 break;
1929 case OP_SXTL:
1930 case OP_SXTL2:
1931 case OP_UXTL:
1932 case OP_UXTL2:
1933 convert_xtl_to_shll (inst);
1934 break;
1935 default:
1936 break;
1937 }
1938
1939 convert_to_real_return:
1940 aarch64_replace_opcode (inst, real);
1941 }
1942
1943 /* Encode *INST_ORI of the opcode code OPCODE.
1944 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1945 matched operand qualifier sequence in *QLF_SEQ. */
1946
1947 bfd_boolean
1948 aarch64_opcode_encode (const aarch64_opcode *opcode,
1949 const aarch64_inst *inst_ori, aarch64_insn *code,
1950 aarch64_opnd_qualifier_t *qlf_seq,
1951 aarch64_operand_error *mismatch_detail,
1952 aarch64_instr_sequence* insn_sequence)
1953 {
1954 int i;
1955 const aarch64_opcode *aliased;
1956 aarch64_inst copy, *inst;
1957
1958 DEBUG_TRACE ("enter with %s", opcode->name);
1959
1960 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1961 copy = *inst_ori;
1962 inst = &copy;
1963
1964 assert (inst->opcode == NULL || inst->opcode == opcode);
1965 if (inst->opcode == NULL)
1966 inst->opcode = opcode;
1967
1968 /* Constrain the operands.
1969 After passing this, the encoding is guaranteed to succeed. */
1970 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
1971 {
1972 DEBUG_TRACE ("FAIL since operand constraint not met");
1973 return 0;
1974 }
1975
1976 /* Get the base value.
1977 Note: this has to be before the aliasing handling below in order to
1978 get the base value from the alias opcode before we move on to the
1979 aliased opcode for encoding. */
1980 inst->value = opcode->opcode;
1981
1982 /* No need to do anything else if the opcode does not have any operand. */
1983 if (aarch64_num_of_operands (opcode) == 0)
1984 goto encoding_exit;
1985
1986 /* Assign operand indexes and check types. Also put the matched
1987 operand qualifiers in *QLF_SEQ to return. */
1988 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1989 {
1990 assert (opcode->operands[i] == inst->operands[i].type);
1991 inst->operands[i].idx = i;
1992 if (qlf_seq != NULL)
1993 *qlf_seq = inst->operands[i].qualifier;
1994 }
1995
1996 aliased = aarch64_find_real_opcode (opcode);
1997 /* If the opcode is an alias and it does not ask for direct encoding by
1998 itself, the instruction will be transformed to the form of real opcode
1999 and the encoding will be carried out using the rules for the aliased
2000 opcode. */
2001 if (aliased != NULL && (opcode->flags & F_CONV))
2002 {
2003 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
2004 aliased->name, opcode->name);
2005 /* Convert the operands to the form of the real opcode. */
2006 convert_to_real (inst, aliased);
2007 opcode = aliased;
2008 }
2009
2010 aarch64_opnd_info *info = inst->operands;
2011
2012 /* Call the inserter of each operand. */
2013 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
2014 {
2015 const aarch64_operand *opnd;
2016 enum aarch64_opnd type = opcode->operands[i];
2017 if (type == AARCH64_OPND_NIL)
2018 break;
2019 if (info->skip)
2020 {
2021 DEBUG_TRACE ("skip the incomplete operand %d", i);
2022 continue;
2023 }
2024 opnd = &aarch64_operands[type];
2025 if (operand_has_inserter (opnd)
2026 && !aarch64_insert_operand (opnd, info, &inst->value, inst,
2027 mismatch_detail))
2028 return FALSE;
2029 }
2030
2031 /* Call opcode encoders indicated by flags. */
2032 if (opcode_has_special_coder (opcode))
2033 do_special_encoding (inst);
2034
2035 /* Possibly use the instruction class to encode the chosen qualifier
2036 variant. */
2037 aarch64_encode_variant_using_iclass (inst);
2038
2039 /* Run a verifier if the instruction has one set. */
2040 if (opcode->verifier)
2041 {
2042 enum err_type result = opcode->verifier (inst, *code, 0, TRUE,
2043 mismatch_detail, insn_sequence);
2044 switch (result)
2045 {
2046 case ERR_UND:
2047 case ERR_UNP:
2048 case ERR_NYI:
2049 return FALSE;
2050 default:
2051 break;
2052 }
2053 }
2054
2055 /* Always run constrain verifiers, this is needed because constrains need to
2056 maintain a global state. Regardless if the instruction has the flag set
2057 or not. */
2058 enum err_type result = verify_constraints (inst, *code, 0, TRUE,
2059 mismatch_detail, insn_sequence);
2060 switch (result)
2061 {
2062 case ERR_UND:
2063 case ERR_UNP:
2064 case ERR_NYI:
2065 return FALSE;
2066 default:
2067 break;
2068 }
2069
2070
2071 encoding_exit:
2072 DEBUG_TRACE ("exit with %s", opcode->name);
2073
2074 *code = inst->value;
2075
2076 return TRUE;
2077 }