[BINUTILS, AARCH64, 2/8] Add Tag generation instructions in Memory Tagging Extension
[deliverable/binutils-gdb.git] / opcodes / aarch64-asm.c
1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
25 #include "opintl.h"
26
27 /* Utilities. */
28
29 /* The unnamed arguments consist of the number of fields and information about
30 these fields where the VALUE will be inserted into CODE. MASK can be zero or
31 the base mask of the opcode.
32
33 N.B. the fields are required to be in such an order than the least signficant
34 field for VALUE comes the first, e.g. the <index> in
35 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37 the order of M, L, H. */
38
39 static inline void
40 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
41 {
42 uint32_t num;
43 const aarch64_field *field;
44 enum aarch64_field_kind kind;
45 va_list va;
46
47 va_start (va, mask);
48 num = va_arg (va, uint32_t);
49 assert (num <= 5);
50 while (num--)
51 {
52 kind = va_arg (va, enum aarch64_field_kind);
53 field = &fields[kind];
54 insert_field (kind, code, value, mask);
55 value >>= field->width;
56 }
57 va_end (va);
58 }
59
60 /* Insert a raw field value VALUE into all fields in SELF->fields.
61 The least significant bit goes in the final field. */
62
63 static void
64 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
65 aarch64_insn value)
66 {
67 unsigned int i;
68 enum aarch64_field_kind kind;
69
70 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
71 if (self->fields[i] != FLD_NIL)
72 {
73 kind = self->fields[i];
74 insert_field (kind, code, value, 0);
75 value >>= fields[kind].width;
76 }
77 }
78
79 /* Operand inserters. */
80
81 /* Insert register number. */
82 bfd_boolean
83 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
84 aarch64_insn *code,
85 const aarch64_inst *inst ATTRIBUTE_UNUSED,
86 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
87 {
88 insert_field (self->fields[0], code, info->reg.regno, 0);
89 return TRUE;
90 }
91
/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].

   How the index is encoded depends on the instruction class (iclass) of
   INST; each branch below handles one encoding scheme.  */
bfd_boolean
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      /* POS is the bit position of the element-size marker within imm5/imm4,
	 derived from the element-size qualifier (B=0, H=1, S=2, D=3).  */
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D
	     The low set bit marks the element size; the index occupies the
	     bits above it.  */
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      unsigned reglane_index = info->reglane.index;
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_4B:
	  /* L:H */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	default:
	  assert (0);
	}
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>].  */
      unsigned reglane_index = info->reglane.index;
      assert (reglane_index < 4);
      insert_field (FLD_SM3_imm2, code, reglane_index, 0);
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
	 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
	/* Complex operand takes two elements.  */
	reglane_index *= 2;

      /* The index is spread over H:L:M, H:L or just H depending on the
	 element size.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* H:L:M */
	  assert (reglane_index < 8);
	  insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* H:L */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  assert (reglane_index < 2);
	  insert_field (FLD_H, code, reglane_index, 0);
	  break;
	default:
	  assert (0);
	}
    }
  return TRUE;
}
181
182 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
183 bfd_boolean
184 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
185 aarch64_insn *code,
186 const aarch64_inst *inst ATTRIBUTE_UNUSED,
187 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
188 {
189 /* R */
190 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
191 /* len */
192 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
193 return TRUE;
194 }
195
196 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
197 in AdvSIMD load/store instructions. */
198 bfd_boolean
199 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
200 const aarch64_opnd_info *info, aarch64_insn *code,
201 const aarch64_inst *inst,
202 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
203 {
204 aarch64_insn value = 0;
205 /* Number of elements in each structure to be loaded/stored. */
206 unsigned num = get_opcode_dependent_value (inst->opcode);
207
208 /* Rt */
209 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
210 /* opcode */
211 switch (num)
212 {
213 case 1:
214 switch (info->reglist.num_regs)
215 {
216 case 1: value = 0x7; break;
217 case 2: value = 0xa; break;
218 case 3: value = 0x6; break;
219 case 4: value = 0x2; break;
220 default: assert (0);
221 }
222 break;
223 case 2:
224 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
225 break;
226 case 3:
227 value = 0x4;
228 break;
229 case 4:
230 value = 0x0;
231 break;
232 default:
233 assert (0);
234 }
235 insert_field (FLD_opcode, code, value, 0);
236
237 return TRUE;
238 }
239
240 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
241 single structure to all lanes instructions. */
242 bfd_boolean
243 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
244 const aarch64_opnd_info *info, aarch64_insn *code,
245 const aarch64_inst *inst,
246 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
247 {
248 aarch64_insn value;
249 /* The opcode dependent area stores the number of elements in
250 each structure to be loaded/stored. */
251 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
252
253 /* Rt */
254 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
255 /* S */
256 value = (aarch64_insn) 0;
257 if (is_ld1r && info->reglist.num_regs == 2)
258 /* OP_LD1R does not have alternating variant, but have "two consecutive"
259 instead. */
260 value = (aarch64_insn) 1;
261 insert_field (FLD_S, code, value, 0);
262
263 return TRUE;
264 }
265
/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand e.g. Vt in AdvSIMD load/store single element instructions.

   The element index is packed into the concatenated Q:S:size bits; how many
   of those bits the index occupies depends on the element size, with the
   remaining low bits fixed by the size itself.  */
bfd_boolean
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q"; size<0> is fixed to 1 for doubleword.  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      assert (0);
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  /* opcode<2:1> lives in bits 1-2 of the asisdlso opcode field.  */
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return TRUE;
}
314
/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
     SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.

   The shift amount and the element size are jointly encoded in immh:immb;
   for the vector (asimdshf) forms the Q bit additionally selects the
   64-bit/128-bit variant.  */
bfd_boolean
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* VAL starts as the standard qualifier value; its low bit is Q for the
     vector forms, the remaining bits select the element size.  */
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return TRUE;
}
370
371 /* Insert fields for e.g. the immediate operands in
372 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
373 bfd_boolean
374 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
375 aarch64_insn *code,
376 const aarch64_inst *inst ATTRIBUTE_UNUSED,
377 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
378 {
379 int64_t imm;
380
381 imm = info->imm.value;
382 if (operand_need_shift_by_two (self))
383 imm >>= 2;
384 if (operand_need_shift_by_four (self))
385 imm >>= 4;
386 insert_all_fields (self, code, imm);
387 return TRUE;
388 }
389
390 /* Insert immediate and its shift amount for e.g. the last operand in
391 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
392 bfd_boolean
393 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
394 aarch64_insn *code, const aarch64_inst *inst,
395 aarch64_operand_error *errors)
396 {
397 /* imm16 */
398 aarch64_ins_imm (self, info, code, inst, errors);
399 /* hw */
400 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
401 return TRUE;
402 }
403
404 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
405 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
406 bfd_boolean
407 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
408 const aarch64_opnd_info *info,
409 aarch64_insn *code,
410 const aarch64_inst *inst ATTRIBUTE_UNUSED,
411 aarch64_operand_error *errors
412 ATTRIBUTE_UNUSED)
413 {
414 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
415 uint64_t imm = info->imm.value;
416 enum aarch64_modifier_kind kind = info->shifter.kind;
417 int amount = info->shifter.amount;
418 aarch64_field field = {0, 0};
419
420 /* a:b:c:d:e:f:g:h */
421 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
422 {
423 /* Either MOVI <Dd>, #<imm>
424 or MOVI <Vd>.2D, #<imm>.
425 <imm> is a 64-bit immediate
426 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
427 encoded in "a:b:c:d:e:f:g:h". */
428 imm = aarch64_shrink_expanded_imm8 (imm);
429 assert ((int)imm >= 0);
430 }
431 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
432
433 if (kind == AARCH64_MOD_NONE)
434 return TRUE;
435
436 /* shift amount partially in cmode */
437 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
438 if (kind == AARCH64_MOD_LSL)
439 {
440 /* AARCH64_MOD_LSL: shift zeros. */
441 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
442 assert (esize == 4 || esize == 2 || esize == 1);
443 /* For 8-bit move immediate, the optional LSL #0 does not require
444 encoding. */
445 if (esize == 1)
446 return TRUE;
447 amount >>= 3;
448 if (esize == 4)
449 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
450 else
451 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
452 }
453 else
454 {
455 /* AARCH64_MOD_MSL: shift ones. */
456 amount >>= 4;
457 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
458 }
459 insert_field_2 (&field, code, amount, 0);
460
461 return TRUE;
462 }
463
464 /* Insert fields for an 8-bit floating-point immediate. */
465 bfd_boolean
466 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
467 aarch64_insn *code,
468 const aarch64_inst *inst ATTRIBUTE_UNUSED,
469 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
470 {
471 insert_all_fields (self, code, info->imm.value);
472 return TRUE;
473 }
474
475 /* Insert 1-bit rotation immediate (#90 or #270). */
476 bfd_boolean
477 aarch64_ins_imm_rotate1 (const aarch64_operand *self,
478 const aarch64_opnd_info *info,
479 aarch64_insn *code, const aarch64_inst *inst,
480 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
481 {
482 uint64_t rot = (info->imm.value - 90) / 180;
483 assert (rot < 2U);
484 insert_field (self->fields[0], code, rot, inst->opcode->mask);
485 return TRUE;
486 }
487
488 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
489 bfd_boolean
490 aarch64_ins_imm_rotate2 (const aarch64_operand *self,
491 const aarch64_opnd_info *info,
492 aarch64_insn *code, const aarch64_inst *inst,
493 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
494 {
495 uint64_t rot = info->imm.value / 90;
496 assert (rot < 4U);
497 insert_field (self->fields[0], code, rot, inst->opcode->mask);
498 return TRUE;
499 }
500
501 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
502 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
503 bfd_boolean
504 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
505 aarch64_insn *code,
506 const aarch64_inst *inst ATTRIBUTE_UNUSED,
507 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
508 {
509 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
510 return TRUE;
511 }
512
513 /* Insert arithmetic immediate for e.g. the last operand in
514 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
515 bfd_boolean
516 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
517 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
518 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
519 {
520 /* shift */
521 aarch64_insn value = info->shifter.amount ? 1 : 0;
522 insert_field (self->fields[0], code, value, 0);
523 /* imm12 (unsigned) */
524 insert_field (self->fields[1], code, info->imm.value, 0);
525 return TRUE;
526 }
527
528 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
529 the operand should be inverted before encoding. */
530 static bfd_boolean
531 aarch64_ins_limm_1 (const aarch64_operand *self,
532 const aarch64_opnd_info *info, aarch64_insn *code,
533 const aarch64_inst *inst, bfd_boolean invert_p,
534 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
535 {
536 aarch64_insn value;
537 uint64_t imm = info->imm.value;
538 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
539
540 if (invert_p)
541 imm = ~imm;
542 /* The constraint check should have guaranteed this wouldn't happen. */
543 assert (aarch64_logical_immediate_p (imm, esize, &value));
544
545 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
546 self->fields[0]);
547 return TRUE;
548 }
549
550 /* Insert logical/bitmask immediate for e.g. the last operand in
551 ORR <Wd|WSP>, <Wn>, #<imm>. */
552 bfd_boolean
553 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
554 aarch64_insn *code, const aarch64_inst *inst,
555 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
556 {
557 return aarch64_ins_limm_1 (self, info, code, inst,
558 inst->opcode->op == OP_BIC, errors);
559 }
560
561 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
562 bfd_boolean
563 aarch64_ins_inv_limm (const aarch64_operand *self,
564 const aarch64_opnd_info *info, aarch64_insn *code,
565 const aarch64_inst *inst,
566 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
567 {
568 return aarch64_ins_limm_1 (self, info, code, inst, TRUE, errors);
569 }
570
/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.

   The register number itself is delegated to aarch64_ins_regno; this
   routine additionally encodes the size/opc bits that select the SIMD&FP
   register width, in an iclass-dependent layout.  */
bfd_boolean
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
		aarch64_insn *code, const aarch64_inst *inst,
		aarch64_operand_error *errors)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst, errors);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size: 0/1/2 selects S/D/Q registers for the pair/literal forms.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_Q: value = 2; break;
	default: assert (0);
	}
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size — the qualifier's standard value maps directly onto
	 these bits.  */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }

  return TRUE;
}
608
609 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
610 bfd_boolean
611 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
612 const aarch64_opnd_info *info, aarch64_insn *code,
613 const aarch64_inst *inst ATTRIBUTE_UNUSED,
614 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
615 {
616 /* Rn */
617 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
618 return TRUE;
619 }
620
/* Encode the address operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
bfd_boolean
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S: whether the offset is scaled by the access size.  */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
       S	<amount>
       0	[absent]
       1	#0
       Must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return TRUE;
}
654
655 /* Encode the address operand for e.g.
656 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
657 bfd_boolean
658 aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
659 const aarch64_opnd_info *info, aarch64_insn *code,
660 const aarch64_inst *inst ATTRIBUTE_UNUSED,
661 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
662 {
663 /* Rn */
664 insert_field (self->fields[0], code, info->addr.base_regno, 0);
665
666 /* simm9 */
667 int imm = info->addr.offset.imm;
668 insert_field (self->fields[1], code, imm, 0);
669
670 /* writeback */
671 if (info->addr.writeback)
672 {
673 assert (info->addr.preind == 1 && info->addr.postind == 0);
674 insert_field (self->fields[2], code, 1, 0);
675 }
676 return TRUE;
677 }
678
/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.

   The offset field is either imm9 or imm7 depending on SELF; the imm7
   (load/store pair) form stores the offset scaled down by the access
   size.  */
bfd_boolean
aarch64_ins_addr_simm (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7)
    /* scaled immediate in ld/st pair instructions..  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post- index */
  if (info->addr.writeback)
    {
      /* These instruction classes have no writeback forms.  */
      assert (inst->opcode->iclass != ldst_unscaled
	      && inst->opcode->iclass != ldstnapair_offs
	      && inst->opcode->iclass != ldstpair_off
	      && inst->opcode->iclass != ldst_unpriv);
      /* Exactly one of pre-/post-index must be selected; only pre-index
	 needs an extra bit (fields[1]).  */
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
	insert_field (self->fields[1], code, 1, 0);
    }

  return TRUE;
}
711
712 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
713 bfd_boolean
714 aarch64_ins_addr_simm10 (const aarch64_operand *self,
715 const aarch64_opnd_info *info,
716 aarch64_insn *code,
717 const aarch64_inst *inst ATTRIBUTE_UNUSED,
718 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
719 {
720 int imm;
721
722 /* Rn */
723 insert_field (self->fields[0], code, info->addr.base_regno, 0);
724 /* simm10 */
725 imm = info->addr.offset.imm >> 3;
726 insert_field (self->fields[1], code, imm >> 9, 0);
727 insert_field (self->fields[2], code, imm, 0);
728 /* writeback */
729 if (info->addr.writeback)
730 {
731 assert (info->addr.preind == 1 && info->addr.postind == 0);
732 insert_field (self->fields[3], code, 1, 0);
733 }
734 return TRUE;
735 }
736
737 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
738 bfd_boolean
739 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
740 const aarch64_opnd_info *info,
741 aarch64_insn *code,
742 const aarch64_inst *inst ATTRIBUTE_UNUSED,
743 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
744 {
745 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
746
747 /* Rn */
748 insert_field (self->fields[0], code, info->addr.base_regno, 0);
749 /* uimm12 */
750 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
751 return TRUE;
752 }
753
754 /* Encode the address operand for e.g.
755 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
756 bfd_boolean
757 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
758 const aarch64_opnd_info *info, aarch64_insn *code,
759 const aarch64_inst *inst ATTRIBUTE_UNUSED,
760 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
761 {
762 /* Rn */
763 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
764 /* Rm | #<amount> */
765 if (info->addr.offset.is_reg)
766 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
767 else
768 insert_field (FLD_Rm, code, 0x1f, 0);
769 return TRUE;
770 }
771
772 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
773 bfd_boolean
774 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
775 const aarch64_opnd_info *info, aarch64_insn *code,
776 const aarch64_inst *inst ATTRIBUTE_UNUSED,
777 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
778 {
779 /* cond */
780 insert_field (FLD_cond, code, info->cond->value, 0);
781 return TRUE;
782 }
783
784 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
785 bfd_boolean
786 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
787 const aarch64_opnd_info *info, aarch64_insn *code,
788 const aarch64_inst *inst,
789 aarch64_operand_error *detail ATTRIBUTE_UNUSED)
790 {
791 /* If a system instruction check if we have any restrictions on which
792 registers it can use. */
793 if (inst->opcode->iclass == ic_system)
794 {
795 uint64_t opcode_flags
796 = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
797 uint32_t sysreg_flags
798 = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);
799
800 /* Check to see if it's read-only, else check if it's write only.
801 if it's both or unspecified don't care. */
802 if (opcode_flags == F_SYS_READ
803 && sysreg_flags
804 && sysreg_flags != F_REG_READ)
805 {
806 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
807 detail->error = _("specified register cannot be read from");
808 detail->index = info->idx;
809 detail->non_fatal = TRUE;
810 }
811 else if (opcode_flags == F_SYS_WRITE
812 && sysreg_flags
813 && sysreg_flags != F_REG_WRITE)
814 {
815 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
816 detail->error = _("specified register cannot be written to");
817 detail->index = info->idx;
818 detail->non_fatal = TRUE;
819 }
820 }
821 /* op0:op1:CRn:CRm:op2 */
822 insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
823 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
824 return TRUE;
825 }
826
827 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
828 bfd_boolean
829 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
830 const aarch64_opnd_info *info, aarch64_insn *code,
831 const aarch64_inst *inst ATTRIBUTE_UNUSED,
832 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
833 {
834 /* op1:op2 */
835 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
836 FLD_op2, FLD_op1);
837 return TRUE;
838 }
839
840 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
841 bfd_boolean
842 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
843 const aarch64_opnd_info *info, aarch64_insn *code,
844 const aarch64_inst *inst ATTRIBUTE_UNUSED,
845 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
846 {
847 /* op1:CRn:CRm:op2 */
848 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
849 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
850 return TRUE;
851 }
852
853 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
854
855 bfd_boolean
856 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
857 const aarch64_opnd_info *info, aarch64_insn *code,
858 const aarch64_inst *inst ATTRIBUTE_UNUSED,
859 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
860 {
861 /* CRm */
862 insert_field (FLD_CRm, code, info->barrier->value, 0);
863 return TRUE;
864 }
865
866 /* Encode the prefetch operation option operand for e.g.
867 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
868
869 bfd_boolean
870 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
871 const aarch64_opnd_info *info, aarch64_insn *code,
872 const aarch64_inst *inst ATTRIBUTE_UNUSED,
873 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
874 {
875 /* prfop in Rt */
876 insert_field (FLD_Rt, code, info->prfop->value, 0);
877 return TRUE;
878 }
879
880 /* Encode the hint number for instructions that alias HINT but take an
881 operand. */
882
883 bfd_boolean
884 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
885 const aarch64_opnd_info *info, aarch64_insn *code,
886 const aarch64_inst *inst ATTRIBUTE_UNUSED,
887 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
888 {
889 /* CRm:op2. */
890 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
891 return TRUE;
892 }
893
894 /* Encode the extended register operand for e.g.
895 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
896 bfd_boolean
897 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
898 const aarch64_opnd_info *info, aarch64_insn *code,
899 const aarch64_inst *inst ATTRIBUTE_UNUSED,
900 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
901 {
902 enum aarch64_modifier_kind kind;
903
904 /* Rm */
905 insert_field (FLD_Rm, code, info->reg.regno, 0);
906 /* option */
907 kind = info->shifter.kind;
908 if (kind == AARCH64_MOD_LSL)
909 kind = info->qualifier == AARCH64_OPND_QLF_W
910 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
911 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
912 /* imm3 */
913 insert_field (FLD_imm3, code, info->shifter.amount, 0);
914
915 return TRUE;
916 }
917
918 /* Encode the shifted register operand for e.g.
919 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
920 bfd_boolean
921 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
922 const aarch64_opnd_info *info, aarch64_insn *code,
923 const aarch64_inst *inst ATTRIBUTE_UNUSED,
924 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
925 {
926 /* Rm */
927 insert_field (FLD_Rm, code, info->reg.regno, 0);
928 /* shift */
929 insert_field (FLD_shift, code,
930 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
931 /* imm6 */
932 insert_field (FLD_imm6, code, info->shifter.amount, 0);
933
934 return TRUE;
935 }
936
937 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
938 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
939 SELF's operand-dependent value. fields[0] specifies the field that
940 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
941 bfd_boolean
942 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
943 const aarch64_opnd_info *info,
944 aarch64_insn *code,
945 const aarch64_inst *inst ATTRIBUTE_UNUSED,
946 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
947 {
948 int factor = 1 + get_operand_specific_data (self);
949 insert_field (self->fields[0], code, info->addr.base_regno, 0);
950 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
951 return TRUE;
952 }
953
954 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
955 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
956 SELF's operand-dependent value. fields[0] specifies the field that
957 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
958 bfd_boolean
959 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
960 const aarch64_opnd_info *info,
961 aarch64_insn *code,
962 const aarch64_inst *inst ATTRIBUTE_UNUSED,
963 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
964 {
965 int factor = 1 + get_operand_specific_data (self);
966 insert_field (self->fields[0], code, info->addr.base_regno, 0);
967 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
968 return TRUE;
969 }
970
971 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
972 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
973 SELF's operand-dependent value. fields[0] specifies the field that
974 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
975 and imm3 fields, with imm3 being the less-significant part. */
976 bfd_boolean
977 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
978 const aarch64_opnd_info *info,
979 aarch64_insn *code,
980 const aarch64_inst *inst ATTRIBUTE_UNUSED,
981 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
982 {
983 int factor = 1 + get_operand_specific_data (self);
984 insert_field (self->fields[0], code, info->addr.base_regno, 0);
985 insert_fields (code, info->addr.offset.imm / factor, 0,
986 2, FLD_imm3, FLD_SVE_imm6);
987 return TRUE;
988 }
989
990 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
991 is a 4-bit signed number and where <shift> is SELF's operand-dependent
992 value. fields[0] specifies the base register field. */
993 bfd_boolean
994 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
995 const aarch64_opnd_info *info, aarch64_insn *code,
996 const aarch64_inst *inst ATTRIBUTE_UNUSED,
997 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
998 {
999 int factor = 1 << get_operand_specific_data (self);
1000 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1001 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
1002 return TRUE;
1003 }
1004
1005 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1006 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1007 value. fields[0] specifies the base register field. */
1008 bfd_boolean
1009 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
1010 const aarch64_opnd_info *info, aarch64_insn *code,
1011 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1012 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1013 {
1014 int factor = 1 << get_operand_specific_data (self);
1015 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1016 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
1017 return TRUE;
1018 }
1019
1020 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1021 is SELF's operand-dependent value. fields[0] specifies the base
1022 register field and fields[1] specifies the offset register field. */
1023 bfd_boolean
1024 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
1025 const aarch64_opnd_info *info, aarch64_insn *code,
1026 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1027 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1028 {
1029 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1030 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1031 return TRUE;
1032 }
1033
1034 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1035 <shift> is SELF's operand-dependent value. fields[0] specifies the
1036 base register field, fields[1] specifies the offset register field and
1037 fields[2] is a single-bit field that selects SXTW over UXTW. */
1038 bfd_boolean
1039 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
1040 const aarch64_opnd_info *info, aarch64_insn *code,
1041 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1042 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1043 {
1044 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1045 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1046 if (info->shifter.kind == AARCH64_MOD_UXTW)
1047 insert_field (self->fields[2], code, 0, 0);
1048 else
1049 insert_field (self->fields[2], code, 1, 0);
1050 return TRUE;
1051 }
1052
1053 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1054 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1055 fields[0] specifies the base register field. */
1056 bfd_boolean
1057 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
1058 const aarch64_opnd_info *info, aarch64_insn *code,
1059 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1060 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1061 {
1062 int factor = 1 << get_operand_specific_data (self);
1063 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1064 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
1065 return TRUE;
1066 }
1067
1068 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1069 where <modifier> is fixed by the instruction and where <msz> is a
1070 2-bit unsigned number. fields[0] specifies the base register field
1071 and fields[1] specifies the offset register field. */
1072 static bfd_boolean
1073 aarch64_ext_sve_addr_zz (const aarch64_operand *self,
1074 const aarch64_opnd_info *info, aarch64_insn *code,
1075 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1076 {
1077 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1078 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1079 insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
1080 return TRUE;
1081 }
1082
1083 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1084 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1085 field and fields[1] specifies the offset register field. */
1086 bfd_boolean
1087 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
1088 const aarch64_opnd_info *info, aarch64_insn *code,
1089 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1090 aarch64_operand_error *errors)
1091 {
1092 return aarch64_ext_sve_addr_zz (self, info, code, errors);
1093 }
1094
1095 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1096 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1097 field and fields[1] specifies the offset register field. */
1098 bfd_boolean
1099 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
1100 const aarch64_opnd_info *info,
1101 aarch64_insn *code,
1102 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1103 aarch64_operand_error *errors)
1104 {
1105 return aarch64_ext_sve_addr_zz (self, info, code, errors);
1106 }
1107
1108 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1109 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1110 field and fields[1] specifies the offset register field. */
1111 bfd_boolean
1112 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
1113 const aarch64_opnd_info *info,
1114 aarch64_insn *code,
1115 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1116 aarch64_operand_error *errors)
1117 {
1118 return aarch64_ext_sve_addr_zz (self, info, code, errors);
1119 }
1120
/* Encode an SVE ADD/SUB immediate.  The encoded value is sh:imm8, a
   9-bit quantity in which bit 8 (the 256 below) is the "shifted left
   by 8" flag and the low eight bits are the unsigned immediate.  */
bfd_boolean
aarch64_ins_sve_aimm (const aarch64_operand *self,
		      const aarch64_opnd_info *info, aarch64_insn *code,
		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->shifter.amount == 8)
    /* Explicit "#imm, LSL #8": set the shift flag and encode imm as-is.  */
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    /* Unshifted multiple of 256: fold it into the shifted form.  */
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    /* Plain 8-bit immediate, no shift.  */
    insert_all_fields (self, code, info->imm.value & 0xff);
  return TRUE;
}
1136
1137 /* Encode an SVE CPY/DUP immediate. */
1138 bfd_boolean
1139 aarch64_ins_sve_asimm (const aarch64_operand *self,
1140 const aarch64_opnd_info *info, aarch64_insn *code,
1141 const aarch64_inst *inst,
1142 aarch64_operand_error *errors)
1143 {
1144 return aarch64_ins_sve_aimm (self, info, code, inst, errors);
1145 }
1146
1147 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1148 array specifies which field to use for Zn. MM is encoded in the
1149 concatenation of imm5 and SVE_tszh, with imm5 being the less
1150 significant part. */
1151 bfd_boolean
1152 aarch64_ins_sve_index (const aarch64_operand *self,
1153 const aarch64_opnd_info *info, aarch64_insn *code,
1154 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1155 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1156 {
1157 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1158 insert_field (self->fields[0], code, info->reglane.regno, 0);
1159 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1160 2, FLD_imm5, FLD_SVE_tszh);
1161 return TRUE;
1162 }
1163
1164 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1165 bfd_boolean
1166 aarch64_ins_sve_limm_mov (const aarch64_operand *self,
1167 const aarch64_opnd_info *info, aarch64_insn *code,
1168 const aarch64_inst *inst,
1169 aarch64_operand_error *errors)
1170 {
1171 return aarch64_ins_limm (self, info, code, inst, errors);
1172 }
1173
1174 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1175 and where MM occupies the most-significant part. The operand-dependent
1176 value specifies the number of bits in Zn. */
1177 bfd_boolean
1178 aarch64_ins_sve_quad_index (const aarch64_operand *self,
1179 const aarch64_opnd_info *info, aarch64_insn *code,
1180 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1181 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1182 {
1183 unsigned int reg_bits = get_operand_specific_data (self);
1184 assert (info->reglane.regno < (1U << reg_bits));
1185 unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1186 insert_all_fields (self, code, val);
1187 return TRUE;
1188 }
1189
1190 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1191 to use for Zn. */
1192 bfd_boolean
1193 aarch64_ins_sve_reglist (const aarch64_operand *self,
1194 const aarch64_opnd_info *info, aarch64_insn *code,
1195 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1196 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1197 {
1198 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
1199 return TRUE;
1200 }
1201
1202 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1203 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1204 field. */
1205 bfd_boolean
1206 aarch64_ins_sve_scale (const aarch64_operand *self,
1207 const aarch64_opnd_info *info, aarch64_insn *code,
1208 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1209 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1210 {
1211 insert_all_fields (self, code, info->imm.value);
1212 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
1213 return TRUE;
1214 }
1215
1216 /* Encode an SVE shift left immediate. */
1217 bfd_boolean
1218 aarch64_ins_sve_shlimm (const aarch64_operand *self,
1219 const aarch64_opnd_info *info, aarch64_insn *code,
1220 const aarch64_inst *inst,
1221 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1222 {
1223 const aarch64_opnd_info *prev_operand;
1224 unsigned int esize;
1225
1226 assert (info->idx > 0);
1227 prev_operand = &inst->operands[info->idx - 1];
1228 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1229 insert_all_fields (self, code, 8 * esize + info->imm.value);
1230 return TRUE;
1231 }
1232
1233 /* Encode an SVE shift right immediate. */
1234 bfd_boolean
1235 aarch64_ins_sve_shrimm (const aarch64_operand *self,
1236 const aarch64_opnd_info *info, aarch64_insn *code,
1237 const aarch64_inst *inst,
1238 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1239 {
1240 const aarch64_opnd_info *prev_operand;
1241 unsigned int esize;
1242
1243 assert (info->idx > 0);
1244 prev_operand = &inst->operands[info->idx - 1];
1245 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1246 insert_all_fields (self, code, 16 * esize - info->imm.value);
1247 return TRUE;
1248 }
1249
1250 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1251 The fields array specifies which field to use. */
1252 bfd_boolean
1253 aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1254 const aarch64_opnd_info *info,
1255 aarch64_insn *code,
1256 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1257 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1258 {
1259 if (info->imm.value == 0x3f000000)
1260 insert_field (self->fields[0], code, 0, 0);
1261 else
1262 insert_field (self->fields[0], code, 1, 0);
1263 return TRUE;
1264 }
1265
1266 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1267 The fields array specifies which field to use. */
1268 bfd_boolean
1269 aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1270 const aarch64_opnd_info *info,
1271 aarch64_insn *code,
1272 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1273 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1274 {
1275 if (info->imm.value == 0x3f000000)
1276 insert_field (self->fields[0], code, 0, 0);
1277 else
1278 insert_field (self->fields[0], code, 1, 0);
1279 return TRUE;
1280 }
1281
1282 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1283 The fields array specifies which field to use. */
1284 bfd_boolean
1285 aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1286 const aarch64_opnd_info *info,
1287 aarch64_insn *code,
1288 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1289 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1290 {
1291 if (info->imm.value == 0)
1292 insert_field (self->fields[0], code, 0, 0);
1293 else
1294 insert_field (self->fields[0], code, 1, 0);
1295 return TRUE;
1296 }
1297
1298 /* Miscellaneous encoding functions. */
1299
/* Encode size[0], i.e. bit 22, for
   e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.
   The bit is derived from the full-width (.4S/.2D) operand: 0 for .4S,
   1 for .2D.  Which operand that is depends on the direction of the
   conversion.  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier;

  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>: narrowing, so the source (operand
	 1) carries the full-width qualifier.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>: widening, so the destination
	 (operand 0) carries the full-width qualifier.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      assert (0);
    }
  assert (qualifier == AARCH64_OPND_QLF_V_4S
	  || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  /* Write only bit 0 of the size field (instruction bit 22).  */
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}
1331
1332 /* Encode size[0], i.e. bit 22, for
1333 e.g. FCVTXN <Vb><d>, <Va><n>. */
1334
1335 static void
1336 encode_asisd_fcvtxn (aarch64_inst *inst)
1337 {
1338 aarch64_insn val = 1;
1339 aarch64_field field = {0, 0};
1340 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1341 gen_sub_field (FLD_size, 0, 1, &field);
1342 insert_field_2 (&field, &inst->value, val, 0);
1343 }
1344
1345 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1346 static void
1347 encode_fcvt (aarch64_inst *inst)
1348 {
1349 aarch64_insn val;
1350 const aarch64_field field = {15, 2};
1351
1352 /* opc dstsize */
1353 switch (inst->operands[0].qualifier)
1354 {
1355 case AARCH64_OPND_QLF_S_S: val = 0; break;
1356 case AARCH64_OPND_QLF_S_D: val = 1; break;
1357 case AARCH64_OPND_QLF_S_H: val = 3; break;
1358 default: abort ();
1359 }
1360 insert_field_2 (&field, &inst->value, val, 0);
1361
1362 return;
1363 }
1364
1365 /* Return the index in qualifiers_list that INST is using. Should only
1366 be called once the qualifiers are known to be valid. */
1367
1368 static int
1369 aarch64_get_variant (struct aarch64_inst *inst)
1370 {
1371 int i, nops, variant;
1372
1373 nops = aarch64_num_of_operands (inst->opcode);
1374 for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
1375 {
1376 for (i = 0; i < nops; ++i)
1377 if (inst->opcode->qualifiers_list[variant][i]
1378 != inst->operands[i].qualifier)
1379 break;
1380 if (i == nops)
1381 return variant;
1382 }
1383 abort ();
1384 }
1385
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  Most of the SVE cases here implement MOV-style aliases by
   replicating one register field into another so that the underlying
   instruction's operands agree.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
		     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      /* Nothing extra to encode.  */
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}
1452
/* Encode the 'size' and 'Q' field for e.g. SHADD.  The two-bit size and
   the single Q bit together come from one operand's qualifier; which
   operand supplies them is determined from the opcode's qualifier
   lists.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q: bit 0 of the standard value.  */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size: bits [2:1] of the standard value.  The AdvSIMD load/store
     iclasses keep the size bits in a different position.  */
  if (inst->opcode->iclass == asisdlse
     || inst->opcode->iclass == asisdlsep
     || inst->opcode->iclass == asisdlso
     || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
1480
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has the enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  /* 'sf' (64-bit) bit, derived from an X/SP-qualified operand.  */
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      /* Some opcodes mirror 'sf' into the N bit as well.  */
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  /* LSE atomics use a dedicated size bit instead of 'sf'; the selecting
     operand is found the same way.  */
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  /* Scalar FP 'type' field (S/D/H).  */
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: assert (0);
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  /* AdvSIMD scalar size field.  */
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  /* Vector arrangement <T> encoded in Q and an imm5 one-hot pattern.  */
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      /* Place the one-hot marker bit: imm5<num> = 1.  */
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be a integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      /* opc<0> is the inverse of the W/X standard value.  */
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
1606
/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  The iclass determines both which field(s) receive the variant
   index and whether it is biased.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  switch (inst->opcode->iclass)
    {
    case sve_cpy:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
      /* For indices and shift amounts, the variant is encoded as
	 part of the immediate.  */
      break;

    case sve_limm:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
	 and depend on the immediate.  They don't have a separate
	 encoding.  */
      break;

    case sve_misc:
      /* sve_misc instructions have only a single variant.  */
      break;

    case sve_movprfx:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      /* Variant 0 is .H, encoded as size 1; hence the +1 bias.  */
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_sd:
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    default:
      break;
    }
}
1665
1666 /* Converters converting an alias opcode instruction to its real form. */
1667
1668 /* ROR <Wd>, <Ws>, #<shift>
1669 is equivalent to:
1670 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1671 static void
1672 convert_ror_to_extr (aarch64_inst *inst)
1673 {
1674 copy_operand_info (inst, 3, 2);
1675 copy_operand_info (inst, 2, 1);
1676 }
1677
1678 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1679 is equivalent to:
1680 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1681 static void
1682 convert_xtl_to_shll (aarch64_inst *inst)
1683 {
1684 inst->operands[2].qualifier = inst->operands[1].qualifier;
1685 inst->operands[2].imm.value = 0;
1686 }
1687
1688 /* Convert
1689 LSR <Xd>, <Xn>, #<shift>
1690 to
1691 UBFM <Xd>, <Xn>, #<shift>, #63. */
1692 static void
1693 convert_sr_to_bfm (aarch64_inst *inst)
1694 {
1695 inst->operands[3].imm.value =
1696 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1697 }
1698
1699 /* Convert MOV to ORR. */
1700 static void
1701 convert_mov_to_orr (aarch64_inst *inst)
1702 {
1703 /* MOV <Vd>.<T>, <Vn>.<T>
1704 is equivalent to:
1705 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1706 copy_operand_info (inst, 2, 1);
1707 }
1708
1709 /* When <imms> >= <immr>, the instruction written:
1710 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1711 is equivalent to:
1712 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1713
1714 static void
1715 convert_bfx_to_bfm (aarch64_inst *inst)
1716 {
1717 int64_t lsb, width;
1718
1719 /* Convert the operand. */
1720 lsb = inst->operands[2].imm.value;
1721 width = inst->operands[3].imm.value;
1722 inst->operands[2].imm.value = lsb;
1723 inst->operands[3].imm.value = lsb + width - 1;
1724 }
1725
1726 /* When <imms> < <immr>, the instruction written:
1727 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1728 is equivalent to:
1729 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1730
1731 static void
1732 convert_bfi_to_bfm (aarch64_inst *inst)
1733 {
1734 int64_t lsb, width;
1735
1736 /* Convert the operand. */
1737 lsb = inst->operands[2].imm.value;
1738 width = inst->operands[3].imm.value;
1739 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1740 {
1741 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1742 inst->operands[3].imm.value = width - 1;
1743 }
1744 else
1745 {
1746 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1747 inst->operands[3].imm.value = width - 1;
1748 }
1749 }
1750
1751 /* The instruction written:
1752 BFC <Xd>, #<lsb>, #<width>
1753 is equivalent to:
1754 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1755
1756 static void
1757 convert_bfc_to_bfm (aarch64_inst *inst)
1758 {
1759 int64_t lsb, width;
1760
1761 /* Insert XZR. */
1762 copy_operand_info (inst, 3, 2);
1763 copy_operand_info (inst, 2, 1);
1764 copy_operand_info (inst, 1, 0);
1765 inst->operands[1].reg.regno = 0x1f;
1766
1767 /* Convert the immediate operand. */
1768 lsb = inst->operands[2].imm.value;
1769 width = inst->operands[3].imm.value;
1770 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1771 {
1772 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1773 inst->operands[3].imm.value = width - 1;
1774 }
1775 else
1776 {
1777 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1778 inst->operands[3].imm.value = width - 1;
1779 }
1780 }
1781
1782 /* The instruction written:
1783 LSL <Xd>, <Xn>, #<shift>
1784 is equivalent to:
1785 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1786
1787 static void
1788 convert_lsl_to_ubfm (aarch64_inst *inst)
1789 {
1790 int64_t shift = inst->operands[2].imm.value;
1791
1792 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1793 {
1794 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1795 inst->operands[3].imm.value = 31 - shift;
1796 }
1797 else
1798 {
1799 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1800 inst->operands[3].imm.value = 63 - shift;
1801 }
1802 }
1803
1804 /* CINC <Wd>, <Wn>, <cond>
1805 is equivalent to:
1806 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1807
1808 static void
1809 convert_to_csel (aarch64_inst *inst)
1810 {
1811 copy_operand_info (inst, 3, 2);
1812 copy_operand_info (inst, 2, 1);
1813 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1814 }
1815
1816 /* CSET <Wd>, <cond>
1817 is equivalent to:
1818 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1819
1820 static void
1821 convert_cset_to_csinc (aarch64_inst *inst)
1822 {
1823 copy_operand_info (inst, 3, 1);
1824 copy_operand_info (inst, 2, 0);
1825 copy_operand_info (inst, 1, 0);
1826 inst->operands[1].reg.regno = 0x1f;
1827 inst->operands[2].reg.regno = 0x1f;
1828 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1829 }
1830
/* MOV <Wd>, #<imm>
   is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>
   (or MOVN for the inverted form): extract the 16-bit chunk and its
   shift amount from the full immediate.  */

static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value;

  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      /* MOVZ: encode the immediate directly.  */
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      /* MOVN: encode the bitwise inverse of the immediate.  */
      value = ~inst->operands[1].imm.value;
      break;
    default:
      assert (0);
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);
  /* Keep only the 16-bit chunk selected by shift_amount.  */
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
}
1864
1865 /* MOV <Wd>, #<imm>
1866 is equivalent to:
1867 ORR <Wd>, WZR, #<imm>. */
1868
1869 static void
1870 convert_mov_to_movebitmask (aarch64_inst *inst)
1871 {
1872 copy_operand_info (inst, 2, 1);
1873 inst->operands[1].reg.regno = 0x1f;
1874 inst->operands[1].skip = 0;
1875 }
1876
/* Some alias opcodes are assembled by being converted to their real-form.
   Dispatch on the alias opcode, rewrite INST's operands into the shape
   the real opcode REAL expects, then swap in REAL.  Aliases without
   F_CONV need no operand rewriting and only the opcode replacement.  */

static void
convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
{
  const aarch64_opcode *alias = inst->opcode;

  if ((alias->flags & F_CONV) == 0)
    goto convert_to_real_return;

  switch (alias->op)
    {
    case OP_ASR_IMM:
    case OP_LSR_IMM:
      convert_sr_to_bfm (inst);
      break;
    case OP_LSL_IMM:
      convert_lsl_to_ubfm (inst);
      break;
    case OP_CINC:
    case OP_CINV:
    case OP_CNEG:
      convert_to_csel (inst);
      break;
    case OP_CSET:
    case OP_CSETM:
      convert_cset_to_csinc (inst);
      break;
    case OP_UBFX:
    case OP_BFXIL:
    case OP_SBFX:
      convert_bfx_to_bfm (inst);
      break;
    case OP_SBFIZ:
    case OP_BFI:
    case OP_UBFIZ:
      convert_bfi_to_bfm (inst);
      break;
    case OP_BFC:
      convert_bfc_to_bfm (inst);
      break;
    case OP_MOV_V:
      convert_mov_to_orr (inst);
      break;
    case OP_MOV_IMM_WIDE:
    case OP_MOV_IMM_WIDEN:
      convert_mov_to_movewide (inst);
      break;
    case OP_MOV_IMM_LOG:
      convert_mov_to_movebitmask (inst);
      break;
    case OP_ROR_IMM:
      convert_ror_to_extr (inst);
      break;
    case OP_SXTL:
    case OP_SXTL2:
    case OP_UXTL:
    case OP_UXTL2:
      convert_xtl_to_shll (inst);
      break;
    default:
      break;
    }

convert_to_real_return:
  aarch64_replace_opcode (inst, real);
}
1944
/* Encode *INST_ORI of the opcode code OPCODE.
   Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.

   MISMATCH_DETAIL, if not NULL, is filled in with the reason for failure
   when the operand constraint check or one of the verifiers rejects the
   instruction.  INSN_SEQUENCE carries the cross-instruction state needed
   by the constraint verifiers.  Return TRUE on success, FALSE otherwise.  */

bfd_boolean
aarch64_opcode_encode (const aarch64_opcode *opcode,
		       const aarch64_inst *inst_ori, aarch64_insn *code,
		       aarch64_opnd_qualifier_t *qlf_seq,
		       aarch64_operand_error *mismatch_detail,
		       aarch64_instr_sequence* insn_sequence)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return 0;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.
     NOTE (review): *QLF_SEQ is overwritten on each iteration, so only the
     last operand's qualifier survives; presumably QLF_SEQ points at an
     array and qlf_seq[i] was intended -- confirm against callers.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
	*qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of real opcode
     and the encoding will be carried out using the rules for the aliased
     opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
		   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
      opcode = aliased;
    }

  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (info->skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      /* An operand with no inserter needs no work; its bits are already
	 part of the base opcode value.  */
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd)
	  && !aarch64_insert_operand (opnd, info, &inst->value, inst,
				      mismatch_detail))
	return FALSE;
    }

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

  /* Possibly use the instruction class to encode the chosen qualifier
     variant.  */
  aarch64_encode_variant_using_iclass (inst);

  /* Run a verifier if the instruction has one set.
     NOTE (review): *CODE has not yet been updated from INST->VALUE at this
     point, so verifiers see the caller's initial *CODE; presumably they are
     expected to work from INST instead -- confirm.  */
  if (opcode->verifier)
    {
      enum err_type result = opcode->verifier (inst, *code, 0, TRUE,
					       mismatch_detail, insn_sequence);
      switch (result)
	{
	case ERR_UND:
	case ERR_UNP:
	case ERR_NYI:
	  return FALSE;
	default:
	  break;
	}
    }

  /* Always run constrain verifiers, this is needed because constrains need to
     maintain a global state.  Regardless if the instruction has the flag set
     or not.  */
  enum err_type result = verify_constraints (inst, *code, 0, TRUE,
					     mismatch_detail, insn_sequence);
  switch (result)
    {
    case ERR_UND:
    case ERR_UNP:
    case ERR_NYI:
      return FALSE;
    default:
      break;
    }


 encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  /* Publish the fully-encoded instruction word to the caller.  */
  *code = inst->value;

  return TRUE;
}
This page took 0.083821 seconds and 5 git commands to generate.