include/opcode/
[deliverable/binutils-gdb.git] / opcodes / aarch64-asm.c
1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright 2012, 2013 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "aarch64-asm.h"
24
25 /* Utilities. */
26
/* The unnamed arguments consist of the number of fields and information about
   these fields where the VALUE will be inserted into CODE.  MASK can be zero or
   the base mask of the opcode.

   N.B. the fields are required to be in such an order that the least
   significant field for VALUE comes first, e.g. the <index> in
    SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
   is encoded in H:L:M in some cases, the fields H:L:M should be passed in
   the order of M, L, H.  */
36
37 static inline void
38 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
39 {
40 uint32_t num;
41 const aarch64_field *field;
42 enum aarch64_field_kind kind;
43 va_list va;
44
45 va_start (va, mask);
46 num = va_arg (va, uint32_t);
47 assert (num <= 5);
48 while (num--)
49 {
50 kind = va_arg (va, enum aarch64_field_kind);
51 field = &fields[kind];
52 insert_field (kind, code, value, mask);
53 value >>= field->width;
54 }
55 va_end (va);
56 }
57
58 /* Operand inserters. */
59
60 /* Insert register number. */
61 const char *
62 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
63 aarch64_insn *code,
64 const aarch64_inst *inst ATTRIBUTE_UNUSED)
65 {
66 insert_field (self->fields[0], code, info->reg.regno, 0);
67 return NULL;
68 }
69
/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
const char *
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      /* POS is the number of low bits used by the element size, derived
	 from the qualifier's distance from the byte qualifier.  */
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     The low set bit of imm5 selects the element size; the index
	     occupies the bits above it.
	       imm5<3:0>	<V>
	       0000		RESERVED
	       xxx1		B
	       xx10		H
	       x100		S
	       1000		D  */
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>];
	 the index is spread over the H, L and M fields depending on the
	 element size.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* H:L:M */
	  insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* H:L */
	  insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  insert_field (FLD_H, code, info->reglane.index, 0);
	  break;
	default:
	  assert (0);
	}
    }
  return NULL;
}
128
129 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
130 const char *
131 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
132 aarch64_insn *code,
133 const aarch64_inst *inst ATTRIBUTE_UNUSED)
134 {
135 /* R */
136 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
137 /* len */
138 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
139 return NULL;
140 }
141
142 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
143 in AdvSIMD load/store instructions. */
144 const char *
145 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
146 const aarch64_opnd_info *info, aarch64_insn *code,
147 const aarch64_inst *inst)
148 {
149 aarch64_insn value = 0;
150 /* Number of elements in each structure to be loaded/stored. */
151 unsigned num = get_opcode_dependent_value (inst->opcode);
152
153 /* Rt */
154 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
155 /* opcode */
156 switch (num)
157 {
158 case 1:
159 switch (info->reglist.num_regs)
160 {
161 case 1: value = 0x7; break;
162 case 2: value = 0xa; break;
163 case 3: value = 0x6; break;
164 case 4: value = 0x2; break;
165 default: assert (0);
166 }
167 break;
168 case 2:
169 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
170 break;
171 case 3:
172 value = 0x4;
173 break;
174 case 4:
175 value = 0x0;
176 break;
177 default:
178 assert (0);
179 }
180 insert_field (FLD_opcode, code, value, 0);
181
182 return NULL;
183 }
184
185 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
186 single structure to all lanes instructions. */
187 const char *
188 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
189 const aarch64_opnd_info *info, aarch64_insn *code,
190 const aarch64_inst *inst)
191 {
192 aarch64_insn value;
193 /* The opcode dependent area stores the number of elements in
194 each structure to be loaded/stored. */
195 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
196
197 /* Rt */
198 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
199 /* S */
200 value = (aarch64_insn) 0;
201 if (is_ld1r && info->reglist.num_regs == 2)
202 /* OP_LD1R does not have alternating variant, but have "two consecutive"
203 instead. */
204 value = (aarch64_insn) 1;
205 insert_field (FLD_S, code, value, 0);
206
207 return NULL;
208 }
209
/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand e.g. Vt in AdvSIMD load/store single element instructions.  */
const char *
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  /* This inserter only handles the indexed form of the operand.  */
  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  The wider the element, the
     fewer bits of Q:S:size the index occupies; the vacated low bits are
     fixed per the element size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q"; size<0> is fixed to 1 for the D variant.  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      assert (0);
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  /* opcode<2:1> lives in a 2-bit sub-field of the asisdlso opcode field.  */
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return NULL;
}
257
/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
   SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.  */
const char *
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst)
{
  /* VAL starts as the standard encoding of the vector-type qualifier;
     its bits are consumed from the low end as Q and then the element
     size are derived below.  */
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* Right shifts are encoded as (element size * 2) - shift:
       immh:immb
	 immh	<shift>
	 0000	SEE AdvSIMD modified immediate
	 0001	(16-UInt(immh:immb))
	 001x	(32-UInt(immh:immb))
	 01xx	(64-UInt(immh:immb))
	 1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* Left shifts are encoded as element size + shift:
       immh:immb
	 immh	<shift>
	 0000	SEE AdvSIMD modified immediate
	 0001	(UInt(immh:immb)-8)
	 001x	(UInt(immh:immb)-16)
	 01xx	(UInt(immh:immb)-32)
	 1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return NULL;
}
312
313 /* Insert fields for e.g. the immediate operands in
314 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
315 const char *
316 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
317 aarch64_insn *code,
318 const aarch64_inst *inst ATTRIBUTE_UNUSED)
319 {
320 int64_t imm;
321 /* Maximum of two fields to insert. */
322 assert (self->fields[2] == FLD_NIL);
323
324 imm = info->imm.value;
325 if (operand_need_shift_by_two (self))
326 imm >>= 2;
327 if (self->fields[1] == FLD_NIL)
328 insert_field (self->fields[0], code, imm, 0);
329 else
330 /* e.g. TBZ b5:b40. */
331 insert_fields (code, imm, 0, 2, self->fields[1], self->fields[0]);
332 return NULL;
333 }
334
335 /* Insert immediate and its shift amount for e.g. the last operand in
336 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
337 const char *
338 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
339 aarch64_insn *code, const aarch64_inst *inst)
340 {
341 /* imm16 */
342 aarch64_ins_imm (self, info, code, inst);
343 /* hw */
344 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
345 return NULL;
346 }
347
348 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
349 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
350 const char *
351 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
352 const aarch64_opnd_info *info,
353 aarch64_insn *code,
354 const aarch64_inst *inst ATTRIBUTE_UNUSED)
355 {
356 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
357 uint64_t imm = info->imm.value;
358 enum aarch64_modifier_kind kind = info->shifter.kind;
359 int amount = info->shifter.amount;
360 aarch64_field field = {0, 0};
361
362 /* a:b:c:d:e:f:g:h */
363 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
364 {
365 /* Either MOVI <Dd>, #<imm>
366 or MOVI <Vd>.2D, #<imm>.
367 <imm> is a 64-bit immediate
368 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
369 encoded in "a:b:c:d:e:f:g:h". */
370 imm = aarch64_shrink_expanded_imm8 (imm);
371 assert ((int)imm >= 0);
372 }
373 assert (imm <= 255);
374 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
375
376 if (kind == AARCH64_MOD_NONE)
377 return NULL;
378
379 /* shift amount partially in cmode */
380 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
381 if (kind == AARCH64_MOD_LSL)
382 {
383 /* AARCH64_MOD_LSL: shift zeros. */
384 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
385 assert (esize == 4 || esize == 2 || esize == 1);
386 /* For 8-bit move immediate, the optional LSL #0 does not require
387 encoding. */
388 if (esize == 1)
389 return NULL;
390 amount >>= 3;
391 if (esize == 4)
392 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
393 else
394 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
395 }
396 else
397 {
398 /* AARCH64_MOD_MSL: shift ones. */
399 amount >>= 4;
400 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
401 }
402 insert_field_2 (&field, code, amount, 0);
403
404 return NULL;
405 }
406
407 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
408 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
409 const char *
410 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
411 aarch64_insn *code,
412 const aarch64_inst *inst ATTRIBUTE_UNUSED)
413 {
414 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
415 return NULL;
416 }
417
418 /* Insert arithmetic immediate for e.g. the last operand in
419 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
420 const char *
421 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
422 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
423 {
424 /* shift */
425 aarch64_insn value = info->shifter.amount ? 1 : 0;
426 insert_field (self->fields[0], code, value, 0);
427 /* imm12 (unsigned) */
428 insert_field (self->fields[1], code, info->imm.value, 0);
429 return NULL;
430 }
431
432 /* Insert logical/bitmask immediate for e.g. the last operand in
433 ORR <Wd|WSP>, <Wn>, #<imm>. */
434 const char *
435 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
436 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
437 {
438 aarch64_insn value;
439 uint64_t imm = info->imm.value;
440 int is32 = aarch64_get_qualifier_esize (inst->operands[0].qualifier) == 4;
441
442 if (inst->opcode->op == OP_BIC)
443 imm = ~imm;
444 if (aarch64_logical_immediate_p (imm, is32, &value) == FALSE)
445 /* The constraint check should have guaranteed this wouldn't happen. */
446 assert (0);
447
448 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
449 self->fields[0]);
450 return NULL;
451 }
452
453 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
454 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
455 const char *
456 aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
457 aarch64_insn *code, const aarch64_inst *inst)
458 {
459 aarch64_insn value = 0;
460
461 assert (info->idx == 0);
462
463 /* Rt */
464 aarch64_ins_regno (self, info, code, inst);
465 if (inst->opcode->iclass == ldstpair_indexed
466 || inst->opcode->iclass == ldstnapair_offs
467 || inst->opcode->iclass == ldstpair_off
468 || inst->opcode->iclass == loadlit)
469 {
470 /* size */
471 switch (info->qualifier)
472 {
473 case AARCH64_OPND_QLF_S_S: value = 0; break;
474 case AARCH64_OPND_QLF_S_D: value = 1; break;
475 case AARCH64_OPND_QLF_S_Q: value = 2; break;
476 default: assert (0);
477 }
478 insert_field (FLD_ldst_size, code, value, 0);
479 }
480 else
481 {
482 /* opc[1]:size */
483 value = aarch64_get_qualifier_standard_value (info->qualifier);
484 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
485 }
486
487 return NULL;
488 }
489
490 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
491 const char *
492 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
493 const aarch64_opnd_info *info, aarch64_insn *code,
494 const aarch64_inst *inst ATTRIBUTE_UNUSED)
495 {
496 /* Rn */
497 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
498 return NULL;
499 }
500
/* Encode the address operand for e.g.
   STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
const char *
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    /* For wider accesses any non-zero amount sets S.  */
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
       S	<amount>
       0	[absent]
       1	#0
       Must be #0 if <extend> is explicitly LSL.  S records whether the
       optional #0 was written out, which changes the disassembly only.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return NULL;
}
533
/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.  */
const char *
aarch64_ins_addr_simm (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7)
    /* Scaled immediate in ld/st pair instructions: the byte offset is
       divided by the access size before encoding.  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post- index */
  if (info->addr.writeback)
    {
      /* These instruction classes have no writeback forms, so writeback
	 must not have been parsed for them.  */
      assert (inst->opcode->iclass != ldst_unscaled
	      && inst->opcode->iclass != ldstnapair_offs
	      && inst->opcode->iclass != ldstpair_off
	      && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      /* Only pre-index needs an extra bit set; post-index is implied by
	 the opcode's base encoding.  */
      if (info->addr.preind)
	insert_field (self->fields[1], code, 1, 0);
    }

  return NULL;
}
565
566 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
567 const char *
568 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
569 const aarch64_opnd_info *info,
570 aarch64_insn *code,
571 const aarch64_inst *inst ATTRIBUTE_UNUSED)
572 {
573 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
574
575 /* Rn */
576 insert_field (self->fields[0], code, info->addr.base_regno, 0);
577 /* uimm12 */
578 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
579 return NULL;
580 }
581
582 /* Encode the address operand for e.g.
583 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
584 const char *
585 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
586 const aarch64_opnd_info *info, aarch64_insn *code,
587 const aarch64_inst *inst ATTRIBUTE_UNUSED)
588 {
589 /* Rn */
590 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
591 /* Rm | #<amount> */
592 if (info->addr.offset.is_reg)
593 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
594 else
595 insert_field (FLD_Rm, code, 0x1f, 0);
596 return NULL;
597 }
598
599 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
600 const char *
601 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
602 const aarch64_opnd_info *info, aarch64_insn *code,
603 const aarch64_inst *inst ATTRIBUTE_UNUSED)
604 {
605 /* cond */
606 insert_field (FLD_cond, code, info->cond->value, 0);
607 return NULL;
608 }
609
610 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
611 const char *
612 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
613 const aarch64_opnd_info *info, aarch64_insn *code,
614 const aarch64_inst *inst ATTRIBUTE_UNUSED)
615 {
616 /* op0:op1:CRn:CRm:op2 */
617 insert_fields (code, info->sysreg, inst->opcode->mask, 5,
618 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
619 return NULL;
620 }
621
622 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
623 const char *
624 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
625 const aarch64_opnd_info *info, aarch64_insn *code,
626 const aarch64_inst *inst ATTRIBUTE_UNUSED)
627 {
628 /* op1:op2 */
629 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
630 FLD_op2, FLD_op1);
631 return NULL;
632 }
633
634 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
635 const char *
636 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
637 const aarch64_opnd_info *info, aarch64_insn *code,
638 const aarch64_inst *inst ATTRIBUTE_UNUSED)
639 {
640 /* op1:CRn:CRm:op2 */
641 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
642 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
643 return NULL;
644 }
645
646 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
647
648 const char *
649 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
650 const aarch64_opnd_info *info, aarch64_insn *code,
651 const aarch64_inst *inst ATTRIBUTE_UNUSED)
652 {
653 /* CRm */
654 insert_field (FLD_CRm, code, info->barrier->value, 0);
655 return NULL;
656 }
657
658 /* Encode the prefetch operation option operand for e.g.
659 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
660
661 const char *
662 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
663 const aarch64_opnd_info *info, aarch64_insn *code,
664 const aarch64_inst *inst ATTRIBUTE_UNUSED)
665 {
666 /* prfop in Rt */
667 insert_field (FLD_Rt, code, info->prfop->value, 0);
668 return NULL;
669 }
670
671 /* Encode the extended register operand for e.g.
672 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
673 const char *
674 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
675 const aarch64_opnd_info *info, aarch64_insn *code,
676 const aarch64_inst *inst ATTRIBUTE_UNUSED)
677 {
678 enum aarch64_modifier_kind kind;
679
680 /* Rm */
681 insert_field (FLD_Rm, code, info->reg.regno, 0);
682 /* option */
683 kind = info->shifter.kind;
684 if (kind == AARCH64_MOD_LSL)
685 kind = info->qualifier == AARCH64_OPND_QLF_W
686 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
687 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
688 /* imm3 */
689 insert_field (FLD_imm3, code, info->shifter.amount, 0);
690
691 return NULL;
692 }
693
694 /* Encode the shifted register operand for e.g.
695 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
696 const char *
697 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
698 const aarch64_opnd_info *info, aarch64_insn *code,
699 const aarch64_inst *inst ATTRIBUTE_UNUSED)
700 {
701 /* Rm */
702 insert_field (FLD_Rm, code, info->reg.regno, 0);
703 /* shift */
704 insert_field (FLD_shift, code,
705 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
706 /* imm6 */
707 insert_field (FLD_imm6, code, info->shifter.amount, 0);
708
709 return NULL;
710 }
711
712 /* Miscellaneous encoding functions. */
713
714 /* Encode size[0], i.e. bit 22, for
715 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
716
717 static void
718 encode_asimd_fcvt (aarch64_inst *inst)
719 {
720 aarch64_insn value;
721 aarch64_field field = {0, 0};
722 enum aarch64_opnd_qualifier qualifier;
723
724 switch (inst->opcode->op)
725 {
726 case OP_FCVTN:
727 case OP_FCVTN2:
728 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
729 qualifier = inst->operands[1].qualifier;
730 break;
731 case OP_FCVTL:
732 case OP_FCVTL2:
733 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
734 qualifier = inst->operands[0].qualifier;
735 break;
736 default:
737 assert (0);
738 }
739 assert (qualifier == AARCH64_OPND_QLF_V_4S
740 || qualifier == AARCH64_OPND_QLF_V_2D);
741 value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
742 gen_sub_field (FLD_size, 0, 1, &field);
743 insert_field_2 (&field, &inst->value, value, 0);
744 }
745
746 /* Encode size[0], i.e. bit 22, for
747 e.g. FCVTXN <Vb><d>, <Va><n>. */
748
749 static void
750 encode_asisd_fcvtxn (aarch64_inst *inst)
751 {
752 aarch64_insn val = 1;
753 aarch64_field field = {0, 0};
754 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
755 gen_sub_field (FLD_size, 0, 1, &field);
756 insert_field_2 (&field, &inst->value, val, 0);
757 }
758
759 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
760 static void
761 encode_fcvt (aarch64_inst *inst)
762 {
763 aarch64_insn val;
764 const aarch64_field field = {15, 2};
765
766 /* opc dstsize */
767 switch (inst->operands[0].qualifier)
768 {
769 case AARCH64_OPND_QLF_S_S: val = 0; break;
770 case AARCH64_OPND_QLF_S_D: val = 1; break;
771 case AARCH64_OPND_QLF_S_H: val = 3; break;
772 default: abort ();
773 }
774 insert_field_2 (&field, &inst->value, val, 0);
775
776 return;
777 }
778
779 /* Do miscellaneous encodings that are not common enough to be driven by
780 flags. */
781
782 static void
783 do_misc_encoding (aarch64_inst *inst)
784 {
785 switch (inst->opcode->op)
786 {
787 case OP_FCVT:
788 encode_fcvt (inst);
789 break;
790 case OP_FCVTN:
791 case OP_FCVTN2:
792 case OP_FCVTL:
793 case OP_FCVTL2:
794 encode_asimd_fcvt (inst);
795 break;
796 case OP_FCVTXN_S:
797 encode_asisd_fcvtxn (inst);
798 break;
799 default: break;
800 }
801 }
802
/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  /* The standard value packs Q in bit 0 and size in bits 2:1.  */
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size: the AdvSIMD load/store classes use a differently-placed size
     field than the rest.  */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
830
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has the enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditionally-executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  /* F_SF: the sf bit (and optionally N) mirrors whether the selected
     operand is 64-bit (X or SP qualifier).  */
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  /* F_FPTYPE: FP 'type' field from the selected operand's precision.  */
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: assert (0);
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  /* F_SSIZE: scalar 'size' field from the selected operand's scalar
     qualifier (S_B .. S_Q).  */
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  /* F_T: encode Q and imm5 from operand 0's vector-type qualifier.  */
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      /* Set the single marker bit at the position selected by the element
	 size; the lower bits of imm5 stay zero.  */
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  /* F_GPRSIZE_IN_Q: GPR width encoded in the Q bit.  */
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be an integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* F_LDS_SIZE: load-signed destination width encoded in opc<0>
     (inverted relative to the qualifier's standard value).  */
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
948
/* Converters converting an alias opcode instruction to its real form.  */

/* ROR <Wd>, <Ws>, #<shift>
   is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  /* Shift the #<shift> immediate from slot 2 to slot 3 first, then
     duplicate <Ws> into slot 2 — this order matters, as the second copy
     overwrites slot 2.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}
960
961 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
962 is equivalent to:
963 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
964 static void
965 convert_xtl_to_shll (aarch64_inst *inst)
966 {
967 inst->operands[2].qualifier = inst->operands[1].qualifier;
968 inst->operands[2].imm.value = 0;
969 }
970
971 /* Convert
972 LSR <Xd>, <Xn>, #<shift>
973 to
974 UBFM <Xd>, <Xn>, #<shift>, #63. */
975 static void
976 convert_sr_to_bfm (aarch64_inst *inst)
977 {
978 inst->operands[3].imm.value =
979 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
980 }
981
/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
     is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.
     Duplicate the single source <Vn> into the second source slot.  */
  copy_operand_info (inst, 2, 1);
}
991
992 /* When <imms> >= <immr>, the instruction written:
993 SBFX <Xd>, <Xn>, #<lsb>, #<width>
994 is equivalent to:
995 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
996
997 static void
998 convert_bfx_to_bfm (aarch64_inst *inst)
999 {
1000 int64_t lsb, width;
1001
1002 /* Convert the operand. */
1003 lsb = inst->operands[2].imm.value;
1004 width = inst->operands[3].imm.value;
1005 inst->operands[2].imm.value = lsb;
1006 inst->operands[3].imm.value = lsb + width - 1;
1007 }
1008
1009 /* When <imms> < <immr>, the instruction written:
1010 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1011 is equivalent to:
1012 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1013
1014 static void
1015 convert_bfi_to_bfm (aarch64_inst *inst)
1016 {
1017 int64_t lsb, width;
1018
1019 /* Convert the operand. */
1020 lsb = inst->operands[2].imm.value;
1021 width = inst->operands[3].imm.value;
1022 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1023 {
1024 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1025 inst->operands[3].imm.value = width - 1;
1026 }
1027 else
1028 {
1029 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1030 inst->operands[3].imm.value = width - 1;
1031 }
1032 }
1033
1034 /* The instruction written:
1035 LSL <Xd>, <Xn>, #<shift>
1036 is equivalent to:
1037 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1038
1039 static void
1040 convert_lsl_to_ubfm (aarch64_inst *inst)
1041 {
1042 int64_t shift = inst->operands[2].imm.value;
1043
1044 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1045 {
1046 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1047 inst->operands[3].imm.value = 31 - shift;
1048 }
1049 else
1050 {
1051 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1052 inst->operands[3].imm.value = 63 - shift;
1053 }
1054 }
1055
1056 /* CINC <Wd>, <Wn>, <cond>
1057 is equivalent to:
1058 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1059
1060 static void
1061 convert_to_csel (aarch64_inst *inst)
1062 {
1063 copy_operand_info (inst, 3, 2);
1064 copy_operand_info (inst, 2, 1);
1065 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1066 }
1067
1068 /* CSET <Wd>, <cond>
1069 is equivalent to:
1070 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1071
1072 static void
1073 convert_cset_to_csinc (aarch64_inst *inst)
1074 {
1075 copy_operand_info (inst, 3, 1);
1076 copy_operand_info (inst, 2, 0);
1077 copy_operand_info (inst, 1, 0);
1078 inst->operands[1].reg.regno = 0x1f;
1079 inst->operands[2].reg.regno = 0x1f;
1080 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1081 }
1082
1083 /* MOV <Wd>, #<imm>
1084 is equivalent to:
1085 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1086
1087 static void
1088 convert_mov_to_movewide (aarch64_inst *inst)
1089 {
1090 int is32;
1091 uint32_t shift_amount;
1092 uint64_t value;
1093
1094 switch (inst->opcode->op)
1095 {
1096 case OP_MOV_IMM_WIDE:
1097 value = inst->operands[1].imm.value;
1098 break;
1099 case OP_MOV_IMM_WIDEN:
1100 value = ~inst->operands[1].imm.value;
1101 break;
1102 default:
1103 assert (0);
1104 }
1105 inst->operands[1].type = AARCH64_OPND_HALF;
1106 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1107 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1108 /* The constraint check should have guaranteed this wouldn't happen. */
1109 assert (0);
1110 value >>= shift_amount;
1111 value &= 0xffff;
1112 inst->operands[1].imm.value = value;
1113 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1114 inst->operands[1].shifter.amount = shift_amount;
1115 }
1116
1117 /* MOV <Wd>, #<imm>
1118 is equivalent to:
1119 ORR <Wd>, WZR, #<imm>. */
1120
1121 static void
1122 convert_mov_to_movebitmask (aarch64_inst *inst)
1123 {
1124 copy_operand_info (inst, 2, 1);
1125 inst->operands[1].reg.regno = 0x1f;
1126 inst->operands[1].skip = 0;
1127 }
1128
1129 /* Some alias opcodes are assembled by being converted to their real-form. */
1130
1131 static void
1132 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1133 {
1134 const aarch64_opcode *alias = inst->opcode;
1135
1136 if ((alias->flags & F_CONV) == 0)
1137 goto convert_to_real_return;
1138
1139 switch (alias->op)
1140 {
1141 case OP_ASR_IMM:
1142 case OP_LSR_IMM:
1143 convert_sr_to_bfm (inst);
1144 break;
1145 case OP_LSL_IMM:
1146 convert_lsl_to_ubfm (inst);
1147 break;
1148 case OP_CINC:
1149 case OP_CINV:
1150 case OP_CNEG:
1151 convert_to_csel (inst);
1152 break;
1153 case OP_CSET:
1154 case OP_CSETM:
1155 convert_cset_to_csinc (inst);
1156 break;
1157 case OP_UBFX:
1158 case OP_BFXIL:
1159 case OP_SBFX:
1160 convert_bfx_to_bfm (inst);
1161 break;
1162 case OP_SBFIZ:
1163 case OP_BFI:
1164 case OP_UBFIZ:
1165 convert_bfi_to_bfm (inst);
1166 break;
1167 case OP_MOV_V:
1168 convert_mov_to_orr (inst);
1169 break;
1170 case OP_MOV_IMM_WIDE:
1171 case OP_MOV_IMM_WIDEN:
1172 convert_mov_to_movewide (inst);
1173 break;
1174 case OP_MOV_IMM_LOG:
1175 convert_mov_to_movebitmask (inst);
1176 break;
1177 case OP_ROR_IMM:
1178 convert_ror_to_extr (inst);
1179 break;
1180 case OP_SXTL:
1181 case OP_SXTL2:
1182 case OP_UXTL:
1183 case OP_UXTL2:
1184 convert_xtl_to_shll (inst);
1185 break;
1186 default:
1187 break;
1188 }
1189
1190 convert_to_real_return:
1191 aarch64_replace_opcode (inst, real);
1192 }
1193
1194 /* Encode *INST_ORI of the opcode code OPCODE.
1195 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1196 matched operand qualifier sequence in *QLF_SEQ. */
1197
1198 int
1199 aarch64_opcode_encode (const aarch64_opcode *opcode,
1200 const aarch64_inst *inst_ori, aarch64_insn *code,
1201 aarch64_opnd_qualifier_t *qlf_seq,
1202 aarch64_operand_error *mismatch_detail)
1203 {
1204 int i;
1205 const aarch64_opcode *aliased;
1206 aarch64_inst copy, *inst;
1207
1208 DEBUG_TRACE ("enter with %s", opcode->name);
1209
1210 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1211 copy = *inst_ori;
1212 inst = &copy;
1213
1214 assert (inst->opcode == NULL || inst->opcode == opcode);
1215 if (inst->opcode == NULL)
1216 inst->opcode = opcode;
1217
1218 /* Constrain the operands.
1219 After passing this, the encoding is guaranteed to succeed. */
1220 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
1221 {
1222 DEBUG_TRACE ("FAIL since operand constraint not met");
1223 return 0;
1224 }
1225
1226 /* Get the base value.
1227 Note: this has to be before the aliasing handling below in order to
1228 get the base value from the alias opcode before we move on to the
1229 aliased opcode for encoding. */
1230 inst->value = opcode->opcode;
1231
1232 /* No need to do anything else if the opcode does not have any operand. */
1233 if (aarch64_num_of_operands (opcode) == 0)
1234 goto encoding_exit;
1235
1236 /* Assign operand indexes and check types. Also put the matched
1237 operand qualifiers in *QLF_SEQ to return. */
1238 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1239 {
1240 assert (opcode->operands[i] == inst->operands[i].type);
1241 inst->operands[i].idx = i;
1242 if (qlf_seq != NULL)
1243 *qlf_seq = inst->operands[i].qualifier;
1244 }
1245
1246 aliased = aarch64_find_real_opcode (opcode);
1247 /* If the opcode is an alias and it does not ask for direct encoding by
1248 itself, the instruction will be transformed to the form of real opcode
1249 and the encoding will be carried out using the rules for the aliased
1250 opcode. */
1251 if (aliased != NULL && (opcode->flags & F_CONV))
1252 {
1253 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1254 aliased->name, opcode->name);
1255 /* Convert the operands to the form of the real opcode. */
1256 convert_to_real (inst, aliased);
1257 opcode = aliased;
1258 }
1259
1260 aarch64_opnd_info *info = inst->operands;
1261
1262 /* Call the inserter of each operand. */
1263 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
1264 {
1265 const aarch64_operand *opnd;
1266 enum aarch64_opnd type = opcode->operands[i];
1267 if (type == AARCH64_OPND_NIL)
1268 break;
1269 if (info->skip)
1270 {
1271 DEBUG_TRACE ("skip the incomplete operand %d", i);
1272 continue;
1273 }
1274 opnd = &aarch64_operands[type];
1275 if (operand_has_inserter (opnd))
1276 aarch64_insert_operand (opnd, info, &inst->value, inst);
1277 }
1278
1279 /* Call opcode encoders indicated by flags. */
1280 if (opcode_has_special_coder (opcode))
1281 do_special_encoding (inst);
1282
1283 encoding_exit:
1284 DEBUG_TRACE ("exit with %s", opcode->name);
1285
1286 *code = inst->value;
1287
1288 return 1;
1289 }
This page took 0.061316 seconds and 5 git commands to generate.