1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "aarch64-asm.h"
24
25 /* Utilities. */
26
27 /* The unnamed arguments consist of the number of fields, followed by the
28 kinds of the fields into which VALUE will be inserted in CODE. MASK can be
29 zero or the base mask of the opcode.
30
31 N.B. the fields are required to be in such an order that the least
32 significant field for VALUE comes first, e.g. the <index> in
33 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
34 is encoded in H:L:M in some cases; for those, the fields should be passed
35 in the order M, L, H. */
36
37 static inline void
38 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
39 {
40 uint32_t num;
41 const aarch64_field *field;
42 enum aarch64_field_kind kind;
43 va_list va;
44
45 va_start (va, mask);
46 num = va_arg (va, uint32_t);
47 assert (num <= 5);
48 while (num--)
49 {
50 kind = va_arg (va, enum aarch64_field_kind);
51 field = &fields[kind];
52 insert_field (kind, code, value, mask);
53 value >>= field->width;
54 }
55 va_end (va);
56 }
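/* For illustration: with an index that the architecture encodes in H:L:M
   (each of those fields being one bit wide), a call such as
     insert_fields (code, 5, 0, 3, FLD_M, FLD_L, FLD_H);
   writes M = 1, L = 0 and H = 1, i.e. the first field named receives the
   least significant bit of VALUE.  */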
57
58 /* Operand inserters. */
59
60 /* Insert register number. */
61 const char *
62 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
63 aarch64_insn *code,
64 const aarch64_inst *inst ATTRIBUTE_UNUSED)
65 {
66 insert_field (self->fields[0], code, info->reg.regno, 0);
67 return NULL;
68 }
69
70 /* Insert register number, index and/or other data for SIMD register element
71 operand, e.g. the last source operand in
72 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
73 const char *
74 aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
75 aarch64_insn *code, const aarch64_inst *inst)
76 {
77 /* regno */
78 insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
79 /* index and/or type */
80 if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
81 {
82 int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
83 if (info->type == AARCH64_OPND_En
84 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
85 {
86 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
87 assert (info->idx == 1); /* Vn */
88 aarch64_insn value = info->reglane.index << pos;
89 insert_field (FLD_imm4, code, value, 0);
90 }
91 else
92 {
93 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
94 imm5<3:0> <V>
95 0000 RESERVED
96 xxx1 B
97 xx10 H
98 x100 S
99 1000 D */
100 aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
101 insert_field (FLD_imm5, code, value, 0);
102 }
103 }
104 else
105 {
106 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
107 or SQDMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>]. */
108 switch (info->qualifier)
109 {
110 case AARCH64_OPND_QLF_S_H:
111 /* H:L:M */
112 insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
113 break;
114 case AARCH64_OPND_QLF_S_S:
115 /* H:L */
116 insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
117 break;
118 case AARCH64_OPND_QLF_S_D:
119 /* H */
120 insert_field (FLD_H, code, info->reglane.index, 0);
121 break;
122 default:
123 assert (0);
124 }
125 }
126 return NULL;
127 }
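/* Worked example for the imm5 case above: DUP <Sd>, <Vn>.S[3] has the
   qualifier S_S, so pos = 2 and the inserted value is
   ((3 << 1) | 1) << 2 = 0b11100; imm5 = x100 selects the S element size
   and imm5<4:3> carries the index.  */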
128
129 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
130 const char *
131 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
132 aarch64_insn *code,
133 const aarch64_inst *inst ATTRIBUTE_UNUSED)
134 {
135 /* R */
136 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
137 /* len */
138 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
139 return NULL;
140 }
141
142 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
143 in AdvSIMD load/store instructions. */
144 const char *
145 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
146 const aarch64_opnd_info *info, aarch64_insn *code,
147 const aarch64_inst *inst)
148 {
149 aarch64_insn value = 0;
150 /* Number of elements in each structure to be loaded/stored. */
151 unsigned num = get_opcode_dependent_value (inst->opcode);
152
153 /* Rt */
154 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
155 /* opcode */
156 switch (num)
157 {
158 case 1:
159 switch (info->reglist.num_regs)
160 {
161 case 1: value = 0x7; break;
162 case 2: value = 0xa; break;
163 case 3: value = 0x6; break;
164 case 4: value = 0x2; break;
165 default: assert (0);
166 }
167 break;
168 case 2:
169 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
170 break;
171 case 3:
172 value = 0x4;
173 break;
174 case 4:
175 value = 0x0;
176 break;
177 default:
178 assert (0);
179 }
180 insert_field (FLD_opcode, code, value, 0);
181
182 return NULL;
183 }
184
185 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
186 single structure to all lanes instructions. */
187 const char *
188 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
189 const aarch64_opnd_info *info, aarch64_insn *code,
190 const aarch64_inst *inst)
191 {
192 aarch64_insn value;
193 /* The opcode dependent area stores the number of elements in
194 each structure to be loaded/stored. */
195 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
196
197 /* Rt */
198 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
199 /* S */
200 value = (aarch64_insn) 0;
201 if (is_ld1r && info->reglist.num_regs == 2)
202 /* OP_LD1R does not have an alternating variant, but has a "two consecutive"
203 variant instead. */
204 value = (aarch64_insn) 1;
205 insert_field (FLD_S, code, value, 0);
206
207 return NULL;
208 }
209
210 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
211 operand, e.g. Vt in AdvSIMD load/store single element instructions. */
212 const char *
213 aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
214 const aarch64_opnd_info *info, aarch64_insn *code,
215 const aarch64_inst *inst ATTRIBUTE_UNUSED)
216 {
217 aarch64_field field = {0, 0};
218 aarch64_insn QSsize = 0; /* fields Q:S:size. */
219 aarch64_insn opcodeh2 = 0; /* opcode<2:1> */
220
221 assert (info->reglist.has_index);
222
223 /* Rt */
224 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
225 /* Encode the index, opcode<2:1> and size. */
226 switch (info->qualifier)
227 {
228 case AARCH64_OPND_QLF_S_B:
229 /* Index encoded in "Q:S:size". */
230 QSsize = info->reglist.index;
231 opcodeh2 = 0x0;
232 break;
233 case AARCH64_OPND_QLF_S_H:
234 /* Index encoded in "Q:S:size<1>". */
235 QSsize = info->reglist.index << 1;
236 opcodeh2 = 0x1;
237 break;
238 case AARCH64_OPND_QLF_S_S:
239 /* Index encoded in "Q:S". */
240 QSsize = info->reglist.index << 2;
241 opcodeh2 = 0x2;
242 break;
243 case AARCH64_OPND_QLF_S_D:
244 /* Index encoded in "Q". */
245 QSsize = info->reglist.index << 3 | 0x1;
246 opcodeh2 = 0x2;
247 break;
248 default:
249 assert (0);
250 }
251 insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
252 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
253 insert_field_2 (&field, code, opcodeh2, 0);
254
255 return NULL;
256 }
257
258 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
259 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
260 or SSHR <V><d>, <V><n>, #<shift>. */
261 const char *
262 aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
263 const aarch64_opnd_info *info,
264 aarch64_insn *code, const aarch64_inst *inst)
265 {
266 unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
267 aarch64_insn Q, imm;
268
269 if (inst->opcode->iclass == asimdshf)
270 {
271 /* Q
272 immh Q <T>
273 0000 x SEE AdvSIMD modified immediate
274 0001 0 8B
275 0001 1 16B
276 001x 0 4H
277 001x 1 8H
278 01xx 0 2S
279 01xx 1 4S
280 1xxx 0 RESERVED
281 1xxx 1 2D */
282 Q = (val & 0x1) ? 1 : 0;
283 insert_field (FLD_Q, code, Q, inst->opcode->mask);
284 val >>= 1;
285 }
286
287 assert (info->type == AARCH64_OPND_IMM_VLSR
288 || info->type == AARCH64_OPND_IMM_VLSL);
289
290 if (info->type == AARCH64_OPND_IMM_VLSR)
291 /* immh:immb
292 immh <shift>
293 0000 SEE AdvSIMD modified immediate
294 0001 (16-UInt(immh:immb))
295 001x (32-UInt(immh:immb))
296 01xx (64-UInt(immh:immb))
297 1xxx (128-UInt(immh:immb)) */
298 imm = (16 << (unsigned)val) - info->imm.value;
299 else
300 /* immh:immb
301 immh <shift>
302 0000 SEE AdvSIMD modified immediate
303 0001 (UInt(immh:immb)-8)
304 001x (UInt(immh:immb)-16)
305 01xx (UInt(immh:immb)-32)
306 1xxx (UInt(immh:immb)-64) */
307 imm = info->imm.value + (8 << (unsigned)val);
308 insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
309
310 return NULL;
311 }
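/* Worked example: SSHR <Vd>.4S, <Vn>.4S, #3 has the standard qualifier
   value 0b101 (4S), so Q = 1 and val becomes 2 after the shift; the
   right-shift amount is then encoded as immh:immb = (16 << 2) - 3 = 61,
   i.e. immh = 0111 (32-bit elements) and immb = 101.  */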
312
313 /* Insert fields for e.g. the immediate operands in
314 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
315 const char *
316 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
317 aarch64_insn *code,
318 const aarch64_inst *inst ATTRIBUTE_UNUSED)
319 {
320 int64_t imm;
321 /* Maximum of two fields to insert. */
322 assert (self->fields[2] == FLD_NIL);
323
324 imm = info->imm.value;
325 if (operand_need_shift_by_two (self))
326 imm >>= 2;
327 if (self->fields[1] == FLD_NIL)
328 insert_field (self->fields[0], code, imm, 0);
329 else
330 /* e.g. TBZ b5:b40. */
331 insert_fields (code, imm, 0, 2, self->fields[1], self->fields[0]);
332 return NULL;
333 }
334
335 /* Insert immediate and its shift amount for e.g. the last operand in
336 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
337 const char *
338 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
339 aarch64_insn *code, const aarch64_inst *inst)
340 {
341 /* imm16 */
342 aarch64_ins_imm (self, info, code, inst);
343 /* hw */
344 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
345 return NULL;
346 }
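/* For example, MOVZ <Xd>, #0x1234, LSL #32 places 0x1234 in imm16 and
   32 >> 4 = 2 in hw.  */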
347
348 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
349 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
350 const char *
351 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
352 const aarch64_opnd_info *info,
353 aarch64_insn *code,
354 const aarch64_inst *inst)
355 {
356 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
357 uint64_t imm = info->imm.value;
358 enum aarch64_modifier_kind kind = info->shifter.kind;
359 int amount = info->shifter.amount;
360 aarch64_field field = {0, 0};
361
362 /* a:b:c:d:e:f:g:h */
363 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
364 {
365 /* Either MOVI <Dd>, #<imm>
366 or MOVI <Vd>.2D, #<imm>.
367 <imm> is a 64-bit immediate
368 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
369 encoded in "a:b:c:d:e:f:g:h". */
370 imm = aarch64_shrink_expanded_imm8 (imm);
371 assert ((int)imm >= 0);
372 }
373 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
374
375 if (kind == AARCH64_MOD_NONE)
376 return NULL;
377
378 /* shift amount partially in cmode */
379 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
380 if (kind == AARCH64_MOD_LSL)
381 {
382 /* AARCH64_MOD_LSL: shift zeros. */
383 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
384 assert (esize == 4 || esize == 2 || esize == 1);
385 /* For 8-bit move immediate, the optional LSL #0 does not require
386 encoding. */
387 if (esize == 1)
388 return NULL;
389 amount >>= 3;
390 if (esize == 4)
391 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
392 else
393 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
394 }
395 else
396 {
397 /* AARCH64_MOD_MSL: shift ones. */
398 amount >>= 4;
399 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
400 }
401 insert_field_2 (&field, code, amount, 0);
402
403 return NULL;
404 }
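/* Worked example for the LSL case: MOVI <Vd>.4S, #<imm8>, LSL #16 has
   esize == 4, so amount becomes 16 >> 3 = 2 and is written into the
   two-bit sub-field cmode<2:1>; the remaining cmode bits come from the
   base opcode value.  */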
405
406 /* Insert #<fbits> for the immediate operand in fp fixed-point instructions,
407 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
408 const char *
409 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
410 aarch64_insn *code,
411 const aarch64_inst *inst ATTRIBUTE_UNUSED)
412 {
413 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
414 return NULL;
415 }
416
417 /* Insert arithmetic immediate for e.g. the last operand in
418 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
419 const char *
420 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
421 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
422 {
423 /* shift */
424 aarch64_insn value = info->shifter.amount ? 1 : 0;
425 insert_field (self->fields[0], code, value, 0);
426 /* imm12 (unsigned) */
427 insert_field (self->fields[1], code, info->imm.value, 0);
428 return NULL;
429 }
430
431 /* Insert logical/bitmask immediate for e.g. the last operand in
432 ORR <Wd|WSP>, <Wn>, #<imm>. */
433 const char *
434 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
435 aarch64_insn *code, const aarch64_inst *inst)
436 {
437 aarch64_insn value;
438 uint64_t imm = info->imm.value;
439 int is32 = aarch64_get_qualifier_esize (inst->operands[0].qualifier) == 4;
440
441 if (inst->opcode->op == OP_BIC)
442 imm = ~imm;
443 if (aarch64_logical_immediate_p (imm, is32, &value) == FALSE)
444 /* The constraint check should have guaranteed this wouldn't happen. */
445 assert (0);
446
447 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
448 self->fields[0]);
449 return NULL;
450 }
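/* Worked example: ORR <Wd>, <Wn>, #0xff is a 32-bit bitmask immediate of
   eight consecutive ones with no rotation, which should be encoded as
   N:immr:imms = 0:000000:000111; the operand's fields are passed to
   insert_fields in reverse order so that the least significant part of
   that value (imms) is inserted first.  */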
451
452 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
453 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
454 const char *
455 aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
456 aarch64_insn *code, const aarch64_inst *inst)
457 {
458 aarch64_insn value = 0;
459
460 assert (info->idx == 0);
461
462 /* Rt */
463 aarch64_ins_regno (self, info, code, inst);
464 if (inst->opcode->iclass == ldstpair_indexed
465 || inst->opcode->iclass == ldstnapair_offs
466 || inst->opcode->iclass == ldstpair_off
467 || inst->opcode->iclass == loadlit)
468 {
469 /* size */
470 switch (info->qualifier)
471 {
472 case AARCH64_OPND_QLF_S_S: value = 0; break;
473 case AARCH64_OPND_QLF_S_D: value = 1; break;
474 case AARCH64_OPND_QLF_S_Q: value = 2; break;
475 default: assert (0);
476 }
477 insert_field (FLD_ldst_size, code, value, 0);
478 }
479 else
480 {
481 /* opc[1]:size */
482 value = aarch64_get_qualifier_standard_value (info->qualifier);
483 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
484 }
485
486 return NULL;
487 }
488
489 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
490 const char *
491 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
492 const aarch64_opnd_info *info, aarch64_insn *code,
493 const aarch64_inst *inst ATTRIBUTE_UNUSED)
494 {
495 /* Rn */
496 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
497 return NULL;
498 }
499
500 /* Encode the address operand for e.g.
501 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
502 const char *
503 aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
504 const aarch64_opnd_info *info, aarch64_insn *code,
505 const aarch64_inst *inst ATTRIBUTE_UNUSED)
506 {
507 aarch64_insn S;
508 enum aarch64_modifier_kind kind = info->shifter.kind;
509
510 /* Rn */
511 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
512 /* Rm */
513 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
514 /* option */
515 if (kind == AARCH64_MOD_LSL)
516 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven encoding. */
517 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
518 /* S */
519 if (info->qualifier != AARCH64_OPND_QLF_S_B)
520 S = info->shifter.amount != 0;
521 else
522 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]:
523 S <amount>
524 0 [absent]
525 1 #0
526 Must be #0 if <extend> is explicitly LSL. */
527 S = info->shifter.operator_present && info->shifter.amount_present;
528 insert_field (FLD_S, code, S, 0);
529
530 return NULL;
531 }
532
533 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
534 const char *
535 aarch64_ins_addr_simm (const aarch64_operand *self,
536 const aarch64_opnd_info *info,
537 aarch64_insn *code,
538 const aarch64_inst *inst ATTRIBUTE_UNUSED)
539 {
540 int imm;
541
542 /* Rn */
543 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
544 /* simm (imm9 or imm7) */
545 imm = info->addr.offset.imm;
546 if (self->fields[0] == FLD_imm7)
547 /* Scaled immediate in ld/st pair instructions. */
548 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
549 insert_field (self->fields[0], code, imm, 0);
550 /* pre/post-index */
551 if (info->addr.writeback)
552 {
553 assert (inst->opcode->iclass != ldst_unscaled
554 && inst->opcode->iclass != ldstnapair_offs
555 && inst->opcode->iclass != ldstpair_off
556 && inst->opcode->iclass != ldst_unpriv);
557 assert (info->addr.preind != info->addr.postind);
558 if (info->addr.preind)
559 insert_field (self->fields[1], code, 1, 0);
560 }
561
562 return NULL;
563 }
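/* Worked example for the imm7 case: STP <Xt1>, <Xt2>, [<Xn|SP>, #-16]
   divides the offset by the 8-byte element size, giving -2, whose low
   seven bits (0b1111110) end up in imm7.  */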
564
565 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
566 const char *
567 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
568 const aarch64_opnd_info *info,
569 aarch64_insn *code,
570 const aarch64_inst *inst ATTRIBUTE_UNUSED)
571 {
572 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
573
574 /* Rn */
575 insert_field (self->fields[0], code, info->addr.base_regno, 0);
576 /* uimm12 */
577 insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
578 return NULL;
579 }
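/* For example, LDR <Xt>, [<Xn|SP>, #2048] has an 8-byte element size, so
   shift = 3 and the uimm12 field receives 2048 >> 3 = 256.  */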
580
581 /* Encode the address operand for e.g.
582 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
583 const char *
584 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
585 const aarch64_opnd_info *info, aarch64_insn *code,
586 const aarch64_inst *inst ATTRIBUTE_UNUSED)
587 {
588 /* Rn */
589 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
590 /* Rm | #<amount> */
591 if (info->addr.offset.is_reg)
592 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
593 else
594 insert_field (FLD_Rm, code, 0x1f, 0);
595 return NULL;
596 }
597
598 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
599 const char *
600 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
601 const aarch64_opnd_info *info, aarch64_insn *code,
602 const aarch64_inst *inst ATTRIBUTE_UNUSED)
603 {
604 /* cond */
605 insert_field (FLD_cond, code, info->cond->value, 0);
606 return NULL;
607 }
608
609 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
610 const char *
611 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
612 const aarch64_opnd_info *info, aarch64_insn *code,
613 const aarch64_inst *inst ATTRIBUTE_UNUSED)
614 {
615 /* op0:op1:CRn:CRm:op2 */
616 insert_fields (code, info->sysreg, inst->opcode->mask, 5,
617 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
618 return NULL;
619 }
620
621 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
622 const char *
623 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
624 const aarch64_opnd_info *info, aarch64_insn *code,
625 const aarch64_inst *inst ATTRIBUTE_UNUSED)
626 {
627 /* op1:op2 */
628 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
629 FLD_op2, FLD_op1);
630 return NULL;
631 }
632
633 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
634 const char *
635 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
636 const aarch64_opnd_info *info, aarch64_insn *code,
637 const aarch64_inst *inst ATTRIBUTE_UNUSED)
638 {
639 /* op1:CRn:CRm:op2 */
640 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
641 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
642 return NULL;
643 }
644
645 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
646
647 const char *
648 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
649 const aarch64_opnd_info *info, aarch64_insn *code,
650 const aarch64_inst *inst ATTRIBUTE_UNUSED)
651 {
652 /* CRm */
653 insert_field (FLD_CRm, code, info->barrier->value, 0);
654 return NULL;
655 }
656
657 /* Encode the prefetch operation option operand for e.g.
658 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
659
660 const char *
661 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
662 const aarch64_opnd_info *info, aarch64_insn *code,
663 const aarch64_inst *inst ATTRIBUTE_UNUSED)
664 {
665 /* prfop in Rt */
666 insert_field (FLD_Rt, code, info->prfop->value, 0);
667 return NULL;
668 }
669
670 /* Encode the extended register operand for e.g.
671 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
672 const char *
673 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
674 const aarch64_opnd_info *info, aarch64_insn *code,
675 const aarch64_inst *inst ATTRIBUTE_UNUSED)
676 {
677 enum aarch64_modifier_kind kind;
678
679 /* Rm */
680 insert_field (FLD_Rm, code, info->reg.regno, 0);
681 /* option */
682 kind = info->shifter.kind;
683 if (kind == AARCH64_MOD_LSL)
684 kind = info->qualifier == AARCH64_OPND_QLF_W
685 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
686 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
687 /* imm3 */
688 insert_field (FLD_imm3, code, info->shifter.amount, 0);
689
690 return NULL;
691 }
692
693 /* Encode the shifted register operand for e.g.
694 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
695 const char *
696 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
697 const aarch64_opnd_info *info, aarch64_insn *code,
698 const aarch64_inst *inst ATTRIBUTE_UNUSED)
699 {
700 /* Rm */
701 insert_field (FLD_Rm, code, info->reg.regno, 0);
702 /* shift */
703 insert_field (FLD_shift, code,
704 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
705 /* imm6 */
706 insert_field (FLD_imm6, code, info->shifter.amount, 0);
707
708 return NULL;
709 }
710
711 /* Miscellaneous encoding functions. */
712
713 /* Encode size[0], i.e. bit 22, for
714 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
715
716 static void
717 encode_asimd_fcvt (aarch64_inst *inst)
718 {
719 aarch64_insn value;
720 aarch64_field field = {0, 0};
721 enum aarch64_opnd_qualifier qualifier;
722
723 switch (inst->opcode->op)
724 {
725 case OP_FCVTN:
726 case OP_FCVTN2:
727 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
728 qualifier = inst->operands[1].qualifier;
729 break;
730 case OP_FCVTL:
731 case OP_FCVTL2:
732 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
733 qualifier = inst->operands[0].qualifier;
734 break;
735 default:
736 assert (0);
737 }
738 assert (qualifier == AARCH64_OPND_QLF_V_4S
739 || qualifier == AARCH64_OPND_QLF_V_2D);
740 value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
741 gen_sub_field (FLD_size, 0, 1, &field);
742 insert_field_2 (&field, &inst->value, value, 0);
743 }
744
745 /* Encode size[0], i.e. bit 22, for
746 e.g. FCVTXN <Vb><d>, <Va><n>. */
747
748 static void
749 encode_asisd_fcvtxn (aarch64_inst *inst)
750 {
751 aarch64_insn val = 1;
752 aarch64_field field = {0, 0};
753 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
754 gen_sub_field (FLD_size, 0, 1, &field);
755 insert_field_2 (&field, &inst->value, val, 0);
756 }
757
758 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
759 static void
760 encode_fcvt (aarch64_inst *inst)
761 {
762 aarch64_insn val;
763 const aarch64_field field = {15, 2};
764
765 /* opc dstsize */
766 switch (inst->operands[0].qualifier)
767 {
768 case AARCH64_OPND_QLF_S_S: val = 0; break;
769 case AARCH64_OPND_QLF_S_D: val = 1; break;
770 case AARCH64_OPND_QLF_S_H: val = 3; break;
771 default: abort ();
772 }
773 insert_field_2 (&field, &inst->value, val, 0);
774
775 return;
776 }
777
778 /* Do miscellaneous encodings that are not common enough to be driven by
779 flags. */
780
781 static void
782 do_misc_encoding (aarch64_inst *inst)
783 {
784 switch (inst->opcode->op)
785 {
786 case OP_FCVT:
787 encode_fcvt (inst);
788 break;
789 case OP_FCVTN:
790 case OP_FCVTN2:
791 case OP_FCVTL:
792 case OP_FCVTL2:
793 encode_asimd_fcvt (inst);
794 break;
795 case OP_FCVTXN_S:
796 encode_asisd_fcvtxn (inst);
797 break;
798 default: break;
799 }
800 }
801
802 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
803 static void
804 encode_sizeq (aarch64_inst *inst)
805 {
806 aarch64_insn sizeq;
807 enum aarch64_field_kind kind;
808 int idx;
809
810 /* Get the index of the operand whose information we are going to use
811 to encode the size and Q fields.
812 This is deduced from the possible valid qualifier lists. */
813 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
814 DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
815 aarch64_get_qualifier_name (inst->operands[idx].qualifier));
816 sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
817 /* Q */
818 insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
819 /* size */
820 if (inst->opcode->iclass == asisdlse
821 || inst->opcode->iclass == asisdlsep
822 || inst->opcode->iclass == asisdlso
823 || inst->opcode->iclass == asisdlsop)
824 kind = FLD_vldst_size;
825 else
826 kind = FLD_size;
827 insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
828 }
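/* For example, for SHADD <Vd>.8H, <Vn>.8H, <Vm>.8H the standard value of
   the 8H qualifier is 0b011, so Q = 1 and size = 01.  */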
829
830 /* Opcodes that have fields shared by multiple operands are usually flagged
831 accordingly. In this function, we detect such flags and use the
832 information in one of the related operands to do the encoding. That
833 operand is not an arbitrary one, but one that carries enough
834 information for such an encoding. */
835
836 static void
837 do_special_encoding (struct aarch64_inst *inst)
838 {
839 int idx;
840 aarch64_insn value = 0;
841
842 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
843
844 /* Condition for truly conditionally executed instructions, e.g. b.cond. */
845 if (inst->opcode->flags & F_COND)
846 {
847 insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
848 }
849 if (inst->opcode->flags & F_SF)
850 {
851 idx = select_operand_for_sf_field_coding (inst->opcode);
852 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
853 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
854 ? 1 : 0;
855 insert_field (FLD_sf, &inst->value, value, 0);
856 if (inst->opcode->flags & F_N)
857 insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
858 }
859 if (inst->opcode->flags & F_LSE_SZ)
860 {
861 idx = select_operand_for_sf_field_coding (inst->opcode);
862 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
863 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
864 ? 1 : 0;
865 insert_field (FLD_lse_sz, &inst->value, value, 0);
866 }
867 if (inst->opcode->flags & F_SIZEQ)
868 encode_sizeq (inst);
869 if (inst->opcode->flags & F_FPTYPE)
870 {
871 idx = select_operand_for_fptype_field_coding (inst->opcode);
872 switch (inst->operands[idx].qualifier)
873 {
874 case AARCH64_OPND_QLF_S_S: value = 0; break;
875 case AARCH64_OPND_QLF_S_D: value = 1; break;
876 case AARCH64_OPND_QLF_S_H: value = 3; break;
877 default: assert (0);
878 }
879 insert_field (FLD_type, &inst->value, value, 0);
880 }
881 if (inst->opcode->flags & F_SSIZE)
882 {
883 enum aarch64_opnd_qualifier qualifier;
884 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
885 qualifier = inst->operands[idx].qualifier;
886 assert (qualifier >= AARCH64_OPND_QLF_S_B
887 && qualifier <= AARCH64_OPND_QLF_S_Q);
888 value = aarch64_get_qualifier_standard_value (qualifier);
889 insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
890 }
891 if (inst->opcode->flags & F_T)
892 {
893 int num; /* num of consecutive '0's on the right side of imm5<3:0>. */
894 aarch64_field field = {0, 0};
895 enum aarch64_opnd_qualifier qualifier;
896
897 idx = 0;
898 qualifier = inst->operands[idx].qualifier;
899 assert (aarch64_get_operand_class (inst->opcode->operands[0])
900 == AARCH64_OPND_CLASS_SIMD_REG
901 && qualifier >= AARCH64_OPND_QLF_V_8B
902 && qualifier <= AARCH64_OPND_QLF_V_2D);
903 /* imm5<3:0> q <t>
904 0000 x reserved
905 xxx1 0 8b
906 xxx1 1 16b
907 xx10 0 4h
908 xx10 1 8h
909 x100 0 2s
910 x100 1 4s
911 1000 0 reserved
912 1000 1 2d */
913 value = aarch64_get_qualifier_standard_value (qualifier);
914 insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
915 num = (int) value >> 1;
916 assert (num >= 0 && num <= 3);
917 gen_sub_field (FLD_imm5, 0, num + 1, &field);
918 insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
919 }
920 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
921 {
922 /* Use Rt to encode in the case of e.g.
923 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
924 enum aarch64_opnd_qualifier qualifier;
925 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
926 if (idx == -1)
927 /* Otherwise use the result operand, which has to be an integer
928 register. */
929 idx = 0;
930 assert (idx == 0 || idx == 1);
931 assert (aarch64_get_operand_class (inst->opcode->operands[idx])
932 == AARCH64_OPND_CLASS_INT_REG);
933 qualifier = inst->operands[idx].qualifier;
934 insert_field (FLD_Q, &inst->value,
935 aarch64_get_qualifier_standard_value (qualifier), 0);
936 }
937 if (inst->opcode->flags & F_LDS_SIZE)
938 {
939 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
940 enum aarch64_opnd_qualifier qualifier;
941 aarch64_field field = {0, 0};
942 assert (aarch64_get_operand_class (inst->opcode->operands[0])
943 == AARCH64_OPND_CLASS_INT_REG);
944 gen_sub_field (FLD_opc, 0, 1, &field);
945 qualifier = inst->operands[0].qualifier;
946 insert_field_2 (&field, &inst->value,
947 1 - aarch64_get_qualifier_standard_value (qualifier), 0);
948 }
949 /* Miscellaneous encoding as the last step. */
950 if (inst->opcode->flags & F_MISC)
951 do_misc_encoding (inst);
952
953 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
954 }
955
956 /* Converters that transform an alias opcode instruction into its real form. */
957
958 /* ROR <Wd>, <Ws>, #<shift>
959 is equivalent to:
960 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
961 static void
962 convert_ror_to_extr (aarch64_inst *inst)
963 {
964 copy_operand_info (inst, 3, 2);
965 copy_operand_info (inst, 2, 1);
966 }
967
968 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
969 is equivalent to:
970 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
971 static void
972 convert_xtl_to_shll (aarch64_inst *inst)
973 {
974 inst->operands[2].qualifier = inst->operands[1].qualifier;
975 inst->operands[2].imm.value = 0;
976 }
977
978 /* Convert e.g.
979 LSR <Xd>, <Xn>, #<shift>
980 to
981 UBFM <Xd>, <Xn>, #<shift>, #63 (#31 for the 32-bit form); ASR is handled likewise. */
982 static void
983 convert_sr_to_bfm (aarch64_inst *inst)
984 {
985 inst->operands[3].imm.value =
986 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
987 }
988
989 /* Convert MOV to ORR. */
990 static void
991 convert_mov_to_orr (aarch64_inst *inst)
992 {
993 /* MOV <Vd>.<T>, <Vn>.<T>
994 is equivalent to:
995 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
996 copy_operand_info (inst, 2, 1);
997 }
998
999 /* When <imms> >= <immr>, the instruction written:
1000 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1001 is equivalent to:
1002 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1003
1004 static void
1005 convert_bfx_to_bfm (aarch64_inst *inst)
1006 {
1007 int64_t lsb, width;
1008
1009 /* Convert the operand. */
1010 lsb = inst->operands[2].imm.value;
1011 width = inst->operands[3].imm.value;
1012 inst->operands[2].imm.value = lsb;
1013 inst->operands[3].imm.value = lsb + width - 1;
1014 }
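/* For example, SBFX <Xd>, <Xn>, #8, #16 becomes
   SBFM <Xd>, <Xn>, #8, #23, i.e. #<lsb>, #(<lsb>+<width>-1).  */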
1015
1016 /* When <imms> < <immr>, the instruction written:
1017 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1018 is equivalent to:
1019 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1020
1021 static void
1022 convert_bfi_to_bfm (aarch64_inst *inst)
1023 {
1024 int64_t lsb, width;
1025
1026 /* Convert the operand. */
1027 lsb = inst->operands[2].imm.value;
1028 width = inst->operands[3].imm.value;
1029 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1030 {
1031 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1032 inst->operands[3].imm.value = width - 1;
1033 }
1034 else
1035 {
1036 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1037 inst->operands[3].imm.value = width - 1;
1038 }
1039 }
1040
1041 /* The instruction written:
1042 LSL <Xd>, <Xn>, #<shift>
1043 is equivalent to:
1044 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1045
1046 static void
1047 convert_lsl_to_ubfm (aarch64_inst *inst)
1048 {
1049 int64_t shift = inst->operands[2].imm.value;
1050
1051 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1052 {
1053 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1054 inst->operands[3].imm.value = 31 - shift;
1055 }
1056 else
1057 {
1058 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1059 inst->operands[3].imm.value = 63 - shift;
1060 }
1061 }
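/* For example, LSL <Xd>, <Xn>, #4 becomes UBFM <Xd>, <Xn>, #60, #59,
   i.e. #((64-4)&0x3f), #(63-4).  */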
1062
1063 /* CINC <Wd>, <Wn>, <cond>
1064 is equivalent to:
1065 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1066
1067 static void
1068 convert_to_csel (aarch64_inst *inst)
1069 {
1070 copy_operand_info (inst, 3, 2);
1071 copy_operand_info (inst, 2, 1);
1072 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1073 }
1074
1075 /* CSET <Wd>, <cond>
1076 is equivalent to:
1077 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1078
1079 static void
1080 convert_cset_to_csinc (aarch64_inst *inst)
1081 {
1082 copy_operand_info (inst, 3, 1);
1083 copy_operand_info (inst, 2, 0);
1084 copy_operand_info (inst, 1, 0);
1085 inst->operands[1].reg.regno = 0x1f;
1086 inst->operands[2].reg.regno = 0x1f;
1087 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1088 }
1089
1090 /* MOV <Wd>, #<imm>
1091 is equivalent to:
1092 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1093
1094 static void
1095 convert_mov_to_movewide (aarch64_inst *inst)
1096 {
1097 int is32;
1098 uint32_t shift_amount;
1099 uint64_t value;
1100
1101 switch (inst->opcode->op)
1102 {
1103 case OP_MOV_IMM_WIDE:
1104 value = inst->operands[1].imm.value;
1105 break;
1106 case OP_MOV_IMM_WIDEN:
1107 value = ~inst->operands[1].imm.value;
1108 break;
1109 default:
1110 assert (0);
1111 }
1112 inst->operands[1].type = AARCH64_OPND_HALF;
1113 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1114 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1115 /* The constraint check should have guaranteed this wouldn't happen. */
1116 assert (0);
1117 value >>= shift_amount;
1118 value &= 0xffff;
1119 inst->operands[1].imm.value = value;
1120 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1121 inst->operands[1].shifter.amount = shift_amount;
1122 }
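/* For example, MOV <Xd>, #0x30000 is a wide constant with
   shift_amount = 16, so it is rewritten as MOVZ <Xd>, #0x3, LSL #16.  */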
1123
1124 /* MOV <Wd>, #<imm>
1125 is equivalent to:
1126 ORR <Wd>, WZR, #<imm>. */
1127
1128 static void
1129 convert_mov_to_movebitmask (aarch64_inst *inst)
1130 {
1131 copy_operand_info (inst, 2, 1);
1132 inst->operands[1].reg.regno = 0x1f;
1133 inst->operands[1].skip = 0;
1134 }
1135
1136 /* Some alias opcodes are assembled by being converted to their real form. */
1137
1138 static void
1139 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1140 {
1141 const aarch64_opcode *alias = inst->opcode;
1142
1143 if ((alias->flags & F_CONV) == 0)
1144 goto convert_to_real_return;
1145
1146 switch (alias->op)
1147 {
1148 case OP_ASR_IMM:
1149 case OP_LSR_IMM:
1150 convert_sr_to_bfm (inst);
1151 break;
1152 case OP_LSL_IMM:
1153 convert_lsl_to_ubfm (inst);
1154 break;
1155 case OP_CINC:
1156 case OP_CINV:
1157 case OP_CNEG:
1158 convert_to_csel (inst);
1159 break;
1160 case OP_CSET:
1161 case OP_CSETM:
1162 convert_cset_to_csinc (inst);
1163 break;
1164 case OP_UBFX:
1165 case OP_BFXIL:
1166 case OP_SBFX:
1167 convert_bfx_to_bfm (inst);
1168 break;
1169 case OP_SBFIZ:
1170 case OP_BFI:
1171 case OP_UBFIZ:
1172 convert_bfi_to_bfm (inst);
1173 break;
1174 case OP_MOV_V:
1175 convert_mov_to_orr (inst);
1176 break;
1177 case OP_MOV_IMM_WIDE:
1178 case OP_MOV_IMM_WIDEN:
1179 convert_mov_to_movewide (inst);
1180 break;
1181 case OP_MOV_IMM_LOG:
1182 convert_mov_to_movebitmask (inst);
1183 break;
1184 case OP_ROR_IMM:
1185 convert_ror_to_extr (inst);
1186 break;
1187 case OP_SXTL:
1188 case OP_SXTL2:
1189 case OP_UXTL:
1190 case OP_UXTL2:
1191 convert_xtl_to_shll (inst);
1192 break;
1193 default:
1194 break;
1195 }
1196
1197 convert_to_real_return:
1198 aarch64_replace_opcode (inst, real);
1199 }
1200
1201 /* Encode *INST_ORI of the opcode OPCODE.
1202 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1203 matched operand qualifier sequence in *QLF_SEQ. */
1204
1205 int
1206 aarch64_opcode_encode (const aarch64_opcode *opcode,
1207 const aarch64_inst *inst_ori, aarch64_insn *code,
1208 aarch64_opnd_qualifier_t *qlf_seq,
1209 aarch64_operand_error *mismatch_detail)
1210 {
1211 int i;
1212 const aarch64_opcode *aliased;
1213 aarch64_inst copy, *inst;
1214
1215 DEBUG_TRACE ("enter with %s", opcode->name);
1216
1217 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1218 copy = *inst_ori;
1219 inst = &copy;
1220
1221 assert (inst->opcode == NULL || inst->opcode == opcode);
1222 if (inst->opcode == NULL)
1223 inst->opcode = opcode;
1224
1225 /* Constrain the operands.
1226 After passing this, the encoding is guaranteed to succeed. */
1227 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
1228 {
1229 DEBUG_TRACE ("FAIL since operand constraint not met");
1230 return 0;
1231 }
1232
1233 /* Get the base value.
1234 Note: this has to be before the aliasing handling below in order to
1235 get the base value from the alias opcode before we move on to the
1236 aliased opcode for encoding. */
1237 inst->value = opcode->opcode;
1238
1239 /* No need to do anything else if the opcode does not have any operand. */
1240 if (aarch64_num_of_operands (opcode) == 0)
1241 goto encoding_exit;
1242
1243 /* Assign operand indexes and check types. Also put the matched
1244 operand qualifiers in *QLF_SEQ to return. */
1245 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1246 {
1247 assert (opcode->operands[i] == inst->operands[i].type);
1248 inst->operands[i].idx = i;
1249 if (qlf_seq != NULL)
1250 *qlf_seq = inst->operands[i].qualifier;
1251 }
1252
1253 aliased = aarch64_find_real_opcode (opcode);
1254 /* If the opcode is an alias and it does not ask for direct encoding by
1255 itself, the instruction will be transformed to the form of the real opcode
1256 and the encoding will be carried out using the rules for the aliased
1257 opcode. */
1258 if (aliased != NULL && (opcode->flags & F_CONV))
1259 {
1260 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1261 aliased->name, opcode->name);
1262 /* Convert the operands to the form of the real opcode. */
1263 convert_to_real (inst, aliased);
1264 opcode = aliased;
1265 }
1266
1267 aarch64_opnd_info *info = inst->operands;
1268
1269 /* Call the inserter of each operand. */
1270 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
1271 {
1272 const aarch64_operand *opnd;
1273 enum aarch64_opnd type = opcode->operands[i];
1274 if (type == AARCH64_OPND_NIL)
1275 break;
1276 if (info->skip)
1277 {
1278 DEBUG_TRACE ("skip the incomplete operand %d", i);
1279 continue;
1280 }
1281 opnd = &aarch64_operands[type];
1282 if (operand_has_inserter (opnd))
1283 aarch64_insert_operand (opnd, info, &inst->value, inst);
1284 }
1285
1286 /* Call opcode encoders indicated by flags. */
1287 if (opcode_has_special_coder (opcode))
1288 do_special_encoding (inst);
1289
1290 encoding_exit:
1291 DEBUG_TRACE ("exit with %s", opcode->name);
1292
1293 *code = inst->value;
1294
1295 return 1;
1296 }
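/* A sketch of typical use, assuming the caller has already selected
   OPCODE and filled in an aarch64_inst (emit_insn and report_error are
   placeholders for caller-side handling):

     aarch64_insn code;
     aarch64_operand_error err;
     if (aarch64_opcode_encode (opcode, &inst, &code, NULL, &err))
       emit_insn (code);        // 32-bit encoding ready to output
     else
       report_error (&err);     // operand constraints were not met
*/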