1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright 2012 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "aarch64-asm.h"
24
25 /* Utilities. */
26
27 /* The unnamed arguments consist of the number of fields and information about
28 these fields where the VALUE will be inserted into CODE. MASK can be zero or
29 the base mask of the opcode.
30
31 N.B. the fields are required to be in such an order that the least significant
32 field for VALUE comes first, e.g. the <index> in
33 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
34 is encoded in H:L:M in some cases; in such cases, the fields H:L:M should be
35 passed in the order of M, L, H. */
36
37 static inline void
38 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
39 {
40 uint32_t num;
41 const aarch64_field *field;
42 enum aarch64_field_kind kind;
43 va_list va;
44
45 va_start (va, mask);
46 num = va_arg (va, uint32_t);
47 assert (num <= 5);
48 while (num--)
49 {
50 kind = va_arg (va, enum aarch64_field_kind);
51 field = &fields[kind];
52 insert_field (kind, code, value, mask);
53 value >>= field->width;
54 }
55 va_end (va);
56 }
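/* For example, a 3-bit index encoded in H:L:M is passed as
   insert_fields (code, index, 0, 3, FLD_M, FLD_L, FLD_H):
   the low bits of the index are inserted into M first, then the value is
   shifted right by the width of M so that the remaining bits land in L and
   finally H.  */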
57
58 /* Operand inserters. */
59
60 /* Insert register number. */
61 const char *
62 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
63 aarch64_insn *code,
64 const aarch64_inst *inst ATTRIBUTE_UNUSED)
65 {
66 insert_field (self->fields[0], code, info->reg.regno, 0);
67 return NULL;
68 }
69
70 /* Insert register number, index and/or other data for SIMD register element
71 operand, e.g. the last source operand in
72 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
73 const char *
74 aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
75 aarch64_insn *code, const aarch64_inst *inst)
76 {
77 /* regno */
78 insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
79 /* index and/or type */
80 if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
81 {
82 int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
83 if (info->type == AARCH64_OPND_En
84 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
85 {
86 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
87 assert (info->idx == 1); /* Vn */
88 aarch64_insn value = info->reglane.index << pos;
89 insert_field (FLD_imm4, code, value, 0);
90 }
91 else
92 {
93 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
94 imm5<3:0> <V>
95 0000 RESERVED
96 xxx1 B
97 xx10 H
98 x100 S
99 1000 D */
100 aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
101 insert_field (FLD_imm5, code, value, 0);
102 }
103 }
104 else
105 {
106 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
107 or its vector counterpart SQDMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>]. */
108 switch (info->qualifier)
109 {
110 case AARCH64_OPND_QLF_S_H:
111 /* H:L:M */
112 insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
113 break;
114 case AARCH64_OPND_QLF_S_S:
115 /* H:L */
116 insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
117 break;
118 case AARCH64_OPND_QLF_S_D:
119 /* H */
120 insert_field (FLD_H, code, info->reglane.index, 0);
121 break;
122 default:
123 assert (0);
124 }
125 }
126 return NULL;
127 }
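/* For illustration of the imm5 encoding above: a .H element with <index> = 2
   gives pos = 1 and value = ((2 << 1) | 1) << 1 = 0b01010, i.e. imm5 = xx10
   with the index held in imm5<4:2>.  */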
128
129 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
130 const char *
131 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
132 aarch64_insn *code,
133 const aarch64_inst *inst ATTRIBUTE_UNUSED)
134 {
135 /* R */
136 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
137 /* len */
138 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
139 return NULL;
140 }
141
142 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
143 in AdvSIMD load/store instructions. */
144 const char *
145 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
146 const aarch64_opnd_info *info, aarch64_insn *code,
147 const aarch64_inst *inst)
148 {
149 aarch64_insn value;
150 /* Number of elements in each structure to be loaded/stored. */
151 unsigned num = get_opcode_dependent_value (inst->opcode);
152
153 /* Rt */
154 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
155 /* opcode */
156 switch (num)
157 {
158 case 1:
159 switch (info->reglist.num_regs)
160 {
161 case 1: value = 0x7; break;
162 case 2: value = 0xa; break;
163 case 3: value = 0x6; break;
164 case 4: value = 0x2; break;
165 default: assert (0);
166 }
167 break;
168 case 2:
169 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
170 break;
171 case 3:
172 value = 0x4;
173 break;
174 case 4:
175 value = 0x0;
176 break;
177 default:
178 assert (0);
179 }
180 insert_field (FLD_opcode, code, value, 0);
181
182 return NULL;
183 }
184
185 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
186 single structure to all lanes instructions. */
187 const char *
188 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
189 const aarch64_opnd_info *info, aarch64_insn *code,
190 const aarch64_inst *inst)
191 {
192 aarch64_insn value;
193 /* The opcode dependent area stores the number of elements in
194 each structure to be loaded/stored. */
195 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
196
197 /* Rt */
198 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
199 /* S */
200 value = (aarch64_insn) 0;
201 if (is_ld1r && info->reglist.num_regs == 2)
202 /* OP_LD1R does not have an alternating variant, but has a "two consecutive"
203 variant instead. */
204 value = (aarch64_insn) 1;
205 insert_field (FLD_S, code, value, 0);
206
207 return NULL;
208 }
209
210 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
211 operand, e.g. Vt in AdvSIMD load/store single element instructions. */
212 const char *
213 aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
214 const aarch64_opnd_info *info, aarch64_insn *code,
215 const aarch64_inst *inst ATTRIBUTE_UNUSED)
216 {
217 aarch64_field field = {0, 0};
218 aarch64_insn QSsize; /* fields Q:S:size. */
219 aarch64_insn opcodeh2; /* opcode<2:1> */
220
221 assert (info->reglist.has_index);
222
223 /* Rt */
224 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
225 /* Encode the index, opcode<2:1> and size. */
226 switch (info->qualifier)
227 {
228 case AARCH64_OPND_QLF_S_B:
229 /* Index encoded in "Q:S:size". */
230 QSsize = info->reglist.index;
231 opcodeh2 = 0x0;
232 break;
233 case AARCH64_OPND_QLF_S_H:
234 /* Index encoded in "Q:S:size<1>". */
235 QSsize = info->reglist.index << 1;
236 opcodeh2 = 0x1;
237 break;
238 case AARCH64_OPND_QLF_S_S:
239 /* Index encoded in "Q:S". */
240 QSsize = info->reglist.index << 2;
241 opcodeh2 = 0x2;
242 break;
243 case AARCH64_OPND_QLF_S_D:
244 /* Index encoded in "Q". */
245 QSsize = info->reglist.index << 3 | 0x1;
246 opcodeh2 = 0x2;
247 break;
248 default:
249 assert (0);
250 }
251 insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
252 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
253 insert_field_2 (&field, code, opcodeh2, 0);
254
255 return NULL;
256 }
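/* For illustration: with qualifier AARCH64_OPND_QLF_S_S and
   reglist.index == 3, QSsize = 3 << 2 = 0b1100, so the insertion above
   yields size = 00, S = 1 and Q = 1; the index is recoverable from Q:S
   as 0b11 = 3.  */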
257
258 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
259 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
260 or SSHR <V><d>, <V><n>, #<shift>. */
261 const char *
262 aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
263 const aarch64_opnd_info *info,
264 aarch64_insn *code, const aarch64_inst *inst)
265 {
266 unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
267 aarch64_insn Q, imm;
268
269 if (inst->opcode->iclass == asimdshf)
270 {
271 /* Q
272 immh Q <T>
273 0000 x SEE AdvSIMD modified immediate
274 0001 0 8B
275 0001 1 16B
276 001x 0 4H
277 001x 1 8H
278 01xx 0 2S
279 01xx 1 4S
280 1xxx 0 RESERVED
281 1xxx 1 2D */
282 Q = (val & 0x1) ? 1 : 0;
283 insert_field (FLD_Q, code, Q, inst->opcode->mask);
284 val >>= 1;
285 }
286
287 assert (info->type == AARCH64_OPND_IMM_VLSR
288 || info->type == AARCH64_OPND_IMM_VLSL);
289
290 if (info->type == AARCH64_OPND_IMM_VLSR)
291 /* immh:immb
292 immh <shift>
293 0000 SEE AdvSIMD modified immediate
294 0001 (16-UInt(immh:immb))
295 001x (32-UInt(immh:immb))
296 01xx (64-UInt(immh:immb))
297 1xxx (128-UInt(immh:immb)) */
298 imm = (16 << (unsigned)val) - info->imm.value;
299 else
300 /* immh:immb
301 immh <shift>
302 0000 SEE AdvSIMD modified immediate
303 0001 (UInt(immh:immb)-8)
304 001x (UInt(immh:immb)-16)
305 01xx (UInt(immh:immb)-32)
306 1xxx (UInt(immh:immb)-64) */
307 imm = info->imm.value + (8 << (unsigned)val);
308 insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
309
310 return NULL;
311 }
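/* For illustration: SSHR <Vd>.4S, <Vn>.4S, #7 operates on 32-bit elements,
   so Q = 1 and, per the table above, immh:immb = 64 - 7 = 57, i.e.
   immh = 0111, immb = 001.  */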
312
313 /* Insert fields for e.g. the immediate operands in
314 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
315 const char *
316 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
317 aarch64_insn *code,
318 const aarch64_inst *inst ATTRIBUTE_UNUSED)
319 {
320 int64_t imm;
321 /* Maximum of two fields to insert. */
322 assert (self->fields[2] == FLD_NIL);
323
324 imm = info->imm.value;
325 if (operand_need_shift_by_two (self))
326 imm >>= 2;
327 if (self->fields[1] == FLD_NIL)
328 insert_field (self->fields[0], code, imm, 0);
329 else
330 /* e.g. TBZ b5:b40. */
331 insert_fields (code, imm, 0, 2, self->fields[1], self->fields[0]);
332 return NULL;
333 }
334
335 /* Insert immediate and its shift amount for e.g. the last operand in
336 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
337 const char *
338 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
339 aarch64_insn *code, const aarch64_inst *inst)
340 {
341 /* imm16 */
342 aarch64_ins_imm (self, info, code, inst);
343 /* hw */
344 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
345 return NULL;
346 }
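/* For example, MOVZ <Xd>, #0x1234, LSL #32 stores 0x1234 in imm16 and
   encodes the shift as hw = 32 >> 4 = 2.  */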
347
348 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
349 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
350 const char *
351 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
352 const aarch64_opnd_info *info,
353 aarch64_insn *code,
354 const aarch64_inst *inst ATTRIBUTE_UNUSED)
355 {
356 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
357 uint64_t imm = info->imm.value;
358 enum aarch64_modifier_kind kind = info->shifter.kind;
359 int amount = info->shifter.amount;
360 aarch64_field field = {0, 0};
361
362 /* a:b:c:d:e:f:g:h */
363 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
364 {
365 /* Either MOVI <Dd>, #<imm>
366 or MOVI <Vd>.2D, #<imm>.
367 <imm> is a 64-bit immediate
368 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
369 encoded in "a:b:c:d:e:f:g:h". */
370 imm = aarch64_shrink_expanded_imm8 (imm);
371 assert ((int)imm >= 0);
372 }
373 assert (imm <= 255);
374 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
375
376 if (kind == AARCH64_MOD_NONE)
377 return NULL;
378
379 /* shift amount partially in cmode */
380 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
381 if (kind == AARCH64_MOD_LSL)
382 {
383 /* AARCH64_MOD_LSL: shift zeros. */
384 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
385 assert (esize == 4 || esize == 2);
386 amount >>= 3;
387 if (esize == 4)
388 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
389 else
390 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
391 }
392 else
393 {
394 /* AARCH64_MOD_MSL: shift ones. */
395 amount >>= 4;
396 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
397 }
398 insert_field_2 (&field, code, amount, 0);
399
400 return NULL;
401 }
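/* For illustration: for MOVI <Vd>.4S, #0xAB, LSL #8, the immediate 0xAB is
   placed in a:b:c:d:e:f:g:h, and with a 4-byte element size the shift is
   encoded as amount = 8 >> 3 = 1 in the two-bit sub-field cmode<2:1>.  */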
402
403 /* Insert #<fbits> for the immediate operand in FP fixed-point instructions,
404 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
405 const char *
406 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
407 aarch64_insn *code,
408 const aarch64_inst *inst ATTRIBUTE_UNUSED)
409 {
410 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
411 return NULL;
412 }
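/* For example, SCVTF <Dd>, <Wn>, #10 encodes the <fbits> operand as
   64 - 10 = 54.  */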
413
414 /* Insert arithmetic immediate for e.g. the last operand in
415 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
416 const char *
417 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
418 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
419 {
420 /* shift */
421 aarch64_insn value = info->shifter.amount ? 1 : 0;
422 insert_field (self->fields[0], code, value, 0);
423 /* imm12 (unsigned) */
424 insert_field (self->fields[1], code, info->imm.value, 0);
425 return NULL;
426 }
427
428 /* Insert logical/bitmask immediate for e.g. the last operand in
429 ORR <Wd|WSP>, <Wn>, #<imm>. */
430 const char *
431 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
432 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
433 {
434 aarch64_insn value;
435 uint64_t imm = info->imm.value;
436 int is32 = aarch64_get_qualifier_esize (inst->operands[0].qualifier) == 4;
437
438 if (inst->opcode->op == OP_BIC)
439 imm = ~imm;
440 if (aarch64_logical_immediate_p (imm, is32, &value) == FALSE)
441 /* The constraint check should have guaranteed this wouldn't happen. */
442 assert (0);
443
444 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
445 self->fields[0]);
446 return NULL;
447 }
448
449 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
450 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
451 const char *
452 aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
453 aarch64_insn *code, const aarch64_inst *inst)
454 {
455 aarch64_insn value;
456
457 assert (info->idx == 0);
458
459 /* Rt */
460 aarch64_ins_regno (self, info, code, inst);
461 if (inst->opcode->iclass == ldstpair_indexed
462 || inst->opcode->iclass == ldstnapair_offs
463 || inst->opcode->iclass == ldstpair_off
464 || inst->opcode->iclass == loadlit)
465 {
466 /* size */
467 switch (info->qualifier)
468 {
469 case AARCH64_OPND_QLF_S_S: value = 0; break;
470 case AARCH64_OPND_QLF_S_D: value = 1; break;
471 case AARCH64_OPND_QLF_S_Q: value = 2; break;
472 default: assert (0);
473 }
474 insert_field (FLD_ldst_size, code, value, 0);
475 }
476 else
477 {
478 /* opc[1]:size */
479 value = aarch64_get_qualifier_standard_value (info->qualifier);
480 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
481 }
482
483 return NULL;
484 }
485
486 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
487 const char *
488 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
489 const aarch64_opnd_info *info, aarch64_insn *code,
490 const aarch64_inst *inst ATTRIBUTE_UNUSED)
491 {
492 /* Rn */
493 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
494 return NULL;
495 }
496
497 /* Encode the address operand for e.g.
498 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
499 const char *
500 aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
501 const aarch64_opnd_info *info, aarch64_insn *code,
502 const aarch64_inst *inst ATTRIBUTE_UNUSED)
503 {
504 aarch64_insn S;
505 enum aarch64_modifier_kind kind = info->shifter.kind;
506
507 /* Rn */
508 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
509 /* Rm */
510 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
511 /* option */
512 if (kind == AARCH64_MOD_LSL)
513 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven encoding. */
514 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
515 /* S */
516 if (info->qualifier != AARCH64_OPND_QLF_S_B)
517 S = info->shifter.amount != 0;
518 else
519 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]:
520 S <amount>
521 0 [absent]
522 1 #0
523 Must be #0 if <extend> is explicitly LSL. */
524 S = info->shifter.operator_present && info->shifter.amount_present;
525 insert_field (FLD_S, code, S, 0);
526
527 return NULL;
528 }
529
530 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
531 const char *
532 aarch64_ins_addr_simm (const aarch64_operand *self,
533 const aarch64_opnd_info *info,
534 aarch64_insn *code,
535 const aarch64_inst *inst ATTRIBUTE_UNUSED)
536 {
537 int imm;
538
539 /* Rn */
540 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
541 /* simm (imm9 or imm7) */
542 imm = info->addr.offset.imm;
543 if (self->fields[0] == FLD_imm7)
544 /* Scaled immediate in ld/st pair instructions. */
545 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
546 insert_field (self->fields[0], code, imm, 0);
547 /* pre/post- index */
548 if (info->addr.writeback)
549 {
550 assert (inst->opcode->iclass != ldst_unscaled
551 && inst->opcode->iclass != ldstnapair_offs
552 && inst->opcode->iclass != ldstpair_off
553 && inst->opcode->iclass != ldst_unpriv);
554 assert (info->addr.preind != info->addr.postind);
555 if (info->addr.preind)
556 insert_field (self->fields[1], code, 1, 0);
557 }
558
559 return NULL;
560 }
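/* For example, an LDP of two X registers with a byte offset of #16 has an
   8-byte element size, so the imm7 field holds 16 >> 3 = 2.  */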
561
562 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
563 const char *
564 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
565 const aarch64_opnd_info *info,
566 aarch64_insn *code,
567 const aarch64_inst *inst ATTRIBUTE_UNUSED)
568 {
569 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
570
571 /* Rn */
572 insert_field (self->fields[0], code, info->addr.base_regno, 0);
573 /* uimm12 */
574 insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
575 return NULL;
576 }
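/* For example, LDR <Xt>, [<Xn|SP>, #2048] scales the byte offset by the
   8-byte element size, so uimm12 = 2048 >> 3 = 256.  */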
577
578 /* Encode the address operand for e.g.
579 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
580 const char *
581 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
582 const aarch64_opnd_info *info, aarch64_insn *code,
583 const aarch64_inst *inst ATTRIBUTE_UNUSED)
584 {
585 /* Rn */
586 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
587 /* Rm | #<amount> */
588 if (info->addr.offset.is_reg)
589 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
590 else
591 insert_field (FLD_Rm, code, 0x1f, 0);
592 return NULL;
593 }
594
595 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
596 const char *
597 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
598 const aarch64_opnd_info *info, aarch64_insn *code,
599 const aarch64_inst *inst ATTRIBUTE_UNUSED)
600 {
601 /* cond */
602 insert_field (FLD_cond, code, info->cond->value, 0);
603 return NULL;
604 }
605
606 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
607 const char *
608 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
609 const aarch64_opnd_info *info, aarch64_insn *code,
610 const aarch64_inst *inst ATTRIBUTE_UNUSED)
611 {
612 /* op0:op1:CRn:CRm:op2 */
613 insert_fields (code, info->sysreg, inst->opcode->mask, 5,
614 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
615 return NULL;
616 }
617
618 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
619 const char *
620 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
621 const aarch64_opnd_info *info, aarch64_insn *code,
622 const aarch64_inst *inst ATTRIBUTE_UNUSED)
623 {
624 /* op1:op2 */
625 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
626 FLD_op2, FLD_op1);
627 return NULL;
628 }
629
630 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
631 const char *
632 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
633 const aarch64_opnd_info *info, aarch64_insn *code,
634 const aarch64_inst *inst ATTRIBUTE_UNUSED)
635 {
636 /* op1:CRn:CRm:op2 */
637 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
638 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
639 return NULL;
640 }
641
642 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
643
644 const char *
645 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
646 const aarch64_opnd_info *info, aarch64_insn *code,
647 const aarch64_inst *inst ATTRIBUTE_UNUSED)
648 {
649 /* CRm */
650 insert_field (FLD_CRm, code, info->barrier->value, 0);
651 return NULL;
652 }
653
654 /* Encode the prefetch operation option operand for e.g.
655 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
656
657 const char *
658 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
659 const aarch64_opnd_info *info, aarch64_insn *code,
660 const aarch64_inst *inst ATTRIBUTE_UNUSED)
661 {
662 /* prfop in Rt */
663 insert_field (FLD_Rt, code, info->prfop->value, 0);
664 return NULL;
665 }
666
667 /* Encode the extended register operand for e.g.
668 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
669 const char *
670 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
671 const aarch64_opnd_info *info, aarch64_insn *code,
672 const aarch64_inst *inst ATTRIBUTE_UNUSED)
673 {
674 enum aarch64_modifier_kind kind;
675
676 /* Rm */
677 insert_field (FLD_Rm, code, info->reg.regno, 0);
678 /* option */
679 kind = info->shifter.kind;
680 if (kind == AARCH64_MOD_LSL)
681 kind = info->qualifier == AARCH64_OPND_QLF_W
682 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
683 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
684 /* imm3 */
685 insert_field (FLD_imm3, code, info->shifter.amount, 0);
686
687 return NULL;
688 }
689
690 /* Encode the shifted register operand for e.g.
691 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
692 const char *
693 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
694 const aarch64_opnd_info *info, aarch64_insn *code,
695 const aarch64_inst *inst ATTRIBUTE_UNUSED)
696 {
697 /* Rm */
698 insert_field (FLD_Rm, code, info->reg.regno, 0);
699 /* shift */
700 insert_field (FLD_shift, code,
701 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
702 /* imm6 */
703 insert_field (FLD_imm6, code, info->shifter.amount, 0);
704
705 return NULL;
706 }
707
708 /* Miscellaneous encoding functions. */
709
710 /* Encode size[0], i.e. bit 22, for
711 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
712
713 static void
714 encode_asimd_fcvt (aarch64_inst *inst)
715 {
716 aarch64_insn value;
717 aarch64_field field = {0, 0};
718 enum aarch64_opnd_qualifier qualifier;
719
720 switch (inst->opcode->op)
721 {
722 case OP_FCVTN:
723 case OP_FCVTN2:
724 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
725 qualifier = inst->operands[1].qualifier;
726 break;
727 case OP_FCVTL:
728 case OP_FCVTL2:
729 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
730 qualifier = inst->operands[0].qualifier;
731 break;
732 default:
733 assert (0);
734 }
735 assert (qualifier == AARCH64_OPND_QLF_V_4S
736 || qualifier == AARCH64_OPND_QLF_V_2D);
737 value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
738 gen_sub_field (FLD_size, 0, 1, &field);
739 insert_field_2 (&field, &inst->value, value, 0);
740 }
741
742 /* Encode size[0], i.e. bit 22, for
743 e.g. FCVTXN <Vb><d>, <Va><n>. */
744
745 static void
746 encode_asisd_fcvtxn (aarch64_inst *inst)
747 {
748 aarch64_insn val = 1;
749 aarch64_field field = {0, 0};
750 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
751 gen_sub_field (FLD_size, 0, 1, &field);
752 insert_field_2 (&field, &inst->value, val, 0);
753 }
754
755 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
756 static void
757 encode_fcvt (aarch64_inst *inst)
758 {
759 aarch64_insn val;
760 const aarch64_field field = {15, 2};
761
762 /* opc dstsize */
763 switch (inst->operands[0].qualifier)
764 {
765 case AARCH64_OPND_QLF_S_S: val = 0; break;
766 case AARCH64_OPND_QLF_S_D: val = 1; break;
767 case AARCH64_OPND_QLF_S_H: val = 3; break;
768 default: abort ();
769 }
770 insert_field_2 (&field, &inst->value, val, 0);
771
772 return;
773 }
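/* For example, FCVT <Dd>, <Sn> has a double-precision destination, so
   opc = 1 is written to the two-bit field starting at bit 15.  */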
774
775 /* Do miscellaneous encodings that are not common enough to be driven by
776 flags. */
777
778 static void
779 do_misc_encoding (aarch64_inst *inst)
780 {
781 switch (inst->opcode->op)
782 {
783 case OP_FCVT:
784 encode_fcvt (inst);
785 break;
786 case OP_FCVTN:
787 case OP_FCVTN2:
788 case OP_FCVTL:
789 case OP_FCVTL2:
790 encode_asimd_fcvt (inst);
791 break;
792 case OP_FCVTXN_S:
793 encode_asisd_fcvtxn (inst);
794 break;
795 default: break;
796 }
797 }
798
799 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
800 static void
801 encode_sizeq (aarch64_inst *inst)
802 {
803 aarch64_insn sizeq;
804 enum aarch64_field_kind kind;
805 int idx;
806
807 /* Get the index of the operand whose information we are going to use
808 to encode the size and Q fields.
809 This is deduced from the possible valid qualifier lists. */
810 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
811 DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
812 aarch64_get_qualifier_name (inst->operands[idx].qualifier));
813 sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
814 /* Q */
815 insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
816 /* size */
817 if (inst->opcode->iclass == asisdlse
818 || inst->opcode->iclass == asisdlsep
819 || inst->opcode->iclass == asisdlso
820 || inst->opcode->iclass == asisdlsop)
821 kind = FLD_vldst_size;
822 else
823 kind = FLD_size;
824 insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
825 }
826
827 /* Opcodes that have fields shared by multiple operands are usually flagged
828 with flags. In this function, we detect such flags and use the
829 information in one of the related operands to do the encoding. The 'one'
830 operand is not an arbitrary operand, but one of the operands that carries
831 enough information for such an encoding. */
832
833 static void
834 do_special_encoding (struct aarch64_inst *inst)
835 {
836 int idx;
837 aarch64_insn value;
838
839 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
840
841 /* Condition for truly conditionally-executed instructions, e.g. b.cond. */
842 if (inst->opcode->flags & F_COND)
843 {
844 insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
845 }
846 if (inst->opcode->flags & F_SF)
847 {
848 idx = select_operand_for_sf_field_coding (inst->opcode);
849 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
850 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
851 ? 1 : 0;
852 insert_field (FLD_sf, &inst->value, value, 0);
853 if (inst->opcode->flags & F_N)
854 insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
855 }
856 if (inst->opcode->flags & F_SIZEQ)
857 encode_sizeq (inst);
858 if (inst->opcode->flags & F_FPTYPE)
859 {
860 idx = select_operand_for_fptype_field_coding (inst->opcode);
861 switch (inst->operands[idx].qualifier)
862 {
863 case AARCH64_OPND_QLF_S_S: value = 0; break;
864 case AARCH64_OPND_QLF_S_D: value = 1; break;
865 case AARCH64_OPND_QLF_S_H: value = 3; break;
866 default: assert (0);
867 }
868 insert_field (FLD_type, &inst->value, value, 0);
869 }
870 if (inst->opcode->flags & F_SSIZE)
871 {
872 enum aarch64_opnd_qualifier qualifier;
873 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
874 qualifier = inst->operands[idx].qualifier;
875 assert (qualifier >= AARCH64_OPND_QLF_S_B
876 && qualifier <= AARCH64_OPND_QLF_S_Q);
877 value = aarch64_get_qualifier_standard_value (qualifier);
878 insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
879 }
880 if (inst->opcode->flags & F_T)
881 {
882 int num; /* num of consecutive '0's on the right side of imm5<3:0>. */
883 aarch64_field field = {0, 0};
884 enum aarch64_opnd_qualifier qualifier;
885
886 idx = 0;
887 qualifier = inst->operands[idx].qualifier;
888 assert (aarch64_get_operand_class (inst->opcode->operands[0])
889 == AARCH64_OPND_CLASS_SIMD_REG
890 && qualifier >= AARCH64_OPND_QLF_V_8B
891 && qualifier <= AARCH64_OPND_QLF_V_2D);
892 /* imm5<3:0> q <t>
893 0000 x reserved
894 xxx1 0 8b
895 xxx1 1 16b
896 xx10 0 4h
897 xx10 1 8h
898 x100 0 2s
899 x100 1 4s
900 1000 0 reserved
901 1000 1 2d */
902 value = aarch64_get_qualifier_standard_value (qualifier);
903 insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
904 num = (int) value >> 1;
905 assert (num >= 0 && num <= 3);
906 gen_sub_field (FLD_imm5, 0, num + 1, &field);
907 insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
908 }
909 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
910 {
911 /* Use Rt to encode in the case of e.g.
912 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
913 enum aarch64_opnd_qualifier qualifier;
914 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
915 if (idx == -1)
916 /* Otherwise use the result operand, which has to be an integer
917 register. */
918 idx = 0;
919 assert (idx == 0 || idx == 1);
920 assert (aarch64_get_operand_class (inst->opcode->operands[idx])
921 == AARCH64_OPND_CLASS_INT_REG);
922 qualifier = inst->operands[idx].qualifier;
923 insert_field (FLD_Q, &inst->value,
924 aarch64_get_qualifier_standard_value (qualifier), 0);
925 }
926 if (inst->opcode->flags & F_LDS_SIZE)
927 {
928 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
929 enum aarch64_opnd_qualifier qualifier;
930 aarch64_field field = {0, 0};
931 assert (aarch64_get_operand_class (inst->opcode->operands[0])
932 == AARCH64_OPND_CLASS_INT_REG);
933 gen_sub_field (FLD_opc, 0, 1, &field);
934 qualifier = inst->operands[0].qualifier;
935 insert_field_2 (&field, &inst->value,
936 1 - aarch64_get_qualifier_standard_value (qualifier), 0);
937 }
938 /* Miscellaneous encoding as the last step. */
939 if (inst->opcode->flags & F_MISC)
940 do_misc_encoding (inst);
941
942 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
943 }
944
945 /* Converters converting an alias opcode instruction to its real form. */
946
947 /* ROR <Wd>, <Ws>, #<shift>
948 is equivalent to:
949 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
950 static void
951 convert_ror_to_extr (aarch64_inst *inst)
952 {
953 copy_operand_info (inst, 3, 2);
954 copy_operand_info (inst, 2, 1);
955 }
956
957 /* Convert
958 LSR <Xd>, <Xn>, #<shift>
959 to
960 UBFM <Xd>, <Xn>, #<shift>, #63. */
961 static void
962 convert_sr_to_bfm (aarch64_inst *inst)
963 {
964 inst->operands[3].imm.value =
965 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
966 }
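/* For example, LSR <Xd>, <Xn>, #5 becomes UBFM <Xd>, <Xn>, #5, #63, while
   the 32-bit form uses #31 as the final operand.  */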
967
968 /* Convert MOV to ORR. */
969 static void
970 convert_mov_to_orr (aarch64_inst *inst)
971 {
972 /* MOV <Vd>.<T>, <Vn>.<T>
973 is equivalent to:
974 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
975 copy_operand_info (inst, 2, 1);
976 }
977
978 /* When <imms> >= <immr>, the instruction written:
979 SBFX <Xd>, <Xn>, #<lsb>, #<width>
980 is equivalent to:
981 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
982
983 static void
984 convert_bfx_to_bfm (aarch64_inst *inst)
985 {
986 int64_t lsb, width;
987
988 /* Convert the operand. */
989 lsb = inst->operands[2].imm.value;
990 width = inst->operands[3].imm.value;
991 inst->operands[2].imm.value = lsb;
992 inst->operands[3].imm.value = lsb + width - 1;
993 }
994
995 /* When <imms> < <immr>, the instruction written:
996 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
997 is equivalent to:
998 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
999
1000 static void
1001 convert_bfi_to_bfm (aarch64_inst *inst)
1002 {
1003 int64_t lsb, width;
1004
1005 /* Convert the operand. */
1006 lsb = inst->operands[2].imm.value;
1007 width = inst->operands[3].imm.value;
1008 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1009 {
1010 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1011 inst->operands[3].imm.value = width - 1;
1012 }
1013 else
1014 {
1015 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1016 inst->operands[3].imm.value = width - 1;
1017 }
1018 }
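/* For example, SBFIZ <Xd>, <Xn>, #4, #8 becomes
   SBFM <Xd>, <Xn>, #((64 - 4) & 0x3f), #(8 - 1), i.e. #60, #7.  */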
1019
1020 /* The instruction written:
1021 LSL <Xd>, <Xn>, #<shift>
1022 is equivalent to:
1023 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1024
1025 static void
1026 convert_lsl_to_ubfm (aarch64_inst *inst)
1027 {
1028 int64_t shift = inst->operands[2].imm.value;
1029
1030 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1031 {
1032 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1033 inst->operands[3].imm.value = 31 - shift;
1034 }
1035 else
1036 {
1037 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1038 inst->operands[3].imm.value = 63 - shift;
1039 }
1040 }
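/* For example, LSL <Xd>, <Xn>, #3 becomes UBFM <Xd>, <Xn>, #61, #60.  */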
1041
1042 /* CINC <Wd>, <Wn>, <cond>
1043 is equivalent to:
1044 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1045
1046 static void
1047 convert_to_csel (aarch64_inst *inst)
1048 {
1049 copy_operand_info (inst, 3, 2);
1050 copy_operand_info (inst, 2, 1);
1051 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1052 }
1053
1054 /* CSET <Wd>, <cond>
1055 is equivalent to:
1056 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1057
1058 static void
1059 convert_cset_to_csinc (aarch64_inst *inst)
1060 {
1061 copy_operand_info (inst, 3, 1);
1062 copy_operand_info (inst, 2, 0);
1063 copy_operand_info (inst, 1, 0);
1064 inst->operands[1].reg.regno = 0x1f;
1065 inst->operands[2].reg.regno = 0x1f;
1066 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1067 }
1068
1069 /* MOV <Wd>, #<imm>
1070 is equivalent to:
1071 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1072
1073 static void
1074 convert_mov_to_movewide (aarch64_inst *inst)
1075 {
1076 int is32;
1077 uint32_t shift_amount;
1078 uint64_t value;
1079
1080 switch (inst->opcode->op)
1081 {
1082 case OP_MOV_IMM_WIDE:
1083 value = inst->operands[1].imm.value;
1084 break;
1085 case OP_MOV_IMM_WIDEN:
1086 value = ~inst->operands[1].imm.value;
1087 break;
1088 default:
1089 assert (0);
1090 }
1091 inst->operands[1].type = AARCH64_OPND_HALF;
1092 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1093 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1094 /* The constraint check should have guaranteed this wouldn't happen. */
1095 assert (0);
1096 value >>= shift_amount;
1097 value &= 0xffff;
1098 inst->operands[1].imm.value = value;
1099 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1100 inst->operands[1].shifter.amount = shift_amount;
1101 }
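/* For example, MOV <Xd>, #0x12340000 becomes MOVZ <Xd>, #0x1234, LSL #16:
   aarch64_wide_constant_p reports shift_amount = 16 and the shifted value
   0x1234 is kept as the imm16 operand.  */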
1102
1103 /* MOV <Wd>, #<imm>
1104 is equivalent to:
1105 ORR <Wd>, WZR, #<imm>. */
1106
1107 static void
1108 convert_mov_to_movebitmask (aarch64_inst *inst)
1109 {
1110 copy_operand_info (inst, 2, 1);
1111 inst->operands[1].reg.regno = 0x1f;
1112 inst->operands[1].skip = 0;
1113 }
1114
1115 /* Some alias opcodes are assembled by being converted to their real form. */
1116
1117 static void
1118 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1119 {
1120 const aarch64_opcode *alias = inst->opcode;
1121
1122 if ((alias->flags & F_CONV) == 0)
1123 goto convert_to_real_return;
1124
1125 switch (alias->op)
1126 {
1127 case OP_ASR_IMM:
1128 case OP_LSR_IMM:
1129 convert_sr_to_bfm (inst);
1130 break;
1131 case OP_LSL_IMM:
1132 convert_lsl_to_ubfm (inst);
1133 break;
1134 case OP_CINC:
1135 case OP_CINV:
1136 case OP_CNEG:
1137 convert_to_csel (inst);
1138 break;
1139 case OP_CSET:
1140 case OP_CSETM:
1141 convert_cset_to_csinc (inst);
1142 break;
1143 case OP_UBFX:
1144 case OP_BFXIL:
1145 case OP_SBFX:
1146 convert_bfx_to_bfm (inst);
1147 break;
1148 case OP_SBFIZ:
1149 case OP_BFI:
1150 case OP_UBFIZ:
1151 convert_bfi_to_bfm (inst);
1152 break;
1153 case OP_MOV_V:
1154 convert_mov_to_orr (inst);
1155 break;
1156 case OP_MOV_IMM_WIDE:
1157 case OP_MOV_IMM_WIDEN:
1158 convert_mov_to_movewide (inst);
1159 break;
1160 case OP_MOV_IMM_LOG:
1161 convert_mov_to_movebitmask (inst);
1162 break;
1163 case OP_ROR_IMM:
1164 convert_ror_to_extr (inst);
1165 break;
1166 default:
1167 break;
1168 }
1169
1170 convert_to_real_return:
1171 aarch64_replace_opcode (inst, real);
1172 }
1173
1174 /* Encode *INST_ORI of the opcode OPCODE.
1175 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1176 matched operand qualifier sequence in *QLF_SEQ. */
1177
1178 int
1179 aarch64_opcode_encode (const aarch64_opcode *opcode,
1180 const aarch64_inst *inst_ori, aarch64_insn *code,
1181 aarch64_opnd_qualifier_t *qlf_seq,
1182 aarch64_operand_error *mismatch_detail)
1183 {
1184 int i;
1185 const aarch64_opcode *aliased;
1186 aarch64_inst copy, *inst;
1187
1188 DEBUG_TRACE ("enter with %s", opcode->name);
1189
1190 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1191 copy = *inst_ori;
1192 inst = &copy;
1193
1194 assert (inst->opcode == NULL || inst->opcode == opcode);
1195 if (inst->opcode == NULL)
1196 inst->opcode = opcode;
1197
1198 /* Constrain the operands.
1199 After passing this, the encoding is guaranteed to succeed. */
1200 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
1201 {
1202 DEBUG_TRACE ("FAIL since operand constraint not met");
1203 return 0;
1204 }
1205
1206 /* Get the base value.
1207 Note: this has to be before the aliasing handling below in order to
1208 get the base value from the alias opcode before we move on to the
1209 aliased opcode for encoding. */
1210 inst->value = opcode->opcode;
1211
1212 /* No need to do anything else if the opcode does not have any operand. */
1213 if (aarch64_num_of_operands (opcode) == 0)
1214 goto encoding_exit;
1215
1216 /* Assign operand indexes and check types. Also put the matched
1217 operand qualifiers in *QLF_SEQ to return. */
1218 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1219 {
1220 assert (opcode->operands[i] == inst->operands[i].type);
1221 inst->operands[i].idx = i;
1222 if (qlf_seq != NULL)
1223 *qlf_seq = inst->operands[i].qualifier;
1224 }
1225
1226 aliased = aarch64_find_real_opcode (opcode);
1227 /* If the opcode is an alias and it does not ask for direct encoding by
1228 itself, the instruction will be transformed to the form of real opcode
1229 and the encoding will be carried out using the rules for the aliased
1230 opcode. */
1231 if (aliased != NULL && (opcode->flags & F_CONV))
1232 {
1233 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1234 aliased->name, opcode->name);
1235 /* Convert the operands to the form of the real opcode. */
1236 convert_to_real (inst, aliased);
1237 opcode = aliased;
1238 }
1239
1240 aarch64_opnd_info *info = inst->operands;
1241
1242 /* Call the inserter of each operand. */
1243 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
1244 {
1245 const aarch64_operand *opnd;
1246 enum aarch64_opnd type = opcode->operands[i];
1247 if (type == AARCH64_OPND_NIL)
1248 break;
1249 if (info->skip)
1250 {
1251 DEBUG_TRACE ("skip the incomplete operand %d", i);
1252 continue;
1253 }
1254 opnd = &aarch64_operands[type];
1255 if (operand_has_inserter (opnd))
1256 aarch64_insert_operand (opnd, info, &inst->value, inst);
1257 }
1258
1259 /* Call opcode encoders indicated by flags. */
1260 if (opcode_has_special_coder (opcode))
1261 do_special_encoding (inst);
1262
1263 encoding_exit:
1264 DEBUG_TRACE ("exit with %s", opcode->name);
1265
1266 *code = inst->value;
1267
1268 return 1;
1269 }