[binutils][aarch64] New SVE_Zm4_11_INDEX operand.
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include "bfd_stdint.h"
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
34 #ifdef DEBUG_AARCH64
35 int debug_dump = FALSE;
36 #endif /* DEBUG_AARCH64 */
37
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array[32] = {
41 /* 0-7. */
42 "pow2",
43 "vl1",
44 "vl2",
45 "vl3",
46 "vl4",
47 "vl5",
48 "vl6",
49 "vl7",
50 /* 8-15. */
51 "vl8",
52 "vl16",
53 "vl32",
54 "vl64",
55 "vl128",
56 "vl256",
57 0,
58 0,
59 /* 16-23. */
60 0,
61 0,
62 0,
63 0,
64 0,
65 0,
66 0,
67 0,
68 /* 24-31. */
69 0,
70 0,
71 0,
72 0,
73 0,
74 "mul4",
75 "mul3",
76 "all"
77 };
78
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array[16] = {
82 /* 0-7. */
83 "pldl1keep",
84 "pldl1strm",
85 "pldl2keep",
86 "pldl2strm",
87 "pldl3keep",
88 "pldl3strm",
89 0,
90 0,
91 /* 8-15. */
92 "pstl1keep",
93 "pstl1strm",
94 "pstl2keep",
95 "pstl2strm",
96 "pstl3keep",
97 "pstl3strm",
98 0,
99 0
100 };
101
102 /* Helper functions to determine which operand is to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
121 enum data_pattern
122 {
123 DP_UNKNOWN,
124 DP_VECTOR_3SAME,
125 DP_VECTOR_LONG,
126 DP_VECTOR_WIDE,
127 DP_VECTOR_ACROSS_LANES,
128 };
129
130 static const char significant_operand_index [] =
131 {
132 0, /* DP_UNKNOWN, by default using operand 0. */
133 0, /* DP_VECTOR_3SAME */
134 1, /* DP_VECTOR_LONG */
135 2, /* DP_VECTOR_WIDE */
136 1, /* DP_VECTOR_ACROSS_LANES */
137 };
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time we need to select an operand. We could
191 either cache the calculated result or statically generate the data;
192 however, it is not obvious that the optimization would bring a significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
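/* As a worked example of the above: for a wide operation such as
   "uaddw v0.8h, v1.8h, v2.8b", the qualifier sequence (V_8H, V_8H, V_8B)
   is classified by get_data_pattern as DP_VECTOR_WIDE, so this function
   returns 2: the narrow source operand is the one whose qualifier
   determines the size:Q encoding.  */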
201 \f
202 const aarch64_field fields[] =
203 {
204 { 0, 0 }, /* NIL. */
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_2: in rmif instructions. */
244 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_2: in rmif instructions. */
246 { 10, 4 }, /* imm4_3: in adddg/subg instructions. */
247 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
248 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
249 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
250 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
251 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
252 { 5, 14 }, /* imm14: in test bit and branch instructions. */
253 { 5, 16 }, /* imm16: in exception instructions. */
254 { 0, 26 }, /* imm26: in unconditional branch instructions. */
255 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
256 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
257 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
258 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
259 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
260 { 22, 1 }, /* N: in logical (immediate) instructions. */
261 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
262 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
263 { 31, 1 }, /* sf: in integer data processing instructions. */
264 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
265 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
266 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
267 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
268 { 31, 1 }, /* b5: in the test bit and branch instructions. */
269 { 19, 5 }, /* b40: in the test bit and branch instructions. */
270 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
271 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
272 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
273 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
274 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
275 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
276 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
277 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
278 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
279 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
280 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
281 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
282 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
283 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
284 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
285 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
286 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
287 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
288 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
289 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
290 { 0, 5 }, /* SVE_Zd: SVE vector register, bits [4,0]. */
291 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
292 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
293 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
294 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
295 { 5, 1 }, /* SVE_i1: single-bit immediate. */
296 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
297 { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */
298 { 19, 2 }, /* SVE_i3h2: two high bits of 3-bit immediate, bits [20,19]. */
299 { 20, 1 }, /* SVE_i2h: high bit of 2-bit immediate, bit 20. */
300 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
301 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
302 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
303 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
304 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
305 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
306 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
307 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
308 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
309 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
310 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
311 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
312 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
313 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
314 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
315 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
316 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
317 { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
318 { 30, 1 }, /* SVE_sz2: 1-bit element size select. */
319 { 16, 4 }, /* SVE_tsz: triangular size select. */
320 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
321 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
322 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
323 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
324 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
325 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
326 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
327 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
328 { 12, 2 }, /* SM3: Indexed element SM3 2 bits index immediate. */
329 { 22, 1 }, /* sz: 1-bit element size select. */
330 };
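/* Each entry above is { lsb, width }: for instance imm19, described as
   { 5, 19 }, is a 19-bit field whose least significant bit is instruction
   bit 5, i.e. bits [23,5], matching the branch offset field of CBZ.  The
   encoder and decoder use these descriptions when inserting and extracting
   field values.  */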
331
332 enum aarch64_operand_class
333 aarch64_get_operand_class (enum aarch64_opnd type)
334 {
335 return aarch64_operands[type].op_class;
336 }
337
338 const char *
339 aarch64_get_operand_name (enum aarch64_opnd type)
340 {
341 return aarch64_operands[type].name;
342 }
343
344 /* Get operand description string.
345 This is usually for diagnostic purposes. */
346 const char *
347 aarch64_get_operand_desc (enum aarch64_opnd type)
348 {
349 return aarch64_operands[type].desc;
350 }
351
352 /* Table of all conditional affixes. */
353 const aarch64_cond aarch64_conds[16] =
354 {
355 {{"eq", "none"}, 0x0},
356 {{"ne", "any"}, 0x1},
357 {{"cs", "hs", "nlast"}, 0x2},
358 {{"cc", "lo", "ul", "last"}, 0x3},
359 {{"mi", "first"}, 0x4},
360 {{"pl", "nfrst"}, 0x5},
361 {{"vs"}, 0x6},
362 {{"vc"}, 0x7},
363 {{"hi", "pmore"}, 0x8},
364 {{"ls", "plast"}, 0x9},
365 {{"ge", "tcont"}, 0xa},
366 {{"lt", "tstop"}, 0xb},
367 {{"gt"}, 0xc},
368 {{"le"}, 0xd},
369 {{"al"}, 0xe},
370 {{"nv"}, 0xf},
371 };
372
373 const aarch64_cond *
374 get_cond_from_value (aarch64_insn value)
375 {
376 assert (value < 16);
377 return &aarch64_conds[(unsigned int) value];
378 }
379
380 const aarch64_cond *
381 get_inverted_cond (const aarch64_cond *cond)
382 {
383 return &aarch64_conds[cond->value ^ 0x1];
384 }
385
386 /* Table describing the operand extension/shifting operators; indexed by
387 enum aarch64_modifier_kind.
388
389 The value column provides the most common values for encoding modifiers,
390 which enables table-driven encoding/decoding for the modifiers. */
391 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
392 {
393 {"none", 0x0},
394 {"msl", 0x0},
395 {"ror", 0x3},
396 {"asr", 0x2},
397 {"lsr", 0x1},
398 {"lsl", 0x0},
399 {"uxtb", 0x0},
400 {"uxth", 0x1},
401 {"uxtw", 0x2},
402 {"uxtx", 0x3},
403 {"sxtb", 0x4},
404 {"sxth", 0x5},
405 {"sxtw", 0x6},
406 {"sxtx", 0x7},
407 {"mul", 0x0},
408 {"mul vl", 0x0},
409 {NULL, 0},
410 };
411
412 enum aarch64_modifier_kind
413 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
414 {
415 return desc - aarch64_operand_modifiers;
416 }
417
418 aarch64_insn
419 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
420 {
421 return aarch64_operand_modifiers[kind].value;
422 }
423
424 enum aarch64_modifier_kind
425 aarch64_get_operand_modifier_from_value (aarch64_insn value,
426 bfd_boolean extend_p)
427 {
428 if (extend_p == TRUE)
429 return AARCH64_MOD_UXTB + value;
430 else
431 return AARCH64_MOD_LSL - value;
432 }
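/* A worked example of the mapping above, relying on the fact that the
   modifier enumerators follow the same order as aarch64_operand_modifiers:
   with EXTEND_P == TRUE, value 2 gives AARCH64_MOD_UXTB + 2, i.e.
   AARCH64_MOD_UXTW, matching the 0x2 entry for "uxtw"; with
   EXTEND_P == FALSE, value 2 gives AARCH64_MOD_LSL - 2, i.e.
   AARCH64_MOD_ASR, matching the 0x2 entry for "asr".  */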
433
434 bfd_boolean
435 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
436 {
437 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
438 ? TRUE : FALSE;
439 }
440
441 static inline bfd_boolean
442 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
443 {
444 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
445 ? TRUE : FALSE;
446 }
447
448 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
449 {
450 { "#0x00", 0x0 },
451 { "oshld", 0x1 },
452 { "oshst", 0x2 },
453 { "osh", 0x3 },
454 { "#0x04", 0x4 },
455 { "nshld", 0x5 },
456 { "nshst", 0x6 },
457 { "nsh", 0x7 },
458 { "#0x08", 0x8 },
459 { "ishld", 0x9 },
460 { "ishst", 0xa },
461 { "ish", 0xb },
462 { "#0x0c", 0xc },
463 { "ld", 0xd },
464 { "st", 0xe },
465 { "sy", 0xf },
466 };
467
468 /* Table describing the operands supported by the aliases of the HINT
469 instruction.
470
471 The name column is the operand that is accepted for the alias. The value
472 column is the hint number of the alias. The list of operands is terminated
473 by NULL in the name column. */
474
475 const struct aarch64_name_value_pair aarch64_hint_options[] =
476 {
477 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
478 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
479 { "csync", HINT_OPD_CSYNC }, /* PSB CSYNC. */
480 { "c", HINT_OPD_C }, /* BTI C. */
481 { "j", HINT_OPD_J }, /* BTI J. */
482 { "jc", HINT_OPD_JC }, /* BTI JC. */
483 { NULL, HINT_OPD_NULL },
484 };
485
486 /* op -> op: load = 0 instruction = 1 store = 2
487 l -> level: 1-3
488 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
489 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
490 const struct aarch64_name_value_pair aarch64_prfops[32] =
491 {
492 { "pldl1keep", B(0, 1, 0) },
493 { "pldl1strm", B(0, 1, 1) },
494 { "pldl2keep", B(0, 2, 0) },
495 { "pldl2strm", B(0, 2, 1) },
496 { "pldl3keep", B(0, 3, 0) },
497 { "pldl3strm", B(0, 3, 1) },
498 { NULL, 0x06 },
499 { NULL, 0x07 },
500 { "plil1keep", B(1, 1, 0) },
501 { "plil1strm", B(1, 1, 1) },
502 { "plil2keep", B(1, 2, 0) },
503 { "plil2strm", B(1, 2, 1) },
504 { "plil3keep", B(1, 3, 0) },
505 { "plil3strm", B(1, 3, 1) },
506 { NULL, 0x0e },
507 { NULL, 0x0f },
508 { "pstl1keep", B(2, 1, 0) },
509 { "pstl1strm", B(2, 1, 1) },
510 { "pstl2keep", B(2, 2, 0) },
511 { "pstl2strm", B(2, 2, 1) },
512 { "pstl3keep", B(2, 3, 0) },
513 { "pstl3strm", B(2, 3, 1) },
514 { NULL, 0x16 },
515 { NULL, 0x17 },
516 { NULL, 0x18 },
517 { NULL, 0x19 },
518 { NULL, 0x1a },
519 { NULL, 0x1b },
520 { NULL, 0x1c },
521 { NULL, 0x1d },
522 { NULL, 0x1e },
523 { NULL, 0x1f },
524 };
525 #undef B
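/* For example, B (2, 3, 1) == (2 << 3) | ((3 - 1) << 1) | 1 == 0x15, which
   is the slot holding "pstl3strm" above: a prefetch for store, targeting
   the L3 cache, streaming (non-temporal).  */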
526 \f
527 /* Utilities on value constraint. */
528
529 static inline int
530 value_in_range_p (int64_t value, int low, int high)
531 {
532 return (value >= low && value <= high) ? 1 : 0;
533 }
534
535 /* Return true if VALUE is a multiple of ALIGN. */
536 static inline int
537 value_aligned_p (int64_t value, int align)
538 {
539 return (value % align) == 0;
540 }
541
542 /* A signed value fits in a field. */
543 static inline int
544 value_fit_signed_field_p (int64_t value, unsigned width)
545 {
546 assert (width < 32);
547 if (width < sizeof (value) * 8)
548 {
549 int64_t lim = (int64_t)1 << (width - 1);
550 if (value >= -lim && value < lim)
551 return 1;
552 }
553 return 0;
554 }
555
556 /* An unsigned value fits in a field. */
557 static inline int
558 value_fit_unsigned_field_p (int64_t value, unsigned width)
559 {
560 assert (width < 32);
561 if (width < sizeof (value) * 8)
562 {
563 int64_t lim = (int64_t)1 << width;
564 if (value >= 0 && value < lim)
565 return 1;
566 }
567 return 0;
568 }
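/* For instance, a signed 9-bit field holds values in [-256, 255], so
   value_fit_signed_field_p (255, 9) and value_fit_signed_field_p (-256, 9)
   return 1 while value_fit_signed_field_p (256, 9) returns 0; for the same
   width the unsigned variant accepts [0, 511].  */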
569
570 /* Return 1 if OPERAND is SP or WSP. */
571 int
572 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
573 {
574 return ((aarch64_get_operand_class (operand->type)
575 == AARCH64_OPND_CLASS_INT_REG)
576 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
577 && operand->reg.regno == 31);
578 }
579
580 /* Return 1 if OPERAND is XZR or WZR. */
581 int
582 aarch64_zero_register_p (const aarch64_opnd_info *operand)
583 {
584 return ((aarch64_get_operand_class (operand->type)
585 == AARCH64_OPND_CLASS_INT_REG)
586 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
587 && operand->reg.regno == 31);
588 }
589
590 /* Return true if the operand *OPERAND that has the operand code
591 OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER can also be
592 qualified by the qualifier TARGET. */
593
594 static inline int
595 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
596 aarch64_opnd_qualifier_t target)
597 {
598 switch (operand->qualifier)
599 {
600 case AARCH64_OPND_QLF_W:
601 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
602 return 1;
603 break;
604 case AARCH64_OPND_QLF_X:
605 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
606 return 1;
607 break;
608 case AARCH64_OPND_QLF_WSP:
609 if (target == AARCH64_OPND_QLF_W
610 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
611 return 1;
612 break;
613 case AARCH64_OPND_QLF_SP:
614 if (target == AARCH64_OPND_QLF_X
615 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
616 return 1;
617 break;
618 default:
619 break;
620 }
621
622 return 0;
623 }
624
625 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
626 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
627
628 Return NIL if more than one expected qualifier is found. */
629
630 aarch64_opnd_qualifier_t
631 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
632 int idx,
633 const aarch64_opnd_qualifier_t known_qlf,
634 int known_idx)
635 {
636 int i, saved_i;
637
638 /* Special case.
639
640 When the known qualifier is NIL, we have to assume that there is only
641 one qualifier sequence in the *QSEQ_LIST and return the corresponding
642 qualifier directly. One scenario is that for instruction
643 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
644 which has only one possible valid qualifier sequence
645 NIL, S_D
646 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
647 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
648
649 Because the qualifier NIL has dual roles in the qualifier sequence:
650 it can mean no qualifier for the operand, or that the qualifier sequence is
651 not in use (when all qualifiers in the sequence are NILs), we have to
652 handle this special case here. */
653 if (known_qlf == AARCH64_OPND_NIL)
654 {
655 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
656 return qseq_list[0][idx];
657 }
658
659 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
660 {
661 if (qseq_list[i][known_idx] == known_qlf)
662 {
663 if (saved_i != -1)
664 /* More than one sequence is found to have KNOWN_QLF at
665 KNOWN_IDX. */
666 return AARCH64_OPND_NIL;
667 saved_i = i;
668 }
669 }
670
671 return qseq_list[saved_i][idx];
672 }
673
674 enum operand_qualifier_kind
675 {
676 OQK_NIL,
677 OQK_OPD_VARIANT,
678 OQK_VALUE_IN_RANGE,
679 OQK_MISC,
680 };
681
682 /* Operand qualifier description. */
683 struct operand_qualifier_data
684 {
685 /* The usage of the three data fields depends on the qualifier kind. */
686 int data0;
687 int data1;
688 int data2;
689 /* Description. */
690 const char *desc;
691 /* Kind. */
692 enum operand_qualifier_kind kind;
693 };
694
695 /* Indexed by the operand qualifier enumerators. */
696 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
697 {
698 {0, 0, 0, "NIL", OQK_NIL},
699
700 /* Operand variant qualifiers.
701 First 3 fields:
702 element size, number of elements and common value for encoding. */
703
704 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
705 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
706 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
707 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
708
709 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
710 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
711 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
712 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
713 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
714 {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
715
716 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
717 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
718 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
719 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
720 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
721 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
722 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
723 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
724 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
725 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
726 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
727
728 {0, 0, 0, "z", OQK_OPD_VARIANT},
729 {0, 0, 0, "m", OQK_OPD_VARIANT},
730
731 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
732 {16, 0, 0, "tag", OQK_OPD_VARIANT},
733
734 /* Qualifiers constraining the value range.
735 First 3 fields:
736 Lower bound, upper bound, unused. */
737
738 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
739 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
740 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
741 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
742 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
743 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
744 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
745
746 /* Qualifiers for miscellaneous purposes.
747 First 3 fields:
748 unused, unused and unused. */
749
750 {0, 0, 0, "lsl", 0},
751 {0, 0, 0, "msl", 0},
752
753 {0, 0, 0, "retrieving", 0},
754 };
755
756 static inline bfd_boolean
757 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
758 {
759 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
760 ? TRUE : FALSE;
761 }
762
763 static inline bfd_boolean
764 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
765 {
766 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
767 ? TRUE : FALSE;
768 }
769
770 const char*
771 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
772 {
773 return aarch64_opnd_qualifiers[qualifier].desc;
774 }
775
776 /* Given an operand qualifier, return the expected data element size
777 of a qualified operand. */
778 unsigned char
779 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
780 {
781 assert (operand_variant_qualifier_p (qualifier) == TRUE);
782 return aarch64_opnd_qualifiers[qualifier].data0;
783 }
784
785 unsigned char
786 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
787 {
788 assert (operand_variant_qualifier_p (qualifier) == TRUE);
789 return aarch64_opnd_qualifiers[qualifier].data1;
790 }
791
792 aarch64_insn
793 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
794 {
795 assert (operand_variant_qualifier_p (qualifier) == TRUE);
796 return aarch64_opnd_qualifiers[qualifier].data2;
797 }
798
799 static int
800 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
801 {
802 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
803 return aarch64_opnd_qualifiers[qualifier].data0;
804 }
805
806 static int
807 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
808 {
809 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
810 return aarch64_opnd_qualifiers[qualifier].data1;
811 }
812
813 #ifdef DEBUG_AARCH64
814 void
815 aarch64_verbose (const char *str, ...)
816 {
817 va_list ap;
818 va_start (ap, str);
819 printf ("#### ");
820 vprintf (str, ap);
821 printf ("\n");
822 va_end (ap);
823 }
824
825 static inline void
826 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
827 {
828 int i;
829 printf ("#### \t");
830 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
831 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
832 printf ("\n");
833 }
834
835 static void
836 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
837 const aarch64_opnd_qualifier_t *qualifier)
838 {
839 int i;
840 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
841
842 aarch64_verbose ("dump_match_qualifiers:");
843 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
844 curr[i] = opnd[i].qualifier;
845 dump_qualifier_sequence (curr);
846 aarch64_verbose ("against");
847 dump_qualifier_sequence (qualifier);
848 }
849 #endif /* DEBUG_AARCH64 */
850
851 /* This function checks if the instruction described by OPCODE is a destructive
852 instruction based on the usage of the registers. It does not recognize
853 unary destructive instructions. */
854 bfd_boolean
855 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
856 {
857 int i = 0;
858 const enum aarch64_opnd *opnds = opcode->operands;
859
860 if (opnds[0] == AARCH64_OPND_NIL)
861 return FALSE;
862
863 while (opnds[++i] != AARCH64_OPND_NIL)
864 if (opnds[i] == opnds[0])
865 return TRUE;
866
867 return FALSE;
868 }
869
870 /* TODO: improve this; we could have an extra field at run time to
871 store the number of operands rather than calculating it every time. */
872
873 int
874 aarch64_num_of_operands (const aarch64_opcode *opcode)
875 {
876 int i = 0;
877 const enum aarch64_opnd *opnds = opcode->operands;
878 while (opnds[i++] != AARCH64_OPND_NIL)
879 ;
880 --i;
881 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
882 return i;
883 }
884
885 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
886 If it succeeds, fill the found sequence in *RET and return 1; otherwise return 0.
887
888 N.B. on entry, it is very likely that only some operands in *INST
889 have had their qualifiers established.
890
891 If STOP_AT is not -1, the function will only try to match
892 the qualifier sequence for operands before and including the operand
893 of index STOP_AT; and on success *RET will only be filled with the first
894 (STOP_AT+1) qualifiers.
895
896 A couple of examples of the matching algorithm:
897
898 X,W,NIL should match
899 X,W,NIL
900
901 NIL,NIL should match
902 X ,NIL
903
904 Apart from serving the main encoding routine, this can also be called
905 during or after the operand decoding. */
906
907 int
908 aarch64_find_best_match (const aarch64_inst *inst,
909 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
910 int stop_at, aarch64_opnd_qualifier_t *ret)
911 {
912 int found = 0;
913 int i, num_opnds;
914 const aarch64_opnd_qualifier_t *qualifiers;
915
916 num_opnds = aarch64_num_of_operands (inst->opcode);
917 if (num_opnds == 0)
918 {
919 DEBUG_TRACE ("SUCCEED: no operand");
920 return 1;
921 }
922
923 if (stop_at < 0 || stop_at >= num_opnds)
924 stop_at = num_opnds - 1;
925
926 /* For each pattern. */
927 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
928 {
929 int j;
930 qualifiers = *qualifiers_list;
931
932 /* Start as positive. */
933 found = 1;
934
935 DEBUG_TRACE ("%d", i);
936 #ifdef DEBUG_AARCH64
937 if (debug_dump)
938 dump_match_qualifiers (inst->operands, qualifiers);
939 #endif
940
941 /* Most opcodes have far fewer patterns in the list.
942 The first NIL qualifier indicates the end of the list. */
943 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
944 {
945 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
946 if (i)
947 found = 0;
948 break;
949 }
950
951 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
952 {
953 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
954 {
955 /* Either the operand does not have a qualifier, or the qualifier
956 for the operand needs to be deduced from the qualifier
957 sequence.
958 In the latter case, any constraint checking related to
959 the obtained qualifier should be done later in
960 operand_general_constraint_met_p. */
961 continue;
962 }
963 else if (*qualifiers != inst->operands[j].qualifier)
964 {
965 /* Unless the target qualifier can also qualify the operand
966 (which already has a non-NIL qualifier), non-equal
967 qualifiers are generally unmatched. */
968 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
969 continue;
970 else
971 {
972 found = 0;
973 break;
974 }
975 }
976 else
977 continue; /* Equal qualifiers are certainly matched. */
978 }
979
980 /* Qualifiers established. */
981 if (found == 1)
982 break;
983 }
984
985 if (found == 1)
986 {
987 /* Fill the result in *RET. */
988 int j;
989 qualifiers = *qualifiers_list;
990
991 DEBUG_TRACE ("complete qualifiers using list %d", i);
992 #ifdef DEBUG_AARCH64
993 if (debug_dump)
994 dump_qualifier_sequence (qualifiers);
995 #endif
996
997 for (j = 0; j <= stop_at; ++j, ++qualifiers)
998 ret[j] = *qualifiers;
999 for (; j < AARCH64_MAX_OPND_NUM; ++j)
1000 ret[j] = AARCH64_OPND_QLF_NIL;
1001
1002 DEBUG_TRACE ("SUCCESS");
1003 return 1;
1004 }
1005
1006 DEBUG_TRACE ("FAIL");
1007 return 0;
1008 }
1009
1010 /* Operand qualifier matching and resolving.
1011
1012 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1013 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1014
1015 If UPDATE_P is TRUE, update the qualifier(s) in *INST after the matching
1016 succeeds. */
1017
1018 static int
1019 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
1020 {
1021 int i, nops;
1022 aarch64_opnd_qualifier_seq_t qualifiers;
1023
1024 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1025 qualifiers))
1026 {
1027 DEBUG_TRACE ("matching FAIL");
1028 return 0;
1029 }
1030
1031 if (inst->opcode->flags & F_STRICT)
1032 {
1033 /* Require an exact qualifier match, even for NIL qualifiers. */
1034 nops = aarch64_num_of_operands (inst->opcode);
1035 for (i = 0; i < nops; ++i)
1036 if (inst->operands[i].qualifier != qualifiers[i])
1037 return FALSE;
1038 }
1039
1040 /* Update the qualifiers. */
1041 if (update_p == TRUE)
1042 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1043 {
1044 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1045 break;
1046 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1047 "update %s with %s for operand %d",
1048 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1049 aarch64_get_qualifier_name (qualifiers[i]), i);
1050 inst->operands[i].qualifier = qualifiers[i];
1051 }
1052
1053 DEBUG_TRACE ("matching SUCCESS");
1054 return 1;
1055 }
1056
1057 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1058 register by MOVZ.
1059
1060 IS32 indicates whether VALUE is a 32-bit immediate or not.
1061 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1062 amount will be returned in *SHIFT_AMOUNT. */
1063
1064 bfd_boolean
1065 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1066 {
1067 int amount;
1068
1069 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1070
1071 if (is32)
1072 {
1073 /* Allow all zeros or all ones in top 32-bits, so that
1074 32-bit constant expressions like ~0x80000000 are
1075 permitted. */
1076 uint64_t ext = value;
1077 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1078 /* Immediate out of range. */
1079 return FALSE;
1080 value &= (int64_t) 0xffffffff;
1081 }
1082
1083 /* first, try movz then movn */
1084 amount = -1;
1085 if ((value & ((int64_t) 0xffff << 0)) == value)
1086 amount = 0;
1087 else if ((value & ((int64_t) 0xffff << 16)) == value)
1088 amount = 16;
1089 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1090 amount = 32;
1091 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1092 amount = 48;
1093
1094 if (amount == -1)
1095 {
1096 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1097 return FALSE;
1098 }
1099
1100 if (shift_amount != NULL)
1101 *shift_amount = amount;
1102
1103 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1104
1105 return TRUE;
1106 }
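/* A rough usage sketch (the variable name is illustrative only):

     unsigned int shift;
     aarch64_wide_constant_p (0x12340000, 1, &shift);

   returns TRUE with SHIFT == 16, matching "movz w0, #0x1234, lsl #16",
   whereas 0x12345678 spans two 16-bit chunks and is rejected.  */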
1107
1108 /* Build the accepted values for immediate logical SIMD instructions.
1109
1110 The standard encodings of the immediate value are:
1111 N imms immr SIMD size R S
1112 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1113 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1114 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1115 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1116 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1117 0 11110s 00000r 2 UInt(r) UInt(s)
1118 where all-ones value of S is reserved.
1119
1120 Let's call E the SIMD size.
1121
1122 The immediate value is: S+1 bits '1' rotated to the right by R.
1123
1124 The total number of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1125 (remember S != E - 1). */
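   A worked example: 0x00ff00ff00ff00ff is S + 1 = 8 consecutive ones in a
   16-bit element (E = 16) with no rotation (R = 0), replicated across the
   register; per the table above it is encoded as N = 0, immr = 0b000000,
   imms = 0b100111.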
1126
1127 #define TOTAL_IMM_NB 5334
1128
1129 typedef struct
1130 {
1131 uint64_t imm;
1132 aarch64_insn encoding;
1133 } simd_imm_encoding;
1134
1135 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1136
1137 static int
1138 simd_imm_encoding_cmp(const void *i1, const void *i2)
1139 {
1140 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1141 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1142
1143 if (imm1->imm < imm2->imm)
1144 return -1;
1145 if (imm1->imm > imm2->imm)
1146 return +1;
1147 return 0;
1148 }
1149
1150 /* immediate bitfield standard encoding
1151 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1152 1 ssssss rrrrrr 64 rrrrrr ssssss
1153 0 0sssss 0rrrrr 32 rrrrr sssss
1154 0 10ssss 00rrrr 16 rrrr ssss
1155 0 110sss 000rrr 8 rrr sss
1156 0 1110ss 0000rr 4 rr ss
1157 0 11110s 00000r 2 r s */
1158 static inline int
1159 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1160 {
1161 return (is64 << 12) | (r << 6) | s;
1162 }
1163
1164 static void
1165 build_immediate_table (void)
1166 {
1167 uint32_t log_e, e, s, r, s_mask;
1168 uint64_t mask, imm;
1169 int nb_imms;
1170 int is64;
1171
1172 nb_imms = 0;
1173 for (log_e = 1; log_e <= 6; log_e++)
1174 {
1175 /* Get element size. */
1176 e = 1u << log_e;
1177 if (log_e == 6)
1178 {
1179 is64 = 1;
1180 mask = 0xffffffffffffffffull;
1181 s_mask = 0;
1182 }
1183 else
1184 {
1185 is64 = 0;
1186 mask = (1ull << e) - 1;
1187 /* log_e s_mask
1188 1 ((1 << 4) - 1) << 2 = 111100
1189 2 ((1 << 3) - 1) << 3 = 111000
1190 3 ((1 << 2) - 1) << 4 = 110000
1191 4 ((1 << 1) - 1) << 5 = 100000
1192 5 ((1 << 0) - 1) << 6 = 000000 */
1193 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1194 }
1195 for (s = 0; s < e - 1; s++)
1196 for (r = 0; r < e; r++)
1197 {
1198 /* s+1 consecutive bits to 1 (s < 63) */
1199 imm = (1ull << (s + 1)) - 1;
1200 /* rotate right by r */
1201 if (r != 0)
1202 imm = (imm >> r) | ((imm << (e - r)) & mask);
1203 /* replicate the constant depending on SIMD size */
1204 switch (log_e)
1205 {
1206 case 1: imm = (imm << 2) | imm;
1207 /* Fall through. */
1208 case 2: imm = (imm << 4) | imm;
1209 /* Fall through. */
1210 case 3: imm = (imm << 8) | imm;
1211 /* Fall through. */
1212 case 4: imm = (imm << 16) | imm;
1213 /* Fall through. */
1214 case 5: imm = (imm << 32) | imm;
1215 /* Fall through. */
1216 case 6: break;
1217 default: abort ();
1218 }
1219 simd_immediates[nb_imms].imm = imm;
1220 simd_immediates[nb_imms].encoding =
1221 encode_immediate_bitfield(is64, s | s_mask, r);
1222 nb_imms++;
1223 }
1224 }
1225 assert (nb_imms == TOTAL_IMM_NB);
1226 qsort(simd_immediates, nb_imms,
1227 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1228 }
1229
1230 /* Return TRUE if VALUE is a valid logical immediate, i.e. a bitmask, that can
1231 be accepted by logical (immediate) instructions
1232 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1233
1234 ESIZE is the number of bytes in the decoded immediate value.
1235 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1236 VALUE will be returned in *ENCODING. */
1237
1238 bfd_boolean
1239 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1240 {
1241 simd_imm_encoding imm_enc;
1242 const simd_imm_encoding *imm_encoding;
1243 static bfd_boolean initialized = FALSE;
1244 uint64_t upper;
1245 int i;
1246
1247 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1248 value, esize);
1249
1250 if (!initialized)
1251 {
1252 build_immediate_table ();
1253 initialized = TRUE;
1254 }
1255
1256 /* Allow all zeros or all ones in top bits, so that
1257 constant expressions like ~1 are permitted. */
1258 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1259 if ((value & ~upper) != value && (value | upper) != value)
1260 return FALSE;
1261
1262 /* Replicate to a full 64-bit value. */
1263 value &= ~upper;
1264 for (i = esize * 8; i < 64; i *= 2)
1265 value |= (value << i);
1266
1267 imm_enc.imm = value;
1268 imm_encoding = (const simd_imm_encoding *)
1269 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1270 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1271 if (imm_encoding == NULL)
1272 {
1273 DEBUG_TRACE ("exit with FALSE");
1274 return FALSE;
1275 }
1276 if (encoding != NULL)
1277 *encoding = imm_encoding->encoding;
1278 DEBUG_TRACE ("exit with TRUE");
1279 return TRUE;
1280 }
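/* A minimal usage sketch (illustrative values):

     aarch64_insn enc;
     aarch64_logical_immediate_p (0x00ff00ff00ff00ffULL, 8, &enc);

   returns TRUE and sets ENC to 0x027, i.e. N = 0, immr = 0, imms = 0b100111.
   Passing the 32-bit pattern 0x00ff00ff with ESIZE == 4 yields the same
   encoding, since the value is replicated to 64 bits before the lookup.  */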
1281
1282 /* If 64-bit immediate IMM is in the format of
1283 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1284 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1285 of value "abcdefgh". Otherwise return -1. */
1286 int
1287 aarch64_shrink_expanded_imm8 (uint64_t imm)
1288 {
1289 int i, ret;
1290 uint32_t byte;
1291
1292 ret = 0;
1293 for (i = 0; i < 8; i++)
1294 {
1295 byte = (imm >> (8 * i)) & 0xff;
1296 if (byte == 0xff)
1297 ret |= 1 << i;
1298 else if (byte != 0x00)
1299 return -1;
1300 }
1301 return ret;
1302 }
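/* For example, aarch64_shrink_expanded_imm8 (0xff00ff0000ff00ffULL) returns
   0xa5 (binary 10100101, one result bit per input byte, most significant
   byte first), while any input containing a byte other than 0x00 or 0xff
   yields -1.  */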
1303
1304 /* Utility inline functions for operand_general_constraint_met_p. */
1305
1306 static inline void
1307 set_error (aarch64_operand_error *mismatch_detail,
1308 enum aarch64_operand_error_kind kind, int idx,
1309 const char* error)
1310 {
1311 if (mismatch_detail == NULL)
1312 return;
1313 mismatch_detail->kind = kind;
1314 mismatch_detail->index = idx;
1315 mismatch_detail->error = error;
1316 }
1317
1318 static inline void
1319 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1320 const char* error)
1321 {
1322 if (mismatch_detail == NULL)
1323 return;
1324 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1325 }
1326
1327 static inline void
1328 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1329 int idx, int lower_bound, int upper_bound,
1330 const char* error)
1331 {
1332 if (mismatch_detail == NULL)
1333 return;
1334 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1335 mismatch_detail->data[0] = lower_bound;
1336 mismatch_detail->data[1] = upper_bound;
1337 }
1338
1339 static inline void
1340 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1341 int idx, int lower_bound, int upper_bound)
1342 {
1343 if (mismatch_detail == NULL)
1344 return;
1345 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1346 _("immediate value"));
1347 }
1348
1349 static inline void
1350 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1351 int idx, int lower_bound, int upper_bound)
1352 {
1353 if (mismatch_detail == NULL)
1354 return;
1355 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1356 _("immediate offset"));
1357 }
1358
1359 static inline void
1360 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1361 int idx, int lower_bound, int upper_bound)
1362 {
1363 if (mismatch_detail == NULL)
1364 return;
1365 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1366 _("register number"));
1367 }
1368
1369 static inline void
1370 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1371 int idx, int lower_bound, int upper_bound)
1372 {
1373 if (mismatch_detail == NULL)
1374 return;
1375 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1376 _("register element index"));
1377 }
1378
1379 static inline void
1380 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1381 int idx, int lower_bound, int upper_bound)
1382 {
1383 if (mismatch_detail == NULL)
1384 return;
1385 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1386 _("shift amount"));
1387 }
1388
1389 /* Report that the MUL modifier in operand IDX should be in the range
1390 [LOWER_BOUND, UPPER_BOUND]. */
1391 static inline void
1392 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1393 int idx, int lower_bound, int upper_bound)
1394 {
1395 if (mismatch_detail == NULL)
1396 return;
1397 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1398 _("multiplier"));
1399 }
1400
1401 static inline void
1402 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1403 int alignment)
1404 {
1405 if (mismatch_detail == NULL)
1406 return;
1407 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1408 mismatch_detail->data[0] = alignment;
1409 }
1410
1411 static inline void
1412 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1413 int expected_num)
1414 {
1415 if (mismatch_detail == NULL)
1416 return;
1417 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1418 mismatch_detail->data[0] = expected_num;
1419 }
1420
1421 static inline void
1422 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1423 const char* error)
1424 {
1425 if (mismatch_detail == NULL)
1426 return;
1427 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1428 }
1429
1430 /* General constraint checking based on operand code.
1431
1432 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1433 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1434
1435 This function has to be called after the qualifiers for all operands
1436 have been resolved.
1437
1438 A mismatch error message is returned in *MISMATCH_DETAIL upon request,
1439 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1440 of error messages during disassembly, where error messages are not
1441 wanted. We avoid the dynamic construction of strings of error messages
1442 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1443 use a combination of error code, static string and some integer data to
1444 represent an error. */
1445
1446 static int
1447 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1448 enum aarch64_opnd type,
1449 const aarch64_opcode *opcode,
1450 aarch64_operand_error *mismatch_detail)
1451 {
1452 unsigned num, modifiers, shift;
1453 unsigned char size;
1454 int64_t imm, min_value, max_value;
1455 uint64_t uvalue, mask;
1456 const aarch64_opnd_info *opnd = opnds + idx;
1457 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1458
1459 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1460
1461 switch (aarch64_operands[type].op_class)
1462 {
1463 case AARCH64_OPND_CLASS_INT_REG:
1464 /* Check pair reg constraints for cas* instructions. */
1465 if (type == AARCH64_OPND_PAIRREG)
1466 {
1467 assert (idx == 1 || idx == 3);
1468 if (opnds[idx - 1].reg.regno % 2 != 0)
1469 {
1470 set_syntax_error (mismatch_detail, idx - 1,
1471 _("reg pair must start from even reg"));
1472 return 0;
1473 }
1474 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1475 {
1476 set_syntax_error (mismatch_detail, idx,
1477 _("reg pair must be contiguous"));
1478 return 0;
1479 }
1480 break;
1481 }
1482
1483 /* <Xt> may be optional in some IC and TLBI instructions. */
1484 if (type == AARCH64_OPND_Rt_SYS)
1485 {
1486 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1487 == AARCH64_OPND_CLASS_SYSTEM));
1488 if (opnds[1].present
1489 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1490 {
1491 set_other_error (mismatch_detail, idx, _("extraneous register"));
1492 return 0;
1493 }
1494 if (!opnds[1].present
1495 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1496 {
1497 set_other_error (mismatch_detail, idx, _("missing register"));
1498 return 0;
1499 }
1500 }
1501 switch (qualifier)
1502 {
1503 case AARCH64_OPND_QLF_WSP:
1504 case AARCH64_OPND_QLF_SP:
1505 if (!aarch64_stack_pointer_p (opnd))
1506 {
1507 set_other_error (mismatch_detail, idx,
1508 _("stack pointer register expected"));
1509 return 0;
1510 }
1511 break;
1512 default:
1513 break;
1514 }
1515 break;
1516
1517 case AARCH64_OPND_CLASS_SVE_REG:
1518 switch (type)
1519 {
1520 case AARCH64_OPND_SVE_Zm3_INDEX:
1521 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1522 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1523 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1524 case AARCH64_OPND_SVE_Zm4_INDEX:
1525 size = get_operand_fields_width (get_operand_from_code (type));
1526 shift = get_operand_specific_data (&aarch64_operands[type]);
1527 mask = (1 << shift) - 1;
1528 if (opnd->reg.regno > mask)
1529 {
1530 assert (mask == 7 || mask == 15);
1531 set_other_error (mismatch_detail, idx,
1532 mask == 15
1533 ? _("z0-z15 expected")
1534 : _("z0-z7 expected"));
1535 return 0;
1536 }
1537 mask = (1 << (size - shift)) - 1;
1538 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1539 {
1540 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1541 return 0;
1542 }
1543 break;
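/* In the checks above, SIZE is the total width of the operand's fields and
   SHIFT (the operand-specific data) gives the number of bits that hold the
   register number; for instance, with SHIFT == 4 and SIZE == 6 the register
   must be in z0-z15 and the element index in [0, 3].  */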
1544
1545 case AARCH64_OPND_SVE_Zn_INDEX:
1546 size = aarch64_get_qualifier_esize (opnd->qualifier);
1547 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1548 {
1549 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1550 0, 64 / size - 1);
1551 return 0;
1552 }
1553 break;
1554
1555 case AARCH64_OPND_SVE_ZnxN:
1556 case AARCH64_OPND_SVE_ZtxN:
1557 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1558 {
1559 set_other_error (mismatch_detail, idx,
1560 _("invalid register list"));
1561 return 0;
1562 }
1563 break;
1564
1565 default:
1566 break;
1567 }
1568 break;
1569
1570 case AARCH64_OPND_CLASS_PRED_REG:
1571 if (opnd->reg.regno >= 8
1572 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1573 {
1574 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1575 return 0;
1576 }
1577 break;
1578
1579 case AARCH64_OPND_CLASS_COND:
1580 if (type == AARCH64_OPND_COND1
1581 && (opnds[idx].cond->value & 0xe) == 0xe)
1582 {
1583 /* Do not allow AL or NV. */
1584 set_syntax_error (mismatch_detail, idx, NULL);
1585 }
1586 break;
1587
1588 case AARCH64_OPND_CLASS_ADDRESS:
1589 /* Check writeback. */
1590 switch (opcode->iclass)
1591 {
1592 case ldst_pos:
1593 case ldst_unscaled:
1594 case ldstnapair_offs:
1595 case ldstpair_off:
1596 case ldst_unpriv:
1597 if (opnd->addr.writeback == 1)
1598 {
1599 set_syntax_error (mismatch_detail, idx,
1600 _("unexpected address writeback"));
1601 return 0;
1602 }
1603 break;
1604 case ldst_imm10:
1605 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1606 {
1607 set_syntax_error (mismatch_detail, idx,
1608 _("unexpected address writeback"));
1609 return 0;
1610 }
1611 break;
1612 case ldst_imm9:
1613 case ldstpair_indexed:
1614 case asisdlsep:
1615 case asisdlsop:
1616 if (opnd->addr.writeback == 0)
1617 {
1618 set_syntax_error (mismatch_detail, idx,
1619 _("address writeback expected"));
1620 return 0;
1621 }
1622 break;
1623 default:
1624 assert (opnd->addr.writeback == 0);
1625 break;
1626 }
1627 switch (type)
1628 {
1629 case AARCH64_OPND_ADDR_SIMM7:
1630 /* Scaled signed 7-bit immediate offset. */
1631 /* Get the size of the data element that is accessed, which may be
1632 different from the source register size,
1633 e.g. in strb/ldrb. */
1634 size = aarch64_get_qualifier_esize (opnd->qualifier);
1635 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1636 {
1637 set_offset_out_of_range_error (mismatch_detail, idx,
1638 -64 * size, 63 * size);
1639 return 0;
1640 }
1641 if (!value_aligned_p (opnd->addr.offset.imm, size))
1642 {
1643 set_unaligned_error (mismatch_detail, idx, size);
1644 return 0;
1645 }
1646 break;
1647 case AARCH64_OPND_ADDR_OFFSET:
1648 case AARCH64_OPND_ADDR_SIMM9:
1649 /* Unscaled signed 9-bit immediate offset. */
1650 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1651 {
1652 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1653 return 0;
1654 }
1655 break;
1656
1657 case AARCH64_OPND_ADDR_SIMM9_2:
1658 /* Unscaled signed 9-bit immediate offset, which has to be negative
1659 or unaligned. */
1660 size = aarch64_get_qualifier_esize (qualifier);
1661 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1662 && !value_aligned_p (opnd->addr.offset.imm, size))
1663 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1664 return 1;
1665 set_other_error (mismatch_detail, idx,
1666 _("negative or unaligned offset expected"));
1667 return 0;
1668
1669 case AARCH64_OPND_ADDR_SIMM10:
1670 /* Scaled signed 10-bit immediate offset. */
1671 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1672 {
1673 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1674 return 0;
1675 }
1676 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1677 {
1678 set_unaligned_error (mismatch_detail, idx, 8);
1679 return 0;
1680 }
1681 break;
1682
1683 case AARCH64_OPND_ADDR_SIMM11:
1684 /* Signed 11-bit immediate offset (multiple of 16). */
1685 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1686 {
1687 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1688 return 0;
1689 }
1690
1691 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1692 {
1693 set_unaligned_error (mismatch_detail, idx, 16);
1694 return 0;
1695 }
1696 break;
1697
1698 case AARCH64_OPND_ADDR_SIMM13:
1699 /* Signed 13-bit immediate offset (multiple of 16). */
1700 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1701 {
1702 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1703 return 0;
1704 }
1705
1706 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1707 {
1708 set_unaligned_error (mismatch_detail, idx, 16);
1709 return 0;
1710 }
1711 break;
1712
1713 case AARCH64_OPND_SIMD_ADDR_POST:
1714 /* AdvSIMD load/store multiple structures, post-index. */
1715 assert (idx == 1);
1716 if (opnd->addr.offset.is_reg)
1717 {
1718 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1719 return 1;
1720 else
1721 {
1722 set_other_error (mismatch_detail, idx,
1723 _("invalid register offset"));
1724 return 0;
1725 }
1726 }
1727 else
1728 {
1729 const aarch64_opnd_info *prev = &opnds[idx-1];
1730 unsigned num_bytes; /* total number of bytes transferred. */
1731 /* The opcode dependent area stores the number of elements in
1732 each structure to be loaded/stored. */
1733 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1734 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1735 /* Special handling of loading single structure to all lanes. */
1736 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1737 * aarch64_get_qualifier_esize (prev->qualifier);
1738 else
1739 num_bytes = prev->reglist.num_regs
1740 * aarch64_get_qualifier_esize (prev->qualifier)
1741 * aarch64_get_qualifier_nelem (prev->qualifier);
1742 if ((int) num_bytes != opnd->addr.offset.imm)
1743 {
1744 set_other_error (mismatch_detail, idx,
1745 _("invalid post-increment amount"));
1746 return 0;
1747 }
1748 }
1749 break;
1750
1751 case AARCH64_OPND_ADDR_REGOFF:
1752 /* Get the size of the data element that is accessed, which may be
1753 different from the source register size,
1754 e.g. in strb/ldrb. */
1755 size = aarch64_get_qualifier_esize (opnd->qualifier);
1756 /* It is either no shift or shift by the binary logarithm of SIZE. */
1757 if (opnd->shifter.amount != 0
1758 && opnd->shifter.amount != (int)get_logsz (size))
1759 {
1760 set_other_error (mismatch_detail, idx,
1761 _("invalid shift amount"));
1762 return 0;
1763 }
1764 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1765 operators. */
1766 switch (opnd->shifter.kind)
1767 {
1768 case AARCH64_MOD_UXTW:
1769 case AARCH64_MOD_LSL:
1770 case AARCH64_MOD_SXTW:
1771 case AARCH64_MOD_SXTX: break;
1772 default:
1773 set_other_error (mismatch_detail, idx,
1774 _("invalid extend/shift operator"));
1775 return 0;
1776 }
1777 break;
1778
1779 case AARCH64_OPND_ADDR_UIMM12:
1780 imm = opnd->addr.offset.imm;
1781 /* Get the size of the data element that is accessed, which may be
1782 different from the source register size,
1783 e.g. in strb/ldrb. */
1784 size = aarch64_get_qualifier_esize (qualifier);
1785 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1786 {
1787 set_offset_out_of_range_error (mismatch_detail, idx,
1788 0, 4095 * size);
1789 return 0;
1790 }
1791 if (!value_aligned_p (opnd->addr.offset.imm, size))
1792 {
1793 set_unaligned_error (mismatch_detail, idx, size);
1794 return 0;
1795 }
1796 break;
1797
1798 case AARCH64_OPND_ADDR_PCREL14:
1799 case AARCH64_OPND_ADDR_PCREL19:
1800 case AARCH64_OPND_ADDR_PCREL21:
1801 case AARCH64_OPND_ADDR_PCREL26:
1802 imm = opnd->imm.value;
1803 if (operand_need_shift_by_two (get_operand_from_code (type)))
1804 {
1805 /* The offset value in a PC-relative branch instruction is always
1806 4-byte aligned and is encoded without the lowest 2 bits. */
1807 if (!value_aligned_p (imm, 4))
1808 {
1809 set_unaligned_error (mismatch_detail, idx, 4);
1810 return 0;
1811 }
1812 /* Right shift by 2 so that we can carry out the following check
1813 canonically. */
1814 imm >>= 2;
1815 }
1816 size = get_operand_fields_width (get_operand_from_code (type));
1817 if (!value_fit_signed_field_p (imm, size))
1818 {
1819 set_other_error (mismatch_detail, idx,
1820 _("immediate out of range"));
1821 return 0;
1822 }
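/* For example, AARCH64_OPND_ADDR_PCREL26 (B/BL) provides a 26-bit field,
   so after the shift the byte offset must lie within roughly +/-128MiB,
   while AARCH64_OPND_ADDR_PCREL14 (TBZ/TBNZ) allows roughly +/-32KiB.  */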
1823 break;
1824
1825 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1826 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1827 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1828 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1829 min_value = -8;
1830 max_value = 7;
1831 sve_imm_offset_vl:
1832 assert (!opnd->addr.offset.is_reg);
1833 assert (opnd->addr.preind);
1834 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1835 min_value *= num;
1836 max_value *= num;
1837 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1838 || (opnd->shifter.operator_present
1839 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1840 {
1841 set_other_error (mismatch_detail, idx,
1842 _("invalid addressing mode"));
1843 return 0;
1844 }
1845 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1846 {
1847 set_offset_out_of_range_error (mismatch_detail, idx,
1848 min_value, max_value);
1849 return 0;
1850 }
1851 if (!value_aligned_p (opnd->addr.offset.imm, num))
1852 {
1853 set_unaligned_error (mismatch_detail, idx, num);
1854 return 0;
1855 }
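/* For example, AARCH64_OPND_SVE_ADDR_RI_S4x2xVL (e.g. the LD2/ST2
   scalar-plus-immediate forms) has NUM == 2, so the immediate must be a
   multiple of 2 in the range -16..14 vector lengths, as in
   "ld2w {z0.s, z1.s}, p0/z, [x0, #-16, mul vl]".  */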
1856 break;
1857
1858 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1859 min_value = -32;
1860 max_value = 31;
1861 goto sve_imm_offset_vl;
1862
1863 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1864 min_value = -256;
1865 max_value = 255;
1866 goto sve_imm_offset_vl;
1867
1868 case AARCH64_OPND_SVE_ADDR_RI_U6:
1869 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1870 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1871 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1872 min_value = 0;
1873 max_value = 63;
1874 sve_imm_offset:
1875 assert (!opnd->addr.offset.is_reg);
1876 assert (opnd->addr.preind);
1877 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1878 min_value *= num;
1879 max_value *= num;
1880 if (opnd->shifter.operator_present
1881 || opnd->shifter.amount_present)
1882 {
1883 set_other_error (mismatch_detail, idx,
1884 _("invalid addressing mode"));
1885 return 0;
1886 }
1887 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1888 {
1889 set_offset_out_of_range_error (mismatch_detail, idx,
1890 min_value, max_value);
1891 return 0;
1892 }
1893 if (!value_aligned_p (opnd->addr.offset.imm, num))
1894 {
1895 set_unaligned_error (mismatch_detail, idx, num);
1896 return 0;
1897 }
1898 break;
1899
1900 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1901 min_value = -8;
1902 max_value = 7;
1903 goto sve_imm_offset;
1904
1905 case AARCH64_OPND_SVE_ADDR_ZX:
1906 /* Everything is already ensured by parse_operands or
1907 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
1908 argument type). */
1909 assert (opnd->addr.offset.is_reg);
1910 assert (opnd->addr.preind);
1911 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
1912 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
1913 assert (opnd->shifter.operator_present == 0);
1914 break;
1915
1916 case AARCH64_OPND_SVE_ADDR_R:
1917 case AARCH64_OPND_SVE_ADDR_RR:
1918 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1919 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1920 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1921 case AARCH64_OPND_SVE_ADDR_RX:
1922 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1923 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1924 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1925 case AARCH64_OPND_SVE_ADDR_RZ:
1926 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1927 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1928 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1929 modifiers = 1 << AARCH64_MOD_LSL;
1930 sve_rr_operand:
1931 assert (opnd->addr.offset.is_reg);
1932 assert (opnd->addr.preind);
1933 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1934 && opnd->addr.offset.regno == 31)
1935 {
1936 set_other_error (mismatch_detail, idx,
1937 _("index register xzr is not allowed"));
1938 return 0;
1939 }
1940 if (((1 << opnd->shifter.kind) & modifiers) == 0
1941 || (opnd->shifter.amount
1942 != get_operand_specific_data (&aarch64_operands[type])))
1943 {
1944 set_other_error (mismatch_detail, idx,
1945 _("invalid addressing mode"));
1946 return 0;
1947 }
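/* For example, AARCH64_OPND_SVE_ADDR_RX_LSL2 requires an LSL amount of
   exactly 2 to match the 4-byte element size, as in
   "ld1w {z0.s}, p0/z, [x0, x1, lsl #2]"; operands carrying OPD_F_NO_ZR
   additionally reject xzr as the index register.  */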
1948 break;
1949
1950 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1951 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1952 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1953 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1954 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1955 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1956 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1957 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1958 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1959 goto sve_rr_operand;
1960
1961 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1962 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1963 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1964 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1965 min_value = 0;
1966 max_value = 31;
1967 goto sve_imm_offset;
1968
1969 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1970 modifiers = 1 << AARCH64_MOD_LSL;
1971 sve_zz_operand:
1972 assert (opnd->addr.offset.is_reg);
1973 assert (opnd->addr.preind);
1974 if (((1 << opnd->shifter.kind) & modifiers) == 0
1975 || opnd->shifter.amount < 0
1976 || opnd->shifter.amount > 3)
1977 {
1978 set_other_error (mismatch_detail, idx,
1979 _("invalid addressing mode"));
1980 return 0;
1981 }
1982 break;
1983
1984 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1985 modifiers = (1 << AARCH64_MOD_SXTW);
1986 goto sve_zz_operand;
1987
1988 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1989 modifiers = 1 << AARCH64_MOD_UXTW;
1990 goto sve_zz_operand;
1991
1992 default:
1993 break;
1994 }
1995 break;
1996
1997 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1998 if (type == AARCH64_OPND_LEt)
1999 {
2000 /* Get the upper bound for the element index. */
2001 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2002 if (!value_in_range_p (opnd->reglist.index, 0, num))
2003 {
2004 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2005 return 0;
2006 }
2007 }
2008 /* The opcode dependent area stores the number of elements in
2009 each structure to be loaded/stored. */
2010 num = get_opcode_dependent_value (opcode);
2011 switch (type)
2012 {
2013 case AARCH64_OPND_LVt:
2014 assert (num >= 1 && num <= 4);
2015 /* For instructions other than LD1/ST1, the number of registers
2016 must equal the number of structure elements. */
2017 if (num != 1 && opnd->reglist.num_regs != num)
2018 {
2019 set_reg_list_error (mismatch_detail, idx, num);
2020 return 0;
2021 }
2022 break;
2023 case AARCH64_OPND_LVt_AL:
2024 case AARCH64_OPND_LEt:
2025 assert (num >= 1 && num <= 4);
2026 /* The number of registers should be equal to that of the structure
2027 elements. */
2028 if (opnd->reglist.num_regs != num)
2029 {
2030 set_reg_list_error (mismatch_detail, idx, num);
2031 return 0;
2032 }
2033 break;
2034 default:
2035 break;
2036 }
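/* For example, "ld3 {v0.8b, v1.8b, v2.8b}, [x0]" must name exactly three
   registers, whereas LD1/ST1 accept a list of one to four registers,
   e.g. "ld1 {v0.16b, v1.16b}, [x0]".  */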
2037 break;
2038
2039 case AARCH64_OPND_CLASS_IMMEDIATE:
2040 /* Constraint check on immediate operand. */
2041 imm = opnd->imm.value;
2042 /* E.g. imm_0_31 constrains value to be 0..31. */
2043 if (qualifier_value_in_range_constraint_p (qualifier)
2044 && !value_in_range_p (imm, get_lower_bound (qualifier),
2045 get_upper_bound (qualifier)))
2046 {
2047 set_imm_out_of_range_error (mismatch_detail, idx,
2048 get_lower_bound (qualifier),
2049 get_upper_bound (qualifier));
2050 return 0;
2051 }
2052
2053 switch (type)
2054 {
2055 case AARCH64_OPND_AIMM:
2056 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2057 {
2058 set_other_error (mismatch_detail, idx,
2059 _("invalid shift operator"));
2060 return 0;
2061 }
2062 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2063 {
2064 set_other_error (mismatch_detail, idx,
2065 _("shift amount must be 0 or 12"));
2066 return 0;
2067 }
2068 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2069 {
2070 set_other_error (mismatch_detail, idx,
2071 _("immediate out of range"));
2072 return 0;
2073 }
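/* For example, "add x0, x1, #15, lsl #12" is accepted, while a shift
   amount other than 0 or 12, or an immediate that does not fit in the
   12-bit field, is rejected by the checks above.  */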
2074 break;
2075
2076 case AARCH64_OPND_HALF:
2077 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2078 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2079 {
2080 set_other_error (mismatch_detail, idx,
2081 _("invalid shift operator"));
2082 return 0;
2083 }
2084 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2085 if (!value_aligned_p (opnd->shifter.amount, 16))
2086 {
2087 set_other_error (mismatch_detail, idx,
2088 _("shift amount must be a multiple of 16"));
2089 return 0;
2090 }
2091 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2092 {
2093 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2094 0, size * 8 - 16);
2095 return 0;
2096 }
2097 if (opnd->imm.value < 0)
2098 {
2099 set_other_error (mismatch_detail, idx,
2100 _("negative immediate value not allowed"));
2101 return 0;
2102 }
2103 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2104 {
2105 set_other_error (mismatch_detail, idx,
2106 _("immediate out of range"));
2107 return 0;
2108 }
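/* For example, "movk x0, #0xbeef, lsl #48" is accepted (X destination,
   shift a multiple of 16 up to 48), while "movk w0, #1, lsl #32" is
   rejected because a W destination only allows shifts of 0 or 16.  */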
2109 break;
2110
2111 case AARCH64_OPND_IMM_MOV:
2112 {
2113 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2114 imm = opnd->imm.value;
2115 assert (idx == 1);
2116 switch (opcode->op)
2117 {
2118 case OP_MOV_IMM_WIDEN:
2119 imm = ~imm;
2120 /* Fall through. */
2121 case OP_MOV_IMM_WIDE:
2122 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2123 {
2124 set_other_error (mismatch_detail, idx,
2125 _("immediate out of range"));
2126 return 0;
2127 }
2128 break;
2129 case OP_MOV_IMM_LOG:
2130 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2131 {
2132 set_other_error (mismatch_detail, idx,
2133 _("immediate out of range"));
2134 return 0;
2135 }
2136 break;
2137 default:
2138 assert (0);
2139 return 0;
2140 }
2141 }
2142 break;
2143
2144 case AARCH64_OPND_NZCV:
2145 case AARCH64_OPND_CCMP_IMM:
2146 case AARCH64_OPND_EXCEPTION:
2147 case AARCH64_OPND_TME_UIMM16:
2148 case AARCH64_OPND_UIMM4:
2149 case AARCH64_OPND_UIMM4_ADDG:
2150 case AARCH64_OPND_UIMM7:
2151 case AARCH64_OPND_UIMM3_OP1:
2152 case AARCH64_OPND_UIMM3_OP2:
2153 case AARCH64_OPND_SVE_UIMM3:
2154 case AARCH64_OPND_SVE_UIMM7:
2155 case AARCH64_OPND_SVE_UIMM8:
2156 case AARCH64_OPND_SVE_UIMM8_53:
2157 size = get_operand_fields_width (get_operand_from_code (type));
2158 assert (size < 32);
2159 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2160 {
2161 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2162 (1 << size) - 1);
2163 return 0;
2164 }
2165 break;
2166
2167 case AARCH64_OPND_UIMM10:
2168 /* Scaled unsigned 10-bit immediate offset. */
2169 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2170 {
2171 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2172 return 0;
2173 }
2174
2175 if (!value_aligned_p (opnd->imm.value, 16))
2176 {
2177 set_unaligned_error (mismatch_detail, idx, 16);
2178 return 0;
2179 }
2180 break;
2181
2182 case AARCH64_OPND_SIMM5:
2183 case AARCH64_OPND_SVE_SIMM5:
2184 case AARCH64_OPND_SVE_SIMM5B:
2185 case AARCH64_OPND_SVE_SIMM6:
2186 case AARCH64_OPND_SVE_SIMM8:
2187 size = get_operand_fields_width (get_operand_from_code (type));
2188 assert (size < 32);
2189 if (!value_fit_signed_field_p (opnd->imm.value, size))
2190 {
2191 set_imm_out_of_range_error (mismatch_detail, idx,
2192 -(1 << (size - 1)),
2193 (1 << (size - 1)) - 1);
2194 return 0;
2195 }
2196 break;
2197
2198 case AARCH64_OPND_WIDTH:
2199 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2200 && opnds[0].type == AARCH64_OPND_Rd);
2201 size = get_upper_bound (qualifier);
2202 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2203 /* lsb+width <= reg.size */
2204 {
2205 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2206 size - opnds[idx-1].imm.value);
2207 return 0;
2208 }
2209 break;
2210
2211 case AARCH64_OPND_LIMM:
2212 case AARCH64_OPND_SVE_LIMM:
2213 {
2214 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2215 uint64_t uimm = opnd->imm.value;
2216 if (opcode->op == OP_BIC)
2217 uimm = ~uimm;
2218 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2219 {
2220 set_other_error (mismatch_detail, idx,
2221 _("immediate out of range"));
2222 return 0;
2223 }
2224 }
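/* For example, "and x0, x1, #0x00ff00ff00ff00ff" is accepted because the
   value is a repeating bitmask pattern (eight set bits in each 16-bit
   element), while #0 and #0xffffffffffffffff can never be encoded as
   logical immediates and are rejected here.  */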
2225 break;
2226
2227 case AARCH64_OPND_IMM0:
2228 case AARCH64_OPND_FPIMM0:
2229 if (opnd->imm.value != 0)
2230 {
2231 set_other_error (mismatch_detail, idx,
2232 _("immediate zero expected"));
2233 return 0;
2234 }
2235 break;
2236
2237 case AARCH64_OPND_IMM_ROT1:
2238 case AARCH64_OPND_IMM_ROT2:
2239 case AARCH64_OPND_SVE_IMM_ROT2:
2240 if (opnd->imm.value != 0
2241 && opnd->imm.value != 90
2242 && opnd->imm.value != 180
2243 && opnd->imm.value != 270)
2244 {
2245 set_other_error (mismatch_detail, idx,
2246 _("rotate expected to be 0, 90, 180 or 270"));
2247 return 0;
2248 }
2249 break;
2250
2251 case AARCH64_OPND_IMM_ROT3:
2252 case AARCH64_OPND_SVE_IMM_ROT1:
2253 case AARCH64_OPND_SVE_IMM_ROT3:
2254 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2255 {
2256 set_other_error (mismatch_detail, idx,
2257 _("rotate expected to be 90 or 270"));
2258 return 0;
2259 }
2260 break;
2261
2262 case AARCH64_OPND_SHLL_IMM:
2263 assert (idx == 2);
2264 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2265 if (opnd->imm.value != size)
2266 {
2267 set_other_error (mismatch_detail, idx,
2268 _("invalid shift amount"));
2269 return 0;
2270 }
2271 break;
2272
2273 case AARCH64_OPND_IMM_VLSL:
2274 size = aarch64_get_qualifier_esize (qualifier);
2275 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2276 {
2277 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2278 size * 8 - 1);
2279 return 0;
2280 }
2281 break;
2282
2283 case AARCH64_OPND_IMM_VLSR:
2284 size = aarch64_get_qualifier_esize (qualifier);
2285 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2286 {
2287 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2288 return 0;
2289 }
2290 break;
2291
2292 case AARCH64_OPND_SIMD_IMM:
2293 case AARCH64_OPND_SIMD_IMM_SFT:
2294 /* Qualifier check. */
2295 switch (qualifier)
2296 {
2297 case AARCH64_OPND_QLF_LSL:
2298 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2299 {
2300 set_other_error (mismatch_detail, idx,
2301 _("invalid shift operator"));
2302 return 0;
2303 }
2304 break;
2305 case AARCH64_OPND_QLF_MSL:
2306 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2307 {
2308 set_other_error (mismatch_detail, idx,
2309 _("invalid shift operator"));
2310 return 0;
2311 }
2312 break;
2313 case AARCH64_OPND_QLF_NIL:
2314 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2315 {
2316 set_other_error (mismatch_detail, idx,
2317 _("shift is not permitted"));
2318 return 0;
2319 }
2320 break;
2321 default:
2322 assert (0);
2323 return 0;
2324 }
2325 /* Is the immediate valid? */
2326 assert (idx == 1);
2327 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2328 {
2329 /* uimm8 or simm8 */
2330 if (!value_in_range_p (opnd->imm.value, -128, 255))
2331 {
2332 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2333 return 0;
2334 }
2335 }
2336 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2337 {
2338 /* uimm64 is not
2339 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2340 ffffffffgggggggghhhhhhhh'. */
2341 set_other_error (mismatch_detail, idx,
2342 _("invalid value for immediate"));
2343 return 0;
2344 }
2345 /* Is the shift amount valid? */
2346 switch (opnd->shifter.kind)
2347 {
2348 case AARCH64_MOD_LSL:
2349 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2350 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2351 {
2352 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2353 (size - 1) * 8);
2354 return 0;
2355 }
2356 if (!value_aligned_p (opnd->shifter.amount, 8))
2357 {
2358 set_unaligned_error (mismatch_detail, idx, 8);
2359 return 0;
2360 }
2361 break;
2362 case AARCH64_MOD_MSL:
2363 /* Only 8 and 16 are valid shift amounts. */
2364 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2365 {
2366 set_other_error (mismatch_detail, idx,
2367 _("shift amount must be 8 or 16"));
2368 return 0;
2369 }
2370 break;
2371 default:
2372 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2373 {
2374 set_other_error (mismatch_detail, idx,
2375 _("invalid shift operator"));
2376 return 0;
2377 }
2378 break;
2379 }
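/* For example, "movi v0.4s, #0xab, lsl #16" uses an LSL amount that is a
   multiple of 8 below the 32-bit element width, "movi v0.4s, #0xab, msl #8"
   uses one of the two MSL amounts, and the 64-bit form
   "movi v0.2d, #0xff00ff00ff00ff00" must be a byte-replicated pattern as
   checked by aarch64_shrink_expanded_imm8.  */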
2380 break;
2381
2382 case AARCH64_OPND_FPIMM:
2383 case AARCH64_OPND_SIMD_FPIMM:
2384 case AARCH64_OPND_SVE_FPIMM8:
2385 if (opnd->imm.is_fp == 0)
2386 {
2387 set_other_error (mismatch_detail, idx,
2388 _("floating-point immediate expected"));
2389 return 0;
2390 }
2391 /* The value is expected to be an 8-bit floating-point constant with
2392 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2393 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2394 instruction). */
2395 if (!value_in_range_p (opnd->imm.value, 0, 255))
2396 {
2397 set_other_error (mismatch_detail, idx,
2398 _("immediate out of range"));
2399 return 0;
2400 }
2401 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2402 {
2403 set_other_error (mismatch_detail, idx,
2404 _("invalid shift operator"));
2405 return 0;
2406 }
2407 break;
2408
2409 case AARCH64_OPND_SVE_AIMM:
2410 min_value = 0;
2411 sve_aimm:
2412 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2413 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2414 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2415 uvalue = opnd->imm.value;
2416 shift = opnd->shifter.amount;
2417 if (size == 1)
2418 {
2419 if (shift != 0)
2420 {
2421 set_other_error (mismatch_detail, idx,
2422 _("no shift amount allowed for"
2423 " 8-bit constants"));
2424 return 0;
2425 }
2426 }
2427 else
2428 {
2429 if (shift != 0 && shift != 8)
2430 {
2431 set_other_error (mismatch_detail, idx,
2432 _("shift amount must be 0 or 8"));
2433 return 0;
2434 }
2435 if (shift == 0 && (uvalue & 0xff) == 0)
2436 {
2437 shift = 8;
2438 uvalue = (int64_t) uvalue / 256;
2439 }
2440 }
2441 mask >>= shift;
2442 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2443 {
2444 set_other_error (mismatch_detail, idx,
2445 _("immediate too big for element size"));
2446 return 0;
2447 }
2448 uvalue = (uvalue - min_value) & mask;
2449 if (uvalue > 0xff)
2450 {
2451 set_other_error (mismatch_detail, idx,
2452 _("invalid arithmetic immediate"));
2453 return 0;
2454 }
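/* For example, "add z0.h, z0.h, #256" passes because the checker treats
   the value as #1, lsl #8, whereas #257 fails the final range check and
   "add z0.b, z0.b, #1, lsl #8" is rejected outright for 8-bit elements.  */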
2455 break;
2456
2457 case AARCH64_OPND_SVE_ASIMM:
2458 min_value = -128;
2459 goto sve_aimm;
2460
2461 case AARCH64_OPND_SVE_I1_HALF_ONE:
2462 assert (opnd->imm.is_fp);
2463 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2464 {
2465 set_other_error (mismatch_detail, idx,
2466 _("floating-point value must be 0.5 or 1.0"));
2467 return 0;
2468 }
2469 break;
2470
2471 case AARCH64_OPND_SVE_I1_HALF_TWO:
2472 assert (opnd->imm.is_fp);
2473 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2474 {
2475 set_other_error (mismatch_detail, idx,
2476 _("floating-point value must be 0.5 or 2.0"));
2477 return 0;
2478 }
2479 break;
2480
2481 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2482 assert (opnd->imm.is_fp);
2483 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2484 {
2485 set_other_error (mismatch_detail, idx,
2486 _("floating-point value must be 0.0 or 1.0"));
2487 return 0;
2488 }
2489 break;
2490
2491 case AARCH64_OPND_SVE_INV_LIMM:
2492 {
2493 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2494 uint64_t uimm = ~opnd->imm.value;
2495 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2496 {
2497 set_other_error (mismatch_detail, idx,
2498 _("immediate out of range"));
2499 return 0;
2500 }
2501 }
2502 break;
2503
2504 case AARCH64_OPND_SVE_LIMM_MOV:
2505 {
2506 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2507 uint64_t uimm = opnd->imm.value;
2508 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2509 {
2510 set_other_error (mismatch_detail, idx,
2511 _("immediate out of range"));
2512 return 0;
2513 }
2514 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2515 {
2516 set_other_error (mismatch_detail, idx,
2517 _("invalid replicated MOV immediate"));
2518 return 0;
2519 }
2520 }
2521 break;
2522
2523 case AARCH64_OPND_SVE_PATTERN_SCALED:
2524 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2525 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2526 {
2527 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2528 return 0;
2529 }
2530 break;
2531
2532 case AARCH64_OPND_SVE_SHLIMM_PRED:
2533 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2534 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2535 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2536 {
2537 set_imm_out_of_range_error (mismatch_detail, idx,
2538 0, 8 * size - 1);
2539 return 0;
2540 }
2541 break;
2542
2543 case AARCH64_OPND_SVE_SHRIMM_PRED:
2544 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2545 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2546 {
2547 unsigned int index =
2548 (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2549 size = aarch64_get_qualifier_esize (opnds[idx - index].qualifier);
2550 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2551 {
2552 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2553 return 0;
2554 }
2555 break;
2556 }
2557
2558 default:
2559 break;
2560 }
2561 break;
2562
2563 case AARCH64_OPND_CLASS_SYSTEM:
2564 switch (type)
2565 {
2566 case AARCH64_OPND_PSTATEFIELD:
2567 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2568 /* MSR UAO, #uimm4
2569 MSR PAN, #uimm4
2570 MSR SSBS, #uimm4
2571 The immediate must be #0 or #1. */
2572 if ((opnd->pstatefield == 0x03 /* UAO. */
2573 || opnd->pstatefield == 0x04 /* PAN. */
2574 || opnd->pstatefield == 0x19 /* SSBS. */
2575 || opnd->pstatefield == 0x1a) /* DIT. */
2576 && opnds[1].imm.value > 1)
2577 {
2578 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2579 return 0;
2580 }
2581 /* MSR SPSel, #uimm4
2582 Uses uimm4 as a control value to select the stack pointer: if
2583 bit 0 is set it selects the current exception level's stack
2584 pointer, if bit 0 is clear it selects the shared EL0 stack pointer.
2585 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2586 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2587 {
2588 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2589 return 0;
2590 }
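/* For example, "msr pan, #1" is accepted, while "msr pan, #2" and
   "msr spsel, #2" are rejected because only #0 and #1 are meaningful
   for these PSTATE fields.  */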
2591 break;
2592 default:
2593 break;
2594 }
2595 break;
2596
2597 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2598 /* Get the upper bound for the element index. */
2599 if (opcode->op == OP_FCMLA_ELEM)
2600 /* FCMLA index range depends on the vector size of other operands
2601 and is halved because complex numbers take two elements. */
2602 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2603 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2604 else
2605 num = 16;
2606 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2607 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2608
2609 /* Index out-of-range. */
2610 if (!value_in_range_p (opnd->reglane.index, 0, num))
2611 {
2612 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2613 return 0;
2614 }
2615 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2616 <Vm> is the vector register (V0-V31) or (V0-V15), whose
2617 number is encoded in "size:M:Rm":
2618 size <Vm>
2619 00 RESERVED
2620 01 0:Rm
2621 10 M:Rm
2622 11 RESERVED */
2623 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2624 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2625 {
2626 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2627 return 0;
2628 }
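/* For example, with the .H qualifier the index may range over 0..7 and,
   for AARCH64_OPND_Em16, the register is restricted to v0-v15, so
   "fmla v0.8h, v1.8h, v15.h[7]" is accepted while an operand such as
   v16.h[0] is rejected.  */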
2629 break;
2630
2631 case AARCH64_OPND_CLASS_MODIFIED_REG:
2632 assert (idx == 1 || idx == 2);
2633 switch (type)
2634 {
2635 case AARCH64_OPND_Rm_EXT:
2636 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2637 && opnd->shifter.kind != AARCH64_MOD_LSL)
2638 {
2639 set_other_error (mismatch_detail, idx,
2640 _("extend operator expected"));
2641 return 0;
2642 }
2643 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2644 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2645 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2646 case. */
2647 if (!aarch64_stack_pointer_p (opnds + 0)
2648 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2649 {
2650 if (!opnd->shifter.operator_present)
2651 {
2652 set_other_error (mismatch_detail, idx,
2653 _("missing extend operator"));
2654 return 0;
2655 }
2656 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2657 {
2658 set_other_error (mismatch_detail, idx,
2659 _("'LSL' operator not allowed"));
2660 return 0;
2661 }
2662 }
2663 assert (opnd->shifter.operator_present /* Default to LSL. */
2664 || opnd->shifter.kind == AARCH64_MOD_LSL);
2665 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2666 {
2667 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2668 return 0;
2669 }
2670 /* In the 64-bit form, the final register operand is written as Wm
2671 for all but the (possibly omitted) UXTX/LSL and SXTX
2672 operators.
2673 N.B. GAS allows an X register to be used with any operator as a
2674 programming convenience. */
2675 if (qualifier == AARCH64_OPND_QLF_X
2676 && opnd->shifter.kind != AARCH64_MOD_LSL
2677 && opnd->shifter.kind != AARCH64_MOD_UXTX
2678 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2679 {
2680 set_other_error (mismatch_detail, idx, _("W register expected"));
2681 return 0;
2682 }
2683 break;
2684
2685 case AARCH64_OPND_Rm_SFT:
2686 /* ROR is not available to the shifted register operand in
2687 arithmetic instructions. */
2688 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2689 {
2690 set_other_error (mismatch_detail, idx,
2691 _("shift operator expected"));
2692 return 0;
2693 }
2694 if (opnd->shifter.kind == AARCH64_MOD_ROR
2695 && opcode->iclass != log_shift)
2696 {
2697 set_other_error (mismatch_detail, idx,
2698 _("'ROR' operator not allowed"));
2699 return 0;
2700 }
2701 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2702 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2703 {
2704 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2705 return 0;
2706 }
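/* For example, "eor x0, x1, x2, ror #12" is accepted because EOR is in
   the log_shift class, while "add x0, x1, x2, ror #12" is rejected; the
   shift amount itself may be 0..31 for W and 0..63 for X registers.  */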
2707 break;
2708
2709 default:
2710 break;
2711 }
2712 break;
2713
2714 default:
2715 break;
2716 }
2717
2718 return 1;
2719 }
2720
2721 /* Main entry point for operand constraint checking.
2722
2723 Return 1 if the operands of *INST meet the constraints applied by the operand
2724 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2725 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2726 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2727 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2728 error kind when it is notified that an instruction does not pass the check).
2729
2730 Un-determined operand qualifiers may get established during the process. */
2731
2732 int
2733 aarch64_match_operands_constraint (aarch64_inst *inst,
2734 aarch64_operand_error *mismatch_detail)
2735 {
2736 int i;
2737
2738 DEBUG_TRACE ("enter");
2739
2740 /* Check for cases where a source register needs to be the same as the
2741 destination register. Do this before matching qualifiers since if
2742 an instruction has both invalid tying and invalid qualifiers,
2743 the error about qualifiers would suggest several alternative
2744 instructions that also have invalid tying. */
2745 i = inst->opcode->tied_operand;
2746 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2747 {
2748 if (mismatch_detail)
2749 {
2750 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2751 mismatch_detail->index = i;
2752 mismatch_detail->error = NULL;
2753 }
2754 return 0;
2755 }
2756
2757 /* Match the operands' qualifiers.
2758 *INST has already had qualifiers established for some, if not all, of
2759 its operands; we need to find out whether these established
2760 qualifiers match one of the qualifier sequences in
2761 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2762 the corresponding qualifier from that sequence.
2763 Only basic operand constraint checking is done here; the more thorough
2764 constraint checking is carried out by operand_general_constraint_met_p,
2765 which has to be called after this in order to get all of the operands'
2766 qualifiers established. */
2767 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2768 {
2769 DEBUG_TRACE ("FAIL on operand qualifier matching");
2770 if (mismatch_detail)
2771 {
2772 /* Return an error type to indicate that this is a qualifier
2773 matching failure; we don't care about which operand as there
2774 is enough information in the opcode table to reproduce it. */
2775 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2776 mismatch_detail->index = -1;
2777 mismatch_detail->error = NULL;
2778 }
2779 return 0;
2780 }
2781
2782 /* Match operands' constraint. */
2783 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2784 {
2785 enum aarch64_opnd type = inst->opcode->operands[i];
2786 if (type == AARCH64_OPND_NIL)
2787 break;
2788 if (inst->operands[i].skip)
2789 {
2790 DEBUG_TRACE ("skip the incomplete operand %d", i);
2791 continue;
2792 }
2793 if (operand_general_constraint_met_p (inst->operands, i, type,
2794 inst->opcode, mismatch_detail) == 0)
2795 {
2796 DEBUG_TRACE ("FAIL on operand %d", i);
2797 return 0;
2798 }
2799 }
2800
2801 DEBUG_TRACE ("PASS");
2802
2803 return 1;
2804 }
2805
2806 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2807 Also updates the TYPE of each INST->OPERANDS with the corresponding
2808 value of OPCODE->OPERANDS.
2809
2810 Note that some operand qualifiers may need to be manually cleared by
2811 the caller before it further calls aarch64_opcode_encode;
2812 doing this helps the qualifier matching facilities work
2813 properly. */
2814
2815 const aarch64_opcode*
2816 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2817 {
2818 int i;
2819 const aarch64_opcode *old = inst->opcode;
2820
2821 inst->opcode = opcode;
2822
2823 /* Update the operand types. */
2824 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2825 {
2826 inst->operands[i].type = opcode->operands[i];
2827 if (opcode->operands[i] == AARCH64_OPND_NIL)
2828 break;
2829 }
2830
2831 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2832
2833 return old;
2834 }
2835
2836 int
2837 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2838 {
2839 int i;
2840 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2841 if (operands[i] == operand)
2842 return i;
2843 else if (operands[i] == AARCH64_OPND_NIL)
2844 break;
2845 return -1;
2846 }
2847 \f
2848 /* R0...R30, followed by FOR31. */
2849 #define BANK(R, FOR31) \
2850 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
2851 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2852 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2853 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
2854 /* [0][0] 32-bit integer regs with sp Wn
2855 [0][1] 64-bit integer regs with sp Xn sf=1
2856 [1][0] 32-bit integer regs with #0 Wn
2857 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2858 static const char *int_reg[2][2][32] = {
2859 #define R32(X) "w" #X
2860 #define R64(X) "x" #X
2861 { BANK (R32, "wsp"), BANK (R64, "sp") },
2862 { BANK (R32, "wzr"), BANK (R64, "xzr") }
2863 #undef R64
2864 #undef R32
2865 };
2866
2867 /* Names of the SVE vector registers, first with .S suffixes,
2868 then with .D suffixes. */
2869
2870 static const char *sve_reg[2][32] = {
2871 #define ZS(X) "z" #X ".s"
2872 #define ZD(X) "z" #X ".d"
2873 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2874 #undef ZD
2875 #undef ZS
2876 };
2877 #undef BANK
2878
2879 /* Return the integer register name.
2880 If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg. */
2881
2882 static inline const char *
2883 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2884 {
2885 const int has_zr = sp_reg_p ? 0 : 1;
2886 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2887 return int_reg[has_zr][is_64][regno];
2888 }
2889
2890 /* Like get_int_reg_name, but IS_64 is always 1. */
2891
2892 static inline const char *
2893 get_64bit_int_reg_name (int regno, int sp_reg_p)
2894 {
2895 const int has_zr = sp_reg_p ? 0 : 1;
2896 return int_reg[has_zr][1][regno];
2897 }
2898
2899 /* Get the name of the integer offset register in OPND, using the shift type
2900 to decide whether it's a word or doubleword. */
2901
2902 static inline const char *
2903 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2904 {
2905 switch (opnd->shifter.kind)
2906 {
2907 case AARCH64_MOD_UXTW:
2908 case AARCH64_MOD_SXTW:
2909 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2910
2911 case AARCH64_MOD_LSL:
2912 case AARCH64_MOD_SXTX:
2913 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2914
2915 default:
2916 abort ();
2917 }
2918 }
2919
2920 /* Get the name of the SVE vector offset register in OPND, using the operand
2921 qualifier to decide whether the suffix should be .S or .D. */
2922
2923 static inline const char *
2924 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2925 {
2926 assert (qualifier == AARCH64_OPND_QLF_S_S
2927 || qualifier == AARCH64_OPND_QLF_S_D);
2928 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2929 }
2930
2931 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2932
2933 typedef union
2934 {
2935 uint64_t i;
2936 double d;
2937 } double_conv_t;
2938
2939 typedef union
2940 {
2941 uint32_t i;
2942 float f;
2943 } single_conv_t;
2944
2945 typedef union
2946 {
2947 uint32_t i;
2948 float f;
2949 } half_conv_t;
2950
2951 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2952 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2953 (depending on the type of the instruction). IMM8 will be expanded to a
2954 single-precision floating-point value (SIZE == 4) or a double-precision
2955 floating-point value (SIZE == 8). A half-precision floating-point value
2956 (SIZE == 2) is expanded to a single-precision floating-point value. The
2957 expanded value is returned. */
2958
2959 static uint64_t
2960 expand_fp_imm (int size, uint32_t imm8)
2961 {
2962 uint64_t imm = 0;
2963 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2964
2965 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2966 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2967 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2968 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2969 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
2970 if (size == 8)
2971 {
2972 imm = (imm8_7 << (63-32)) /* imm8<7> */
2973 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
2974 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2975 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2976 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2977 imm <<= 32;
2978 }
2979 else if (size == 4 || size == 2)
2980 {
2981 imm = (imm8_7 << 31) /* imm8<7> */
2982 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2983 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2984 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2985 }
2986 else
2987 {
2988 /* An unsupported size. */
2989 assert (0);
2990 }
2991
2992 return imm;
2993 }
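/* Worked example: IMM8 == 0x70 (a == 0, b == 1, cdefgh == 110000), as
   used by "fmov s0, #1.0", expands to 0x3f800000 (1.0f) for SIZE == 4
   and to 0x3ff0000000000000 (1.0) for SIZE == 8.  */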
2994
2995 /* Produce the string representation of the register list operand *OPND
2996 in the buffer pointed to by BUF of size SIZE. PREFIX is the part of
2997 the register name that comes before the register number, such as "v". */
2998 static void
2999 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
3000 const char *prefix)
3001 {
3002 const int num_regs = opnd->reglist.num_regs;
3003 const int first_reg = opnd->reglist.first_regno;
3004 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
3005 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
3006 char tb[8]; /* Temporary buffer. */
3007
3008 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
3009 assert (num_regs >= 1 && num_regs <= 4);
3010
3011 /* Prepare the index if any. */
3012 if (opnd->reglist.has_index)
3013 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3014 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
3015 else
3016 tb[0] = '\0';
3017
3018 /* The hyphenated form is preferred for disassembly if there are
3019 more than two registers in the list, and the register numbers
3020 are monotonically increasing in increments of one. */
3021 if (num_regs > 2 && last_reg > first_reg)
3022 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
3023 prefix, last_reg, qlf_name, tb);
3024 else
3025 {
3026 const int reg0 = first_reg;
3027 const int reg1 = (first_reg + 1) & 0x1f;
3028 const int reg2 = (first_reg + 2) & 0x1f;
3029 const int reg3 = (first_reg + 3) & 0x1f;
3030
3031 switch (num_regs)
3032 {
3033 case 1:
3034 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
3035 break;
3036 case 2:
3037 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
3038 prefix, reg1, qlf_name, tb);
3039 break;
3040 case 3:
3041 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
3042 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3043 prefix, reg2, qlf_name, tb);
3044 break;
3045 case 4:
3046 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
3047 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3048 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
3049 break;
3050 }
3051 }
3052 }
3053
3054 /* Print the register+immediate address in OPND to BUF, which has SIZE
3055 characters. BASE is the name of the base register. */
3056
3057 static void
3058 print_immediate_offset_address (char *buf, size_t size,
3059 const aarch64_opnd_info *opnd,
3060 const char *base)
3061 {
3062 if (opnd->addr.writeback)
3063 {
3064 if (opnd->addr.preind)
3065 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
3066 else
3067 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
3068 }
3069 else
3070 {
3071 if (opnd->shifter.operator_present)
3072 {
3073 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3074 snprintf (buf, size, "[%s, #%d, mul vl]",
3075 base, opnd->addr.offset.imm);
3076 }
3077 else if (opnd->addr.offset.imm)
3078 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
3079 else
3080 snprintf (buf, size, "[%s]", base);
3081 }
3082 }
3083
3084 /* Produce the string representation of the register offset address operand
3085 *OPND in the buffer pointed to by BUF of size SIZE. BASE and OFFSET are
3086 the names of the base and offset registers. */
3087 static void
3088 print_register_offset_address (char *buf, size_t size,
3089 const aarch64_opnd_info *opnd,
3090 const char *base, const char *offset)
3091 {
3092 char tb[16]; /* Temporary buffer. */
3093 bfd_boolean print_extend_p = TRUE;
3094 bfd_boolean print_amount_p = TRUE;
3095 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3096
3097 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3098 || !opnd->shifter.amount_present))
3099 {
3100 /* Do not print the shift/extend amount when the amount is zero and
3101 it is not the special case of an 8-bit load/store instruction. */
3102 print_amount_p = FALSE;
3103 /* Likewise, no need to print the shift operator LSL in such a
3104 situation. */
3105 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3106 print_extend_p = FALSE;
3107 }
3108
3109 /* Prepare for the extend/shift. */
3110 if (print_extend_p)
3111 {
3112 if (print_amount_p)
3113 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3114 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3115 (opnd->shifter.amount % 100));
3116 else
3117 snprintf (tb, sizeof (tb), ", %s", shift_name);
3118 }
3119 else
3120 tb[0] = '\0';
3121
3122 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3123 }
3124
3125 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3126 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3127 PC, PCREL_P and ADDRESS are used to pass in and return information about
3128 the PC-relative address calculation, where the PC value is passed in
3129 PC. If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
3130 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3131 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3132
3133 The function serves both the disassembler and the assembler diagnostics
3134 issuer, which is the reason why it lives in this file. */
3135
3136 void
3137 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3138 const aarch64_opcode *opcode,
3139 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3140 bfd_vma *address, char** notes)
3141 {
3142 unsigned int i, num_conds;
3143 const char *name = NULL;
3144 const aarch64_opnd_info *opnd = opnds + idx;
3145 enum aarch64_modifier_kind kind;
3146 uint64_t addr, enum_value;
3147
3148 buf[0] = '\0';
3149 if (pcrel_p)
3150 *pcrel_p = 0;
3151
3152 switch (opnd->type)
3153 {
3154 case AARCH64_OPND_Rd:
3155 case AARCH64_OPND_Rn:
3156 case AARCH64_OPND_Rm:
3157 case AARCH64_OPND_Rt:
3158 case AARCH64_OPND_Rt2:
3159 case AARCH64_OPND_Rs:
3160 case AARCH64_OPND_Ra:
3161 case AARCH64_OPND_Rt_SYS:
3162 case AARCH64_OPND_PAIRREG:
3163 case AARCH64_OPND_SVE_Rm:
3164 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3165 the <ic_op>, therefore we use opnd->present to override the
3166 generic optional-ness information. */
3167 if (opnd->type == AARCH64_OPND_Rt_SYS)
3168 {
3169 if (!opnd->present)
3170 break;
3171 }
3172 /* Omit the operand, e.g. RET. */
3173 else if (optional_operand_p (opcode, idx)
3174 && (opnd->reg.regno
3175 == get_optional_operand_default_value (opcode)))
3176 break;
3177 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3178 || opnd->qualifier == AARCH64_OPND_QLF_X);
3179 snprintf (buf, size, "%s",
3180 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3181 break;
3182
3183 case AARCH64_OPND_Rd_SP:
3184 case AARCH64_OPND_Rn_SP:
3185 case AARCH64_OPND_Rt_SP:
3186 case AARCH64_OPND_SVE_Rn_SP:
3187 case AARCH64_OPND_Rm_SP:
3188 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3189 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3190 || opnd->qualifier == AARCH64_OPND_QLF_X
3191 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3192 snprintf (buf, size, "%s",
3193 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3194 break;
3195
3196 case AARCH64_OPND_Rm_EXT:
3197 kind = opnd->shifter.kind;
3198 assert (idx == 1 || idx == 2);
3199 if ((aarch64_stack_pointer_p (opnds)
3200 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3201 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3202 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3203 && kind == AARCH64_MOD_UXTW)
3204 || (opnd->qualifier == AARCH64_OPND_QLF_X
3205 && kind == AARCH64_MOD_UXTX)))
3206 {
3207 /* 'LSL' is the preferred form in this case. */
3208 kind = AARCH64_MOD_LSL;
3209 if (opnd->shifter.amount == 0)
3210 {
3211 /* Shifter omitted. */
3212 snprintf (buf, size, "%s",
3213 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3214 break;
3215 }
3216 }
3217 if (opnd->shifter.amount)
3218 snprintf (buf, size, "%s, %s #%" PRIi64,
3219 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3220 aarch64_operand_modifiers[kind].name,
3221 opnd->shifter.amount);
3222 else
3223 snprintf (buf, size, "%s, %s",
3224 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3225 aarch64_operand_modifiers[kind].name);
3226 break;
3227
3228 case AARCH64_OPND_Rm_SFT:
3229 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3230 || opnd->qualifier == AARCH64_OPND_QLF_X);
3231 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3232 snprintf (buf, size, "%s",
3233 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3234 else
3235 snprintf (buf, size, "%s, %s #%" PRIi64,
3236 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3237 aarch64_operand_modifiers[opnd->shifter.kind].name,
3238 opnd->shifter.amount);
3239 break;
3240
3241 case AARCH64_OPND_Fd:
3242 case AARCH64_OPND_Fn:
3243 case AARCH64_OPND_Fm:
3244 case AARCH64_OPND_Fa:
3245 case AARCH64_OPND_Ft:
3246 case AARCH64_OPND_Ft2:
3247 case AARCH64_OPND_Sd:
3248 case AARCH64_OPND_Sn:
3249 case AARCH64_OPND_Sm:
3250 case AARCH64_OPND_SVE_VZn:
3251 case AARCH64_OPND_SVE_Vd:
3252 case AARCH64_OPND_SVE_Vm:
3253 case AARCH64_OPND_SVE_Vn:
3254 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3255 opnd->reg.regno);
3256 break;
3257
3258 case AARCH64_OPND_Va:
3259 case AARCH64_OPND_Vd:
3260 case AARCH64_OPND_Vn:
3261 case AARCH64_OPND_Vm:
3262 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3263 aarch64_get_qualifier_name (opnd->qualifier));
3264 break;
3265
3266 case AARCH64_OPND_Ed:
3267 case AARCH64_OPND_En:
3268 case AARCH64_OPND_Em:
3269 case AARCH64_OPND_Em16:
3270 case AARCH64_OPND_SM3_IMM2:
3271 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3272 aarch64_get_qualifier_name (opnd->qualifier),
3273 opnd->reglane.index);
3274 break;
3275
3276 case AARCH64_OPND_VdD1:
3277 case AARCH64_OPND_VnD1:
3278 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3279 break;
3280
3281 case AARCH64_OPND_LVn:
3282 case AARCH64_OPND_LVt:
3283 case AARCH64_OPND_LVt_AL:
3284 case AARCH64_OPND_LEt:
3285 print_register_list (buf, size, opnd, "v");
3286 break;
3287
3288 case AARCH64_OPND_SVE_Pd:
3289 case AARCH64_OPND_SVE_Pg3:
3290 case AARCH64_OPND_SVE_Pg4_5:
3291 case AARCH64_OPND_SVE_Pg4_10:
3292 case AARCH64_OPND_SVE_Pg4_16:
3293 case AARCH64_OPND_SVE_Pm:
3294 case AARCH64_OPND_SVE_Pn:
3295 case AARCH64_OPND_SVE_Pt:
3296 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3297 snprintf (buf, size, "p%d", opnd->reg.regno);
3298 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3299 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3300 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3301 aarch64_get_qualifier_name (opnd->qualifier));
3302 else
3303 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3304 aarch64_get_qualifier_name (opnd->qualifier));
3305 break;
3306
3307 case AARCH64_OPND_SVE_Za_5:
3308 case AARCH64_OPND_SVE_Za_16:
3309 case AARCH64_OPND_SVE_Zd:
3310 case AARCH64_OPND_SVE_Zm_5:
3311 case AARCH64_OPND_SVE_Zm_16:
3312 case AARCH64_OPND_SVE_Zn:
3313 case AARCH64_OPND_SVE_Zt:
3314 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3315 snprintf (buf, size, "z%d", opnd->reg.regno);
3316 else
3317 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3318 aarch64_get_qualifier_name (opnd->qualifier));
3319 break;
3320
3321 case AARCH64_OPND_SVE_ZnxN:
3322 case AARCH64_OPND_SVE_ZtxN:
3323 print_register_list (buf, size, opnd, "z");
3324 break;
3325
3326 case AARCH64_OPND_SVE_Zm3_INDEX:
3327 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3328 case AARCH64_OPND_SVE_Zm3_11_INDEX:
3329 case AARCH64_OPND_SVE_Zm4_11_INDEX:
3330 case AARCH64_OPND_SVE_Zm4_INDEX:
3331 case AARCH64_OPND_SVE_Zn_INDEX:
3332 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3333 aarch64_get_qualifier_name (opnd->qualifier),
3334 opnd->reglane.index);
3335 break;
3336
3337 case AARCH64_OPND_CRn:
3338 case AARCH64_OPND_CRm:
3339 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3340 break;
3341
3342 case AARCH64_OPND_IDX:
3343 case AARCH64_OPND_MASK:
3344 case AARCH64_OPND_IMM:
3345 case AARCH64_OPND_IMM_2:
3346 case AARCH64_OPND_WIDTH:
3347 case AARCH64_OPND_UIMM3_OP1:
3348 case AARCH64_OPND_UIMM3_OP2:
3349 case AARCH64_OPND_BIT_NUM:
3350 case AARCH64_OPND_IMM_VLSL:
3351 case AARCH64_OPND_IMM_VLSR:
3352 case AARCH64_OPND_SHLL_IMM:
3353 case AARCH64_OPND_IMM0:
3354 case AARCH64_OPND_IMMR:
3355 case AARCH64_OPND_IMMS:
3356 case AARCH64_OPND_FBITS:
3357 case AARCH64_OPND_TME_UIMM16:
3358 case AARCH64_OPND_SIMM5:
3359 case AARCH64_OPND_SVE_SHLIMM_PRED:
3360 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3361 case AARCH64_OPND_SVE_SHRIMM_PRED:
3362 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3363 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3364 case AARCH64_OPND_SVE_SIMM5:
3365 case AARCH64_OPND_SVE_SIMM5B:
3366 case AARCH64_OPND_SVE_SIMM6:
3367 case AARCH64_OPND_SVE_SIMM8:
3368 case AARCH64_OPND_SVE_UIMM3:
3369 case AARCH64_OPND_SVE_UIMM7:
3370 case AARCH64_OPND_SVE_UIMM8:
3371 case AARCH64_OPND_SVE_UIMM8_53:
3372 case AARCH64_OPND_IMM_ROT1:
3373 case AARCH64_OPND_IMM_ROT2:
3374 case AARCH64_OPND_IMM_ROT3:
3375 case AARCH64_OPND_SVE_IMM_ROT1:
3376 case AARCH64_OPND_SVE_IMM_ROT2:
3377 case AARCH64_OPND_SVE_IMM_ROT3:
3378 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3379 break;
3380
3381 case AARCH64_OPND_SVE_I1_HALF_ONE:
3382 case AARCH64_OPND_SVE_I1_HALF_TWO:
3383 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3384 {
3385 single_conv_t c;
3386 c.i = opnd->imm.value;
3387 snprintf (buf, size, "#%.1f", c.f);
3388 break;
3389 }
3390
3391 case AARCH64_OPND_SVE_PATTERN:
3392 if (optional_operand_p (opcode, idx)
3393 && opnd->imm.value == get_optional_operand_default_value (opcode))
3394 break;
3395 enum_value = opnd->imm.value;
3396 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3397 if (aarch64_sve_pattern_array[enum_value])
3398 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3399 else
3400 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3401 break;
3402
3403 case AARCH64_OPND_SVE_PATTERN_SCALED:
3404 if (optional_operand_p (opcode, idx)
3405 && !opnd->shifter.operator_present
3406 && opnd->imm.value == get_optional_operand_default_value (opcode))
3407 break;
3408 enum_value = opnd->imm.value;
3409 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3410 if (aarch64_sve_pattern_array[opnd->imm.value])
3411 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3412 else
3413 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3414 if (opnd->shifter.operator_present)
3415 {
3416 size_t len = strlen (buf);
3417 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3418 aarch64_operand_modifiers[opnd->shifter.kind].name,
3419 opnd->shifter.amount);
3420 }
3421 break;
3422
3423 case AARCH64_OPND_SVE_PRFOP:
3424 enum_value = opnd->imm.value;
3425 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3426 if (aarch64_sve_prfop_array[enum_value])
3427 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3428 else
3429 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3430 break;
3431
3432 case AARCH64_OPND_IMM_MOV:
3433 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3434 {
3435 case 4: /* e.g. MOV Wd, #<imm32>. */
3436 {
3437 int imm32 = opnd->imm.value;
3438 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3439 }
3440 break;
3441 case 8: /* e.g. MOV Xd, #<imm64>. */
3442 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3443 opnd->imm.value, opnd->imm.value);
3444 break;
3445 default: assert (0);
3446 }
3447 break;
3448
3449 case AARCH64_OPND_FPIMM0:
3450 snprintf (buf, size, "#0.0");
3451 break;
3452
3453 case AARCH64_OPND_LIMM:
3454 case AARCH64_OPND_AIMM:
3455 case AARCH64_OPND_HALF:
3456 case AARCH64_OPND_SVE_INV_LIMM:
3457 case AARCH64_OPND_SVE_LIMM:
3458 case AARCH64_OPND_SVE_LIMM_MOV:
3459 if (opnd->shifter.amount)
3460 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3461 opnd->shifter.amount);
3462 else
3463 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3464 break;
3465
3466 case AARCH64_OPND_SIMD_IMM:
3467 case AARCH64_OPND_SIMD_IMM_SFT:
3468 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3469 || opnd->shifter.kind == AARCH64_MOD_NONE)
3470 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3471 else
3472 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3473 aarch64_operand_modifiers[opnd->shifter.kind].name,
3474 opnd->shifter.amount);
3475 break;
3476
3477 case AARCH64_OPND_SVE_AIMM:
3478 case AARCH64_OPND_SVE_ASIMM:
3479 if (opnd->shifter.amount)
3480 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3481 opnd->shifter.amount);
3482 else
3483 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3484 break;
3485
3486 case AARCH64_OPND_FPIMM:
3487 case AARCH64_OPND_SIMD_FPIMM:
3488 case AARCH64_OPND_SVE_FPIMM8:
3489 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3490 {
3491 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3492 {
3493 half_conv_t c;
3494 c.i = expand_fp_imm (2, opnd->imm.value);
3495 snprintf (buf, size, "#%.18e", c.f);
3496 }
3497 break;
3498 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3499 {
3500 single_conv_t c;
3501 c.i = expand_fp_imm (4, opnd->imm.value);
3502 snprintf (buf, size, "#%.18e", c.f);
3503 }
3504 break;
3505 case 8: /* e.g. FMOV <Dd>, #<imm>. */
3506 {
3507 double_conv_t c;
3508 c.i = expand_fp_imm (8, opnd->imm.value);
3509 snprintf (buf, size, "#%.18e", c.d);
3510 }
3511 break;
3512 default: assert (0);
3513 }
3514 break;
3515
3516 case AARCH64_OPND_CCMP_IMM:
3517 case AARCH64_OPND_NZCV:
3518 case AARCH64_OPND_EXCEPTION:
3519 case AARCH64_OPND_UIMM4:
3520 case AARCH64_OPND_UIMM4_ADDG:
3521 case AARCH64_OPND_UIMM7:
3522 case AARCH64_OPND_UIMM10:
3523 if (optional_operand_p (opcode, idx) == TRUE
3524 && (opnd->imm.value ==
3525 (int64_t) get_optional_operand_default_value (opcode)))
3526 /* Omit the operand, e.g. DCPS1. */
3527 break;
3528 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3529 break;
3530
3531 case AARCH64_OPND_COND:
3532 case AARCH64_OPND_COND1:
3533 snprintf (buf, size, "%s", opnd->cond->names[0]);
3534 num_conds = ARRAY_SIZE (opnd->cond->names);
3535 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3536 {
3537 size_t len = strlen (buf);
3538 if (i == 1)
3539 snprintf (buf + len, size - len, " // %s = %s",
3540 opnd->cond->names[0], opnd->cond->names[i]);
3541 else
3542 snprintf (buf + len, size - len, ", %s",
3543 opnd->cond->names[i]);
3544 }
3545 break;
3546
3547 case AARCH64_OPND_ADDR_ADRP:
3548 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3549 + opnd->imm.value;
3550 if (pcrel_p)
3551 *pcrel_p = 1;
3552 if (address)
3553 *address = addr;
3554 /* This is not necessary during disassembly, as print_address_func
3555 in the disassemble_info will take care of the printing. But some
3556 other callers may still be interested in getting the string in *BUF,
3557 so here we do snprintf regardless. */
3558 snprintf (buf, size, "#0x%" PRIx64, addr);
3559 break;
3560
3561 case AARCH64_OPND_ADDR_PCREL14:
3562 case AARCH64_OPND_ADDR_PCREL19:
3563 case AARCH64_OPND_ADDR_PCREL21:
3564 case AARCH64_OPND_ADDR_PCREL26:
3565 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3566 if (pcrel_p)
3567 *pcrel_p = 1;
3568 if (address)
3569 *address = addr;
3570 /* This is not necessary during disassembly, as print_address_func
3571 in the disassemble_info will take care of the printing. But some
3572 other callers may still be interested in getting the string in *BUF,
3573 so here we do snprintf regardless. */
3574 snprintf (buf, size, "#0x%" PRIx64, addr);
3575 break;
3576
3577 case AARCH64_OPND_ADDR_SIMPLE:
3578 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3579 case AARCH64_OPND_SIMD_ADDR_POST:
3580 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3581 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3582 {
3583 if (opnd->addr.offset.is_reg)
3584 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3585 else
3586 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3587 }
3588 else
3589 snprintf (buf, size, "[%s]", name);
3590 break;
3591
3592 case AARCH64_OPND_ADDR_REGOFF:
3593 case AARCH64_OPND_SVE_ADDR_R:
3594 case AARCH64_OPND_SVE_ADDR_RR:
3595 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3596 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3597 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3598 case AARCH64_OPND_SVE_ADDR_RX:
3599 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3600 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3601 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3602 print_register_offset_address
3603 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3604 get_offset_int_reg_name (opnd));
3605 break;
3606
3607 case AARCH64_OPND_SVE_ADDR_ZX:
3608 print_register_offset_address
3609 (buf, size, opnd,
3610 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3611 get_64bit_int_reg_name (opnd->addr.offset.regno, 0));
3612 break;
3613
3614 case AARCH64_OPND_SVE_ADDR_RZ:
3615 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3616 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3617 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3618 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3619 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3620 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3621 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3622 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3623 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3624 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3625 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3626 print_register_offset_address
3627 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3628 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3629 break;
3630
3631 case AARCH64_OPND_ADDR_SIMM7:
3632 case AARCH64_OPND_ADDR_SIMM9:
3633 case AARCH64_OPND_ADDR_SIMM9_2:
3634 case AARCH64_OPND_ADDR_SIMM10:
3635 case AARCH64_OPND_ADDR_SIMM11:
3636 case AARCH64_OPND_ADDR_SIMM13:
3637 case AARCH64_OPND_ADDR_OFFSET:
3638 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3639 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3640 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3641 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3642 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3643 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3644 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3645 case AARCH64_OPND_SVE_ADDR_RI_U6:
3646 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3647 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3648 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3649 print_immediate_offset_address
3650 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3651 break;
3652
3653 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3654 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3655 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3656 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3657 print_immediate_offset_address
3658 (buf, size, opnd,
3659 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3660 break;
3661
3662 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3663 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3664 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3665 print_register_offset_address
3666 (buf, size, opnd,
3667 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3668 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3669 break;
3670
3671 case AARCH64_OPND_ADDR_UIMM12:
3672 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3673 if (opnd->addr.offset.imm)
3674 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3675 else
3676 snprintf (buf, size, "[%s]", name);
3677 break;
3678
3679 case AARCH64_OPND_SYSREG:
3680 for (i = 0; aarch64_sys_regs[i].name; ++i)
3681 {
3682 bfd_boolean exact_match
3683 = (aarch64_sys_regs[i].flags & opnd->sysreg.flags)
3684 == opnd->sysreg.flags;
3685
3686 /* Try to find an exact match, but if that fails, return the first
3687 partial match that was found. */
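/* For example, dbgdtrrx_el0 (F_REG_READ) and dbgdtrtx_el0 (F_REG_WRITE)
   below share one encoding; the flags decide which name is the exact
   match for the access direction.  */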
3688 if (aarch64_sys_regs[i].value == opnd->sysreg.value
3689 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i])
3690 && (name == NULL || exact_match))
3691 {
3692 name = aarch64_sys_regs[i].name;
3693 if (exact_match)
3694 {
3695 if (notes)
3696 *notes = NULL;
3697 break;
3698 }
3699
3700 /* If we didn't match exactly, the presence of a flag
3701 indicates what we didn't want for this instruction, e.g. if
3702 F_REG_READ is there, we were actually looking for a register
3703 to write to. See aarch64_ext_sysreg. */
3704 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
3705 *notes = _("reading from a write-only register");
3706 else if (aarch64_sys_regs[i].flags & F_REG_READ)
3707 *notes = _("writing to a read-only register");
3708 }
3709 }
3710
3711 if (name)
3712 snprintf (buf, size, "%s", name);
3713 else
3714 {
3715 /* Implementation defined system register. */
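/* The value packs op0:op1:CRn:CRm:op2 (see CPENC below); e.g. a
   hypothetical value of 0xdca8 would print as "s3_3_c9_c5_0".  */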
3716 unsigned int value = opnd->sysreg.value;
3717 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3718 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3719 value & 0x7);
3720 }
3721 break;
3722
3723 case AARCH64_OPND_PSTATEFIELD:
3724 for (i = 0; aarch64_pstatefields[i].name; ++i)
3725 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3726 break;
3727 assert (aarch64_pstatefields[i].name);
3728 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3729 break;
3730
3731 case AARCH64_OPND_SYSREG_AT:
3732 case AARCH64_OPND_SYSREG_DC:
3733 case AARCH64_OPND_SYSREG_IC:
3734 case AARCH64_OPND_SYSREG_TLBI:
3735 case AARCH64_OPND_SYSREG_SR:
3736 snprintf (buf, size, "%s", opnd->sysins_op->name);
3737 break;
3738
3739 case AARCH64_OPND_BARRIER:
3740 snprintf (buf, size, "%s", opnd->barrier->name);
3741 break;
3742
3743 case AARCH64_OPND_BARRIER_ISB:
3744 /* Operand can be omitted, e.g. in DCPS1. */
3745 if (! optional_operand_p (opcode, idx)
3746 || (opnd->barrier->value
3747 != get_optional_operand_default_value (opcode)))
3748 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3749 break;
3750
3751 case AARCH64_OPND_PRFOP:
3752 if (opnd->prfop->name != NULL)
3753 snprintf (buf, size, "%s", opnd->prfop->name);
3754 else
3755 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3756 break;
3757
3758 case AARCH64_OPND_BARRIER_PSB:
3759 case AARCH64_OPND_BTI_TARGET:
3760 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
3761 snprintf (buf, size, "%s", opnd->hint_option->name);
3762 break;
3763
3764 default:
3765 assert (0);
3766 }
3767 }
3768 \f
3769 #define CPENC(op0,op1,crn,crm,op2) \
3770 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3771 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
3772 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3773 /* for 3.9.10 System Instructions */
3774 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
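/* For example, CPENC (3, 3, C9, C12, 0) evaluates to
   (3 << 14) | (3 << 11) | (9 << 7) | (12 << 3) | 0 == 0xdce0,
   i.e. the op0:op1:CRn:CRm:op2 encoding used for pmcr_el0 below.  */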
3775
3776 #define C0 0
3777 #define C1 1
3778 #define C2 2
3779 #define C3 3
3780 #define C4 4
3781 #define C5 5
3782 #define C6 6
3783 #define C7 7
3784 #define C8 8
3785 #define C9 9
3786 #define C10 10
3787 #define C11 11
3788 #define C12 12
3789 #define C13 13
3790 #define C14 14
3791 #define C15 15
3792
3793 /* TODO: there is one more issue that needs to be resolved:
3794 1. handle cpu-implementation-defined system registers. */
3795 const aarch64_sys_reg aarch64_sys_regs [] =
3796 {
3797 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3798 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3799 { "elr_el1", CPEN_(0,C0,1), 0 },
3800 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3801 { "sp_el0", CPEN_(0,C1,0), 0 },
3802 { "spsel", CPEN_(0,C2,0), 0 },
3803 { "daif", CPEN_(3,C2,1), 0 },
3804 { "currentel", CPEN_(0,C2,2), F_REG_READ }, /* RO */
3805 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3806 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3807 { "nzcv", CPEN_(3,C2,0), 0 },
3808 { "ssbs", CPEN_(3,C2,6), F_ARCHEXT },
3809 { "fpcr", CPEN_(3,C4,0), 0 },
3810 { "fpsr", CPEN_(3,C4,1), 0 },
3811 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3812 { "dlr_el0", CPEN_(3,C5,1), 0 },
3813 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3814 { "elr_el2", CPEN_(4,C0,1), 0 },
3815 { "sp_el1", CPEN_(4,C1,0), 0 },
3816 { "spsr_irq", CPEN_(4,C3,0), 0 },
3817 { "spsr_abt", CPEN_(4,C3,1), 0 },
3818 { "spsr_und", CPEN_(4,C3,2), 0 },
3819 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3820 { "spsr_el3", CPEN_(6,C0,0), 0 },
3821 { "elr_el3", CPEN_(6,C0,1), 0 },
3822 { "sp_el2", CPEN_(6,C1,0), 0 },
3823 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3824 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3825 { "midr_el1", CPENC(3,0,C0,C0,0), F_REG_READ }, /* RO */
3826 { "ctr_el0", CPENC(3,3,C0,C0,1), F_REG_READ }, /* RO */
3827 { "mpidr_el1", CPENC(3,0,C0,C0,5), F_REG_READ }, /* RO */
3828 { "revidr_el1", CPENC(3,0,C0,C0,6), F_REG_READ }, /* RO */
3829 { "aidr_el1", CPENC(3,1,C0,C0,7), F_REG_READ }, /* RO */
3830 { "dczid_el0", CPENC(3,3,C0,C0,7), F_REG_READ }, /* RO */
3831 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), F_REG_READ }, /* RO */
3832 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), F_REG_READ }, /* RO */
3833 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), F_REG_READ }, /* RO */
3834 { "id_pfr2_el1", CPENC(3,0,C0,C3,4), F_ARCHEXT | F_REG_READ}, /* RO */
3835 { "id_afr0_el1", CPENC(3,0,C0,C1,3), F_REG_READ }, /* RO */
3836 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), F_REG_READ }, /* RO */
3837 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), F_REG_READ }, /* RO */
3838 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), F_REG_READ }, /* RO */
3839 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), F_REG_READ }, /* RO */
3840 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), F_REG_READ }, /* RO */
3841 { "id_isar0_el1", CPENC(3,0,C0,C2,0), F_REG_READ }, /* RO */
3842 { "id_isar1_el1", CPENC(3,0,C0,C2,1), F_REG_READ }, /* RO */
3843 { "id_isar2_el1", CPENC(3,0,C0,C2,2), F_REG_READ }, /* RO */
3844 { "id_isar3_el1", CPENC(3,0,C0,C2,3), F_REG_READ }, /* RO */
3845 { "id_isar4_el1", CPENC(3,0,C0,C2,4), F_REG_READ }, /* RO */
3846 { "id_isar5_el1", CPENC(3,0,C0,C2,5), F_REG_READ }, /* RO */
3847 { "mvfr0_el1", CPENC(3,0,C0,C3,0), F_REG_READ }, /* RO */
3848 { "mvfr1_el1", CPENC(3,0,C0,C3,1), F_REG_READ }, /* RO */
3849 { "mvfr2_el1", CPENC(3,0,C0,C3,2), F_REG_READ }, /* RO */
3850 { "ccsidr_el1", CPENC(3,1,C0,C0,0), F_REG_READ }, /* RO */
3851 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), F_REG_READ }, /* RO */
3852 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), F_REG_READ }, /* RO */
3853 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), F_REG_READ }, /* RO */
3854 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), F_REG_READ }, /* RO */
3855 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), F_REG_READ }, /* RO */
3856 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), F_REG_READ }, /* RO */
3857 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), F_REG_READ }, /* RO */
3858 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), F_REG_READ }, /* RO */
3859 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT | F_REG_READ }, /* RO */
3860 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), F_REG_READ }, /* RO */
3861 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), F_REG_READ }, /* RO */
3862 { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT | F_REG_READ }, /* RO */
3863 { "clidr_el1", CPENC(3,1,C0,C0,1), F_REG_READ }, /* RO */
3864 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 },
3865 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3866 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3867 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3868 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3869 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3870 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3871 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3872 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3873 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3874 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3875 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3876 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3877 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3878 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3879 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3880 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3881 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3882 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3883 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3884 { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3885 { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3886 { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3887 { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3888 { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3889 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3890 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3891 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3892 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3893 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3894 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3895 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3896 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3897 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3898 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3899 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3900 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3901 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3902 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3903 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3904 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3905 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3906 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3907 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3908 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3909 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3910 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3911 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3912 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3913 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3914 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3915 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3916 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3917 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3918 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3919 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3920 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3921 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3922 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3923 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3924 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT },
3925 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3926 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3927 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3928 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3929 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3930 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3931 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3932 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3933 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3934 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3935 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3936 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3937 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3938 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3939 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3940 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3941 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3942 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3943 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3944 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3945 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3946 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3947 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3948 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3949 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3950 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3951 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3952 { "rvbar_el1", CPENC(3,0,C12,C0,1), F_REG_READ }, /* RO */
3953 { "rvbar_el2", CPENC(3,4,C12,C0,1), F_REG_READ }, /* RO */
3954 { "rvbar_el3", CPENC(3,6,C12,C0,1), F_REG_READ }, /* RO */
3955 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3956 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3957 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3958 { "isr_el1", CPENC(3,0,C12,C1,0), F_REG_READ }, /* RO */
3959 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3960 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3961 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3962 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3963 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3964 { "rndr", CPENC(3,3,C2,C4,0), F_ARCHEXT | F_REG_READ }, /* RO */
3965 { "rndrrs", CPENC(3,3,C2,C4,1), F_ARCHEXT | F_REG_READ }, /* RO */
3966 { "tco", CPENC(3,3,C4,C2,7), F_ARCHEXT },
3967 { "tfsre0_el1", CPENC(3,0,C6,C6,1), F_ARCHEXT },
3968 { "tfsr_el1", CPENC(3,0,C6,C5,0), F_ARCHEXT },
3969 { "tfsr_el2", CPENC(3,4,C6,C5,0), F_ARCHEXT },
3970 { "tfsr_el3", CPENC(3,6,C6,C6,0), F_ARCHEXT },
3971 { "tfsr_el12", CPENC(3,5,C6,C6,0), F_ARCHEXT },
3972 { "rgsr_el1", CPENC(3,0,C1,C0,5), F_ARCHEXT },
3973 { "gcr_el1", CPENC(3,0,C1,C0,6), F_ARCHEXT },
3974 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3975 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RW */
3976 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3977 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3978 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3979 { "scxtnum_el0", CPENC(3,3,C13,C0,7), F_ARCHEXT },
3980 { "scxtnum_el1", CPENC(3,0,C13,C0,7), F_ARCHEXT },
3981 { "scxtnum_el2", CPENC(3,4,C13,C0,7), F_ARCHEXT },
3982 { "scxtnum_el12", CPENC(3,5,C13,C0,7), F_ARCHEXT },
3983 { "scxtnum_el3", CPENC(3,6,C13,C0,7), F_ARCHEXT },
3984 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3985 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RW */
3986 { "cntpct_el0", CPENC(3,3,C14,C0,1), F_REG_READ }, /* RO */
3987 { "cntvct_el0", CPENC(3,3,C14,C0,2), F_REG_READ }, /* RO */
3988 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3989 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3990 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3991 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3992 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3993 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3994 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3995 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3996 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3997 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3998 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3999 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
4000 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
4001 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
4002 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
4003 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
4004 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
4005 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
4006 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
4007 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
4008 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
4009 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
4010 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
4011 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
4012 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
4013 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
4014 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
4015 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
4016 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
4017 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
4018 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), F_REG_READ }, /* r */
4019 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
4020 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
4021 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), F_REG_READ }, /* r */
4022 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), F_REG_WRITE }, /* w */
4023 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 },
4024 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 },
4025 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
4026 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
4027 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
4028 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
4029 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
4030 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
4031 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
4032 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
4033 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
4034 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
4035 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
4036 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
4037 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
4038 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
4039 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
4040 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
4041 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
4042 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
4043 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
4044 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
4045 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
4046 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
4047 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
4048 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
4049 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
4050 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
4051 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
4052 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
4053 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
4054 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
4055 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
4056 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
4057 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
4058 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
4059 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
4060 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
4061 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
4062 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
4063 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
4064 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
4065 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
4066 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
4067 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
4068 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
4069 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
4070 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
4071 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
4072 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
4073 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
4074 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
4075 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
4076 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
4077 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
4078 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
4079 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
4080 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
4081 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
4082 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
4083 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
4084 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
4085 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
4086 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
4087 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
4088 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
4089 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
4090 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
4091 { "mdrar_el1", CPENC(2,0,C1, C0, 0), F_REG_READ }, /* r */
4092 { "oslar_el1", CPENC(2,0,C1, C0, 4), F_REG_WRITE }, /* w */
4093 { "oslsr_el1", CPENC(2,0,C1, C1, 4), F_REG_READ }, /* r */
4094 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
4095 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
4096 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
4097 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
4098 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), F_REG_READ }, /* r */
4099 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
4100 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
4101 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
4102 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT | F_REG_READ }, /* ro */
4103 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
4104 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
4105 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
4106 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
4107 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
4108 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
4109 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* rw */
4110 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
4111 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
4112 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
4113 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
4114 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
4115 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
4116 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), F_REG_WRITE }, /* w */
4117 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
4118 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), F_REG_READ }, /* r */
4119 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), F_REG_READ }, /* r */
4120 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
4121 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
4122 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
4123 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
4124 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
4125 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
4126 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
4127 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
4128 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
4129 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
4130 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
4131 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
4132 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
4133 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
4134 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
4135 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
4136 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
4137 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
4138 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
4139 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
4140 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
4141 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
4142 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
4143 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
4144 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
4145 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
4146 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
4147 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
4148 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
4149 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
4150 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
4151 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
4152 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
4153 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
4154 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
4155 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
4156 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
4157 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
4158 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
4159 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
4160 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
4161 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
4162 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
4163 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
4164 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
4165 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
4166 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
4167 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
4168 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
4169 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
4170 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
4171 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
4172 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
4173 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
4174 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
4175 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
4176 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
4177 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
4178 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
4179 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
4180 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
4181 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
4182 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
4183 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
4184 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
4185 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
4186 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
4187 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
4188 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
4189 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
4190
4191 { "dit", CPEN_ (3, C2, 5), F_ARCHEXT },
4192 { "vstcr_el2", CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
4193 { "vsttbr_el2", CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
4194 { "cnthvs_tval_el2", CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
4195 { "cnthvs_cval_el2", CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
4196 { "cnthvs_ctl_el2", CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
4197 { "cnthps_tval_el2", CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
4198 { "cnthps_cval_el2", CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
4199 { "cnthps_ctl_el2", CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
4200 { "sder32_el2", CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
4201 { "vncr_el2", CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
4202 { 0, CPENC(0,0,0,0,0), 0 },
4203 };
4204
4205 bfd_boolean
4206 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4207 {
4208 return (reg->flags & F_DEPRECATED) != 0;
4209 }
4210
4211 bfd_boolean
4212 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
4213 const aarch64_sys_reg *reg)
4214 {
4215 if (!(reg->flags & F_ARCHEXT))
4216 return TRUE;
4217
4218 /* PAN. Values are from aarch64_sys_regs. */
4219 if (reg->value == CPEN_(0,C2,3)
4220 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4221 return FALSE;
4222
4223 /* SCXTNUM_ELx registers. */
4224 if ((reg->value == CPENC (3, 3, C13, C0, 7)
4225 || reg->value == CPENC (3, 0, C13, C0, 7)
4226 || reg->value == CPENC (3, 4, C13, C0, 7)
4227 || reg->value == CPENC (3, 6, C13, C0, 7)
4228 || reg->value == CPENC (3, 5, C13, C0, 7))
4229 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SCXTNUM))
4230 return FALSE;
4231
4232 /* ID_PFR2_EL1 register. */
4233 if (reg->value == CPENC(3, 0, C0, C3, 4)
4234 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_ID_PFR2))
4235 return FALSE;
4236
4237 /* SSBS. Values are from aarch64_sys_regs. */
4238 if (reg->value == CPEN_(3,C2,6)
4239 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
4240 return FALSE;
4241
4242 /* Virtualization host extensions: system registers. */
4243 if ((reg->value == CPENC (3, 4, C2, C0, 1)
4244 || reg->value == CPENC (3, 4, C13, C0, 1)
4245 || reg->value == CPENC (3, 4, C14, C3, 0)
4246 || reg->value == CPENC (3, 4, C14, C3, 1)
4247 || reg->value == CPENC (3, 4, C14, C3, 2))
4248 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4249 return FALSE;
4250
4251 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
4252 if ((reg->value == CPEN_ (5, C0, 0)
4253 || reg->value == CPEN_ (5, C0, 1)
4254 || reg->value == CPENC (3, 5, C1, C0, 0)
4255 || reg->value == CPENC (3, 5, C1, C0, 2)
4256 || reg->value == CPENC (3, 5, C2, C0, 0)
4257 || reg->value == CPENC (3, 5, C2, C0, 1)
4258 || reg->value == CPENC (3, 5, C2, C0, 2)
4259 || reg->value == CPENC (3, 5, C5, C1, 0)
4260 || reg->value == CPENC (3, 5, C5, C1, 1)
4261 || reg->value == CPENC (3, 5, C5, C2, 0)
4262 || reg->value == CPENC (3, 5, C6, C0, 0)
4263 || reg->value == CPENC (3, 5, C10, C2, 0)
4264 || reg->value == CPENC (3, 5, C10, C3, 0)
4265 || reg->value == CPENC (3, 5, C12, C0, 0)
4266 || reg->value == CPENC (3, 5, C13, C0, 1)
4267 || reg->value == CPENC (3, 5, C14, C1, 0))
4268 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4269 return FALSE;
4270
4271 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
4272 if ((reg->value == CPENC (3, 5, C14, C2, 0)
4273 || reg->value == CPENC (3, 5, C14, C2, 1)
4274 || reg->value == CPENC (3, 5, C14, C2, 2)
4275 || reg->value == CPENC (3, 5, C14, C3, 0)
4276 || reg->value == CPENC (3, 5, C14, C3, 1)
4277 || reg->value == CPENC (3, 5, C14, C3, 2))
4278 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4279 return FALSE;
4280
4281 /* ARMv8.2 features. */
4282
4283 /* ID_AA64MMFR2_EL1. */
4284 if (reg->value == CPENC (3, 0, C0, C7, 2)
4285 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4286 return FALSE;
4287
4288 /* PSTATE.UAO. */
4289 if (reg->value == CPEN_ (0, C2, 4)
4290 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4291 return FALSE;
4292
4293 /* RAS extension. */
4294
4295 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
4296 ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1. */
4297 if ((reg->value == CPENC (3, 0, C5, C3, 0)
4298 || reg->value == CPENC (3, 0, C5, C3, 1)
4299 || reg->value == CPENC (3, 0, C5, C3, 2)
4300 || reg->value == CPENC (3, 0, C5, C3, 3)
4301 || reg->value == CPENC (3, 0, C5, C4, 0)
4302 || reg->value == CPENC (3, 0, C5, C4, 1)
4303 || reg->value == CPENC (3, 0, C5, C4, 2)
4304 || reg->value == CPENC (3, 0, C5, C4, 3)
4305 || reg->value == CPENC (3, 0, C5, C5, 0)
4306 || reg->value == CPENC (3, 0, C5, C5, 1))
4307 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4308 return FALSE;
4309
4310 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
4311 if ((reg->value == CPENC (3, 4, C5, C2, 3)
4312 || reg->value == CPENC (3, 0, C12, C1, 1)
4313 || reg->value == CPENC (3, 4, C12, C1, 1))
4314 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4315 return FALSE;
4316
4317 /* Statistical Profiling extension. */
4318 if ((reg->value == CPENC (3, 0, C9, C10, 0)
4319 || reg->value == CPENC (3, 0, C9, C10, 1)
4320 || reg->value == CPENC (3, 0, C9, C10, 3)
4321 || reg->value == CPENC (3, 0, C9, C10, 7)
4322 || reg->value == CPENC (3, 0, C9, C9, 0)
4323 || reg->value == CPENC (3, 0, C9, C9, 2)
4324 || reg->value == CPENC (3, 0, C9, C9, 3)
4325 || reg->value == CPENC (3, 0, C9, C9, 4)
4326 || reg->value == CPENC (3, 0, C9, C9, 5)
4327 || reg->value == CPENC (3, 0, C9, C9, 6)
4328 || reg->value == CPENC (3, 0, C9, C9, 7)
4329 || reg->value == CPENC (3, 4, C9, C9, 0)
4330 || reg->value == CPENC (3, 5, C9, C9, 0))
4331 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
4332 return FALSE;
4333
4334 /* ARMv8.3 Pointer authentication keys. */
4335 if ((reg->value == CPENC (3, 0, C2, C1, 0)
4336 || reg->value == CPENC (3, 0, C2, C1, 1)
4337 || reg->value == CPENC (3, 0, C2, C1, 2)
4338 || reg->value == CPENC (3, 0, C2, C1, 3)
4339 || reg->value == CPENC (3, 0, C2, C2, 0)
4340 || reg->value == CPENC (3, 0, C2, C2, 1)
4341 || reg->value == CPENC (3, 0, C2, C2, 2)
4342 || reg->value == CPENC (3, 0, C2, C2, 3)
4343 || reg->value == CPENC (3, 0, C2, C3, 0)
4344 || reg->value == CPENC (3, 0, C2, C3, 1))
4345 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
4346 return FALSE;
4347
4348 /* SVE. */
4349 if ((reg->value == CPENC (3, 0, C0, C4, 4)
4350 || reg->value == CPENC (3, 0, C1, C2, 0)
4351 || reg->value == CPENC (3, 4, C1, C2, 0)
4352 || reg->value == CPENC (3, 6, C1, C2, 0)
4353 || reg->value == CPENC (3, 5, C1, C2, 0)
4354 || reg->value == CPENC (3, 0, C0, C0, 7))
4355 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
4356 return FALSE;
4357
4358 /* ARMv8.4 features. */
4359
4360 /* PSTATE.DIT. */
4361 if (reg->value == CPEN_ (3, C2, 5)
4362 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4363 return FALSE;
4364
4365 /* Virtualization extensions. */
4366 if ((reg->value == CPENC(3, 4, C2, C6, 2)
4367 || reg->value == CPENC(3, 4, C2, C6, 0)
4368 || reg->value == CPENC(3, 4, C14, C4, 0)
4369 || reg->value == CPENC(3, 4, C14, C4, 2)
4370 || reg->value == CPENC(3, 4, C14, C4, 1)
4371 || reg->value == CPENC(3, 4, C14, C5, 0)
4372 || reg->value == CPENC(3, 4, C14, C5, 2)
4373 || reg->value == CPENC(3, 4, C14, C5, 1)
4374 || reg->value == CPENC(3, 4, C1, C3, 1)
4375 || reg->value == CPENC(3, 4, C2, C2, 0))
4376 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4377 return FALSE;
4378
4379 /* ARMv8.4 TLB instructions. */
4380 if ((reg->value == CPENS (0, C8, C1, 0)
4381 || reg->value == CPENS (0, C8, C1, 1)
4382 || reg->value == CPENS (0, C8, C1, 2)
4383 || reg->value == CPENS (0, C8, C1, 3)
4384 || reg->value == CPENS (0, C8, C1, 5)
4385 || reg->value == CPENS (0, C8, C1, 7)
4386 || reg->value == CPENS (4, C8, C4, 0)
4387 || reg->value == CPENS (4, C8, C4, 4)
4388 || reg->value == CPENS (4, C8, C1, 1)
4389 || reg->value == CPENS (4, C8, C1, 5)
4390 || reg->value == CPENS (4, C8, C1, 6)
4391 || reg->value == CPENS (6, C8, C1, 1)
4392 || reg->value == CPENS (6, C8, C1, 5)
4393 || reg->value == CPENS (4, C8, C1, 0)
4394 || reg->value == CPENS (4, C8, C1, 4)
4395 || reg->value == CPENS (6, C8, C1, 0)
4396 || reg->value == CPENS (0, C8, C6, 1)
4397 || reg->value == CPENS (0, C8, C6, 3)
4398 || reg->value == CPENS (0, C8, C6, 5)
4399 || reg->value == CPENS (0, C8, C6, 7)
4400 || reg->value == CPENS (0, C8, C2, 1)
4401 || reg->value == CPENS (0, C8, C2, 3)
4402 || reg->value == CPENS (0, C8, C2, 5)
4403 || reg->value == CPENS (0, C8, C2, 7)
4404 || reg->value == CPENS (0, C8, C5, 1)
4405 || reg->value == CPENS (0, C8, C5, 3)
4406 || reg->value == CPENS (0, C8, C5, 5)
4407 || reg->value == CPENS (0, C8, C5, 7)
4408 || reg->value == CPENS (4, C8, C0, 2)
4409 || reg->value == CPENS (4, C8, C0, 6)
4410 || reg->value == CPENS (4, C8, C4, 2)
4411 || reg->value == CPENS (4, C8, C4, 6)
4412 || reg->value == CPENS (4, C8, C4, 3)
4413 || reg->value == CPENS (4, C8, C4, 7)
4414 || reg->value == CPENS (4, C8, C6, 1)
4415 || reg->value == CPENS (4, C8, C6, 5)
4416 || reg->value == CPENS (4, C8, C2, 1)
4417 || reg->value == CPENS (4, C8, C2, 5)
4418 || reg->value == CPENS (4, C8, C5, 1)
4419 || reg->value == CPENS (4, C8, C5, 5)
4420 || reg->value == CPENS (6, C8, C6, 1)
4421 || reg->value == CPENS (6, C8, C6, 5)
4422 || reg->value == CPENS (6, C8, C2, 1)
4423 || reg->value == CPENS (6, C8, C2, 5)
4424 || reg->value == CPENS (6, C8, C5, 1)
4425 || reg->value == CPENS (6, C8, C5, 5))
4426 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4427 return FALSE;
4428
4429 /* Random Number Instructions. For now they are available
4430 (and optional) only with ARMv8.5-A. */
4431 if ((reg->value == CPENC (3, 3, C2, C4, 0)
4432 || reg->value == CPENC (3, 3, C2, C4, 1))
4433 && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RNG)
4434 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_5)))
4435 return FALSE;
4436
4437 /* System Registers in ARMv8.5-A with AARCH64_FEATURE_MEMTAG. */
4438 if ((reg->value == CPENC (3, 3, C4, C2, 7)
4439 || reg->value == CPENC (3, 0, C6, C6, 1)
4440 || reg->value == CPENC (3, 0, C6, C5, 0)
4441 || reg->value == CPENC (3, 4, C6, C5, 0)
4442 || reg->value == CPENC (3, 6, C6, C6, 0)
4443 || reg->value == CPENC (3, 5, C6, C6, 0)
4444 || reg->value == CPENC (3, 0, C1, C0, 5)
4445 || reg->value == CPENC (3, 0, C1, C0, 6))
4446 && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG)))
4447 return FALSE;
4448
4449 return TRUE;
4450 }
4451
4452 /* The CPENC below is fairly misleading: the fields here are not in CPENC
4453 form. They are simply op1:op2, with op1 in bits [5:3] and op2 in bits
4454 [2:0]. The fields are encoded by ins_pstatefield, which just shifts the
4455 value by the width of the fields in a loop, so if you CPENC them only the
4456 first value will be set and the rest are masked out to 0. As an example,
4457 "dit" has op1 = 3 and op2 = 2; the value wanted is 0b011010 (0x1a), while
4458 CPENC would instead produce 0b110000000001000000 (0x30040). */
4459 const aarch64_sys_reg aarch64_pstatefields [] =
4460 {
4461 { "spsel", 0x05, 0 },
4462 { "daifset", 0x1e, 0 },
4463 { "daifclr", 0x1f, 0 },
4464 { "pan", 0x04, F_ARCHEXT },
4465 { "uao", 0x03, F_ARCHEXT },
4466 { "ssbs", 0x19, F_ARCHEXT },
4467 { "dit", 0x1a, F_ARCHEXT },
4468 { "tco", 0x1c, F_ARCHEXT },
4469 { 0, CPENC(0,0,0,0,0), 0 },
4470 };
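/* E.g. "msr daifset, #2" uses the 0x1e (op1 = 3, op2 = 6) entry above;
   for the MSR (immediate) form the #imm itself goes in the CRm field.  */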
4471
4472 bfd_boolean
4473 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4474 const aarch64_sys_reg *reg)
4475 {
4476 if (!(reg->flags & F_ARCHEXT))
4477 return TRUE;
4478
4479 /* PAN. Values are from aarch64_pstatefields. */
4480 if (reg->value == 0x04
4481 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4482 return FALSE;
4483
4484 /* UAO. Values are from aarch64_pstatefields. */
4485 if (reg->value == 0x03
4486 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4487 return FALSE;
4488
4489 /* SSBS. Values are from aarch64_pstatefields. */
4490 if (reg->value == 0x19
4491 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
4492 return FALSE;
4493
4494 /* DIT. Values are from aarch64_pstatefields. */
4495 if (reg->value == 0x1a
4496 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4497 return FALSE;
4498
4499 /* TCO. Values are from aarch64_pstatefields. */
4500 if (reg->value == 0x1c
4501 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
4502 return FALSE;
4503
4504 return TRUE;
4505 }
4506
4507 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
4508 {
4509 { "ialluis", CPENS(0,C7,C1,0), 0 },
4510 { "iallu", CPENS(0,C7,C5,0), 0 },
4511 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
4512 { 0, CPENS(0,0,0,0), 0 }
4513 };
4514
4515 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
4516 {
4517 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
4518 { "gva", CPENS (3, C7, C4, 3), F_HASXT | F_ARCHEXT },
4519 { "gzva", CPENS (3, C7, C4, 4), F_HASXT | F_ARCHEXT },
4520 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
4521 { "igvac", CPENS (0, C7, C6, 3), F_HASXT | F_ARCHEXT },
4522 { "igsw", CPENS (0, C7, C6, 4), F_HASXT | F_ARCHEXT },
4523 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
4524 { "igdvac", CPENS (0, C7, C6, 5), F_HASXT | F_ARCHEXT },
4525 { "igdsw", CPENS (0, C7, C6, 6), F_HASXT | F_ARCHEXT },
4526 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
4527 { "cgvac", CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
4528 { "cgdvac", CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
4529 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
4530 { "cgsw", CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
4531 { "cgdsw", CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
4532 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
4533 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
4534 { "cgvap", CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
4535 { "cgdvap", CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
4536 { "cvadp", CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
4537 { "cgvadp", CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
4538 { "cgdvadp", CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
4539 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
4540 { "cigvac", CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
4541 { "cigdvac", CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
4542 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
4543 { "cigsw", CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
4544 { "cigdsw", CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
4545 { 0, CPENS(0,0,0,0), 0 }
4546 };
4547
4548 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
4549 {
4550 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
4551 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
4552 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
4553 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
4554 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
4555 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
4556 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
4557 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
4558 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
4559 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
4560 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
4561 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
4562 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
4563 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
4564 { 0, CPENS(0,0,0,0), 0 }
4565 };
4566
4567 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
4568 {
4569 { "vmalle1", CPENS(0,C8,C7,0), 0 },
4570 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
4571 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
4572 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
4573 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
4574 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
4575 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
4576 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
4577 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
4578 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
4579 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
4580 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
4581 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
4582 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
4583 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
4584 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
4585 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
4586 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
4587 { "alle2", CPENS(4,C8,C7,0), 0 },
4588 { "alle2is", CPENS(4,C8,C3,0), 0 },
4589 { "alle1", CPENS(4,C8,C7,4), 0 },
4590 { "alle1is", CPENS(4,C8,C3,4), 0 },
4591 { "alle3", CPENS(6,C8,C7,0), 0 },
4592 { "alle3is", CPENS(6,C8,C3,0), 0 },
4593 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
4594 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
4595 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
4596 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
4597 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
4598 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
4599 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
4600 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
4601
4602 { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
4603 { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
4604 { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
4605 { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
4606 { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
4607 { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
4608 { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
4609 { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
4610 { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
4611 { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
4612 { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
4613 { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
4614 { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
4615 { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
4616 { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
4617 { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },
4618
4619 { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
4620 { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
4621 { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
4622 { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
4623 { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
4624 { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
4625 { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
4626 { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
4627 { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
4628 { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
4629 { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
4630 { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
4631 { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
4632 { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
4633 { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
4634 { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
4635 { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
4636 { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
4637 { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
4638 { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
4639 { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
4640 { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
4641 { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
4642 { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
4643 { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
4644 { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
4645 { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
4646 { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
4647 { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
4648 { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },
4649
4650 { 0, CPENS(0,0,0,0), 0 }
4651 };
4652
4653 const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
4654 {
4655 /* RCTX is somewhat unique in that it has different op2 values
4656 depending on the instruction in which it is used (cfp/dvp/cpp).
4657 Thus op2 is masked out here and instead encoded directly in the
4658 aarch64_opcode_table entries for the respective instructions. */
4659 { "rctx", CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */
4660
4661 { 0, CPENS(0,0,0,0), 0 }
4662 };
4663
4664 bfd_boolean
4665 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4666 {
4667 return (sys_ins_reg->flags & F_HASXT) != 0;
4668 }
4669
4670 extern bfd_boolean
4671 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4672 const aarch64_sys_ins_reg *reg)
4673 {
4674 if (!(reg->flags & F_ARCHEXT))
4675 return TRUE;
4676
4677 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4678 if (reg->value == CPENS (3, C7, C12, 1)
4679 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4680 return FALSE;
4681
4682 /* DC CVADP. Values are from aarch64_sys_regs_dc. */
4683 if (reg->value == CPENS (3, C7, C13, 1)
4684 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
4685 return FALSE;
4686
4687 /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension. */
4688 if ((reg->value == CPENS (0, C7, C6, 3)
4689 || reg->value == CPENS (0, C7, C6, 4)
4690 || reg->value == CPENS (0, C7, C10, 4)
4691 || reg->value == CPENS (0, C7, C14, 4)
4692 || reg->value == CPENS (3, C7, C10, 3)
4693 || reg->value == CPENS (3, C7, C12, 3)
4694 || reg->value == CPENS (3, C7, C13, 3)
4695 || reg->value == CPENS (3, C7, C14, 3)
4696 || reg->value == CPENS (3, C7, C4, 3)
4697 || reg->value == CPENS (0, C7, C6, 5)
4698 || reg->value == CPENS (0, C7, C6, 6)
4699 || reg->value == CPENS (0, C7, C10, 6)
4700 || reg->value == CPENS (0, C7, C14, 6)
4701 || reg->value == CPENS (3, C7, C10, 5)
4702 || reg->value == CPENS (3, C7, C12, 5)
4703 || reg->value == CPENS (3, C7, C13, 5)
4704 || reg->value == CPENS (3, C7, C14, 5)
4705 || reg->value == CPENS (3, C7, C4, 4))
4706 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
4707 return FALSE;
4708
4709 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4710 if ((reg->value == CPENS (0, C7, C9, 0)
4711 || reg->value == CPENS (0, C7, C9, 1))
4712 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4713 return FALSE;
4714
4715 /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
4716 if (reg->value == CPENS (3, C7, C3, 0)
4717 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
4718 return FALSE;
4719
4720 return TRUE;
4721 }
4722
4723 #undef C0
4724 #undef C1
4725 #undef C2
4726 #undef C3
4727 #undef C4
4728 #undef C5
4729 #undef C6
4730 #undef C7
4731 #undef C8
4732 #undef C9
4733 #undef C10
4734 #undef C11
4735 #undef C12
4736 #undef C13
4737 #undef C14
4738 #undef C15
4739
4740 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
4741 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4742
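/* Verifier for LDPSW: if write-back is enabled (bit 23), the base register
   Rn must not equal either transfer register unless Rn is the stack pointer
   (register 31), and if bit 22 (load) is set, Rt must not equal Rt2;
   violations make the instruction UNDEFINED (ERR_UND).  */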
4743 static enum err_type
4744 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
4745 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
4746 bfd_boolean encoding ATTRIBUTE_UNUSED,
4747 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4748 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4749 {
4750 int t = BITS (insn, 4, 0);
4751 int n = BITS (insn, 9, 5);
4752 int t2 = BITS (insn, 14, 10);
4753
4754 if (BIT (insn, 23))
4755 {
4756 /* Write back enabled. */
4757 if ((t == n || t2 == n) && n != 31)
4758 return ERR_UND;
4759 }
4760
4761 if (BIT (insn, 22))
4762 {
4763 /* Load */
4764 if (t == t2)
4765 return ERR_UND;
4766 }
4767
4768 return ERR_OK;
4769 }
4770
4771 /* Verifier for 3-operand vector-by-element instructions for which the
4772 condition `if sz:L == 11 then UNDEFINED` holds. */
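/* (For these S/D by-element forms, sz selects the element size and L
   contributes to the element index; sz == 1 && L == 1 has no valid
   encoding, hence UNDEFINED.)  */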
4773
4774 static enum err_type
4775 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
4776 bfd_vma pc ATTRIBUTE_UNUSED, bfd_boolean encoding,
4777 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4778 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4779 {
4780 const aarch64_insn undef_pattern = 0x3;
4781 aarch64_insn value;
4782
4783 assert (inst->opcode);
4784 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
4785 value = encoding ? inst->value : insn;
4786 assert (value);
4787
4788 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
4789 return ERR_UND;
4790
4791 return ERR_OK;
4792 }
4793
4794 /* Initialize an instruction sequence insn_sequence with the instruction INST.
4795 If INST is NULL the given insn_sequence is simply cleared, leaving no
4796 sequence open. */
4797
4798 void
4799 init_insn_sequence (const struct aarch64_inst *inst,
4800 aarch64_instr_sequence *insn_sequence)
4801 {
4802 int num_req_entries = 0;
4803 insn_sequence->next_insn = 0;
4804 insn_sequence->num_insns = num_req_entries;
4805 if (insn_sequence->instr)
4806 XDELETE (insn_sequence->instr);
4807 insn_sequence->instr = NULL;
4808
4809 if (inst)
4810 {
4811 insn_sequence->instr = XNEW (aarch64_inst);
4812 memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
4813 }
4814
4815 /* Handle all the cases here. May need to think of something smarter than
4816 a giant if/else chain if this grows. At that time, a lookup table may be
4817 best. */
4818 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
4819 num_req_entries = 1;
4820
4821 if (insn_sequence->current_insns)
4822 XDELETEVEC (insn_sequence->current_insns);
4823 insn_sequence->current_insns = NULL;
4824
4825 if (num_req_entries != 0)
4826 {
4827 size_t size = num_req_entries * sizeof (aarch64_inst);
4828 insn_sequence->current_insns
4829 = (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
4830 memset (insn_sequence->current_insns, 0, size);
4831 }
4832 }
4833
4834
4835 /* This function verifies that the instruction INST adheres to its specified
4836 constraints. If it does, ERR_OK is returned; if not, ERR_VFI is returned
4837 and MISMATCH_DETAIL contains the reason why verification failed.
4838
4839 The function is called both during assembly and disassembly. If assembling
4840 then ENCODING will be TRUE, else FALSE. If disassembling, PC will be set
4841 and will contain the PC of the current instruction w.r.t. the section.
4842
4843 If ENCODING and PC=0 then you are at the start of a section. The constraints
4844 are verified against the given state insn_sequence which is updated as it
4845 transitions through the verification. */
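/* A rough illustration of the C_SCAN_MOVPRFX constraint checked here (the
   exact mnemonics are only indicative):

     movprfx z0, z1           <- opens a dependency sequence (F_SCAN)
     add     z0.d, z0.d, #1   <- must be an SVE, movprfx-compatible
                                 instruction writing the same destination

   A non-SVE instruction, a different destination register, or a mismatched
   predicate after a predicated movprfx is reported as a non-fatal
   constraint violation (ERR_VFI).  */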
4846
4847 enum err_type
4848 verify_constraints (const struct aarch64_inst *inst,
4849 const aarch64_insn insn ATTRIBUTE_UNUSED,
4850 bfd_vma pc,
4851 bfd_boolean encoding,
4852 aarch64_operand_error *mismatch_detail,
4853 aarch64_instr_sequence *insn_sequence)
4854 {
4855 assert (inst);
4856 assert (inst->opcode);
4857
4858 const struct aarch64_opcode *opcode = inst->opcode;
4859 if (!opcode->constraints && !insn_sequence->instr)
4860 return ERR_OK;
4861
4862 assert (insn_sequence);
4863
4864 enum err_type res = ERR_OK;
4865
4866 /* This instruction puts a constraint on the insn_sequence. */
4867 if (opcode->flags & F_SCAN)
4868 {
4869 if (insn_sequence->instr)
4870 {
4871 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4872 mismatch_detail->error = _("instruction opens new dependency "
4873 "sequence without ending previous one");
4874 mismatch_detail->index = -1;
4875 mismatch_detail->non_fatal = TRUE;
4876 res = ERR_VFI;
4877 }
4878
4879 init_insn_sequence (inst, insn_sequence);
4880 return res;
4881 }
4882
4883 /* Verify constraints on an existing sequence. */
4884 if (insn_sequence->instr)
4885 {
4886 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
4887 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
4888 closed a previous one that we should have. */
4889 if (!encoding && pc == 0)
4890 {
4891 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4892 mismatch_detail->error = _("previous `movprfx' sequence not closed");
4893 mismatch_detail->index = -1;
4894 mismatch_detail->non_fatal = TRUE;
4895 res = ERR_VFI;
4896 /* Reset the sequence. */
4897 init_insn_sequence (NULL, insn_sequence);
4898 return res;
4899 }
4900
4901 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
4902 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
4903 {
4904 /* First check that the MOVPRFX SVE instruction is followed by an SVE
4905 instruction at all, so that we can give a better error message. */
4906 if (!opcode->avariant
4907 || !(*opcode->avariant &
4908 (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
4909 {
4910 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4911 mismatch_detail->error = _("SVE instruction expected after "
4912 "`movprfx'");
4913 mismatch_detail->index = -1;
4914 mismatch_detail->non_fatal = TRUE;
4915 res = ERR_VFI;
4916 goto done;
4917 }
4918
4919 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4920 instruction that is allowed to be used with a MOVPRFX. */
4921 if (!(opcode->constraints & C_SCAN_MOVPRFX))
4922 {
4923 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4924 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
4925 "expected");
4926 mismatch_detail->index = -1;
4927 mismatch_detail->non_fatal = TRUE;
4928 res = ERR_VFI;
4929 goto done;
4930 }
4931
4932 /* Next check for usage of the predicate register. */
4933 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
4934 aarch64_opnd_info blk_pred, inst_pred;
4935 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
4936 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
4937 bfd_boolean predicated = FALSE;
4938 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
4939
4940 /* Determine if the movprfx instruction used is predicated or not. */
4941 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
4942 {
4943 predicated = TRUE;
4944 blk_pred = insn_sequence->instr->operands[1];
4945 }
4946
4947 unsigned char max_elem_size = 0;
4948 unsigned char current_elem_size;
4949 int num_op_used = 0, last_op_usage = 0;
4950 int i, inst_pred_idx = -1;
4951 int num_ops = aarch64_num_of_operands (opcode);
4952 for (i = 0; i < num_ops; i++)
4953 {
4954 aarch64_opnd_info inst_op = inst->operands[i];
4955 switch (inst_op.type)
4956 {
4957 case AARCH64_OPND_SVE_Zd:
4958 case AARCH64_OPND_SVE_Zm_5:
4959 case AARCH64_OPND_SVE_Zm_16:
4960 case AARCH64_OPND_SVE_Zn:
4961 case AARCH64_OPND_SVE_Zt:
4962 case AARCH64_OPND_SVE_Vm:
4963 case AARCH64_OPND_SVE_Vn:
4964 case AARCH64_OPND_Va:
4965 case AARCH64_OPND_Vn:
4966 case AARCH64_OPND_Vm:
4967 case AARCH64_OPND_Sn:
4968 case AARCH64_OPND_Sm:
4969 case AARCH64_OPND_Rn:
4970 case AARCH64_OPND_Rm:
4971 case AARCH64_OPND_Rn_SP:
4972 case AARCH64_OPND_Rt_SP:
4973 case AARCH64_OPND_Rm_SP:
4974 if (inst_op.reg.regno == blk_dest.reg.regno)
4975 {
4976 num_op_used++;
4977 last_op_usage = i;
4978 }
4979 current_elem_size
4980 = aarch64_get_qualifier_esize (inst_op.qualifier);
4981 if (current_elem_size > max_elem_size)
4982 max_elem_size = current_elem_size;
4983 break;
4984 case AARCH64_OPND_SVE_Pd:
4985 case AARCH64_OPND_SVE_Pg3:
4986 case AARCH64_OPND_SVE_Pg4_5:
4987 case AARCH64_OPND_SVE_Pg4_10:
4988 case AARCH64_OPND_SVE_Pg4_16:
4989 case AARCH64_OPND_SVE_Pm:
4990 case AARCH64_OPND_SVE_Pn:
4991 case AARCH64_OPND_SVE_Pt:
4992 inst_pred = inst_op;
4993 inst_pred_idx = i;
4994 break;
4995 default:
4996 break;
4997 }
4998 }
4999
5000 assert (max_elem_size != 0);
5001 aarch64_opnd_info inst_dest = inst->operands[0];
5002 /* Determine the element size to compare against the movprfx element size:
5003 the widest operand element size for C_MAX_ELEM opcodes, otherwise the destination's. */
5004 current_elem_size
5005 = opcode->constraints & C_MAX_ELEM
5006 ? max_elem_size
5007 : aarch64_get_qualifier_esize (inst_dest.qualifier);
5008
5009 /* If the movprfx is predicated, apply some additional checks. */
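/* Illustrative example (assumed, not from the original sources):
       movprfx z0.s, p1/m, z2.s
       add     z0.s, p1/m, z0.s, z3.s    <-- accepted
       add     z0.s, p2/m, z0.s, z3.s    <-- rejected: predicate differs
   The following instruction must be predicated, must use a merging
   predicate, and must use the same predicate register as the movprfx.  */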
5010 if (predicated)
5011 {
5012 /* The instruction must be predicated. */
5013 if (inst_pred_idx < 0)
5014 {
5015 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5016 mismatch_detail->error = _("predicated instruction expected "
5017 "after `movprfx'");
5018 mismatch_detail->index = -1;
5019 mismatch_detail->non_fatal = TRUE;
5020 res = ERR_VFI;
5021 goto done;
5022 }
5023
5024 /* The instruction must have a merging predicate. */
5025 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
5026 {
5027 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5028 mismatch_detail->error = _("merging predicate expected due "
5029 "to preceding `movprfx'");
5030 mismatch_detail->index = inst_pred_idx;
5031 mismatch_detail->non_fatal = TRUE;
5032 res = ERR_VFI;
5033 goto done;
5034 }
5035
5036 /* The same predicate register must be used in the instruction. */
5037 if (blk_pred.reg.regno != inst_pred.reg.regno)
5038 {
5039 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5040 mismatch_detail->error = _("predicate register differs "
5041 "from that in preceding "
5042 "`movprfx'");
5043 mismatch_detail->index = inst_pred_idx;
5044 mismatch_detail->non_fatal = TRUE;
5045 res = ERR_VFI;
5046 goto done;
5047 }
5048 }
5049
5050 /* Destructive operations by definition also read the destination register,
5051 so allow one extra use of that register. */
5052 int allowed_usage
5053 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
5054
5055 /* The movprfx destination register is not used at all. */
5056 if (num_op_used == 0)
5057 {
5058 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5059 mismatch_detail->error = _("output register of preceding "
5060 "`movprfx' not used in current "
5061 "instruction");
5062 mismatch_detail->index = 0;
5063 mismatch_detail->non_fatal = TRUE;
5064 res = ERR_VFI;
5065 goto done;
5066 }
5067
5068 /* We now know the register is used; check that it is used as the destination. */
5069 if (blk_dest.reg.regno != inst_dest.reg.regno)
5070 {
5071 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5072 mismatch_detail->error = _("output register of preceding "
5073 "`movprfx' expected as output");
5074 mismatch_detail->index = 0;
5075 mismatch_detail->non_fatal = TRUE;
5076 res = ERR_VFI;
5077 goto done;
5078 }
5079
5080 /* The register is used more times than this type of opcode allows. */
5081 if (num_op_used > allowed_usage)
5082 {
5083 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5084 mismatch_detail->error = _("output register of preceding "
5085 "`movprfx' used as input");
5086 mismatch_detail->index = last_op_usage;
5087 mismatch_detail->non_fatal = TRUE;
5088 res = ERR_VFI;
5089 goto done;
5090 }
5091
5092 /* Now the only thing left is the qualifier checks: the register must be
5093 used with the same element size as in the movprfx. */
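/* For example (illustrative), `movprfx z0.d, p0/m, z1.d' followed by
   `add z0.s, p0/m, z0.s, z2.s' is rejected here because the element
   sizes (.d versus .s) do not match.  */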
5094 if (inst_dest.qualifier
5095 && blk_dest.qualifier
5096 && current_elem_size
5097 != aarch64_get_qualifier_esize (blk_dest.qualifier))
5098 {
5099 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5100 mismatch_detail->error = _("register size not compatible with "
5101 "previous `movprfx'");
5102 mismatch_detail->index = 0;
5103 mismatch_detail->non_fatal = TRUE;
5104 res = ERR_VFI;
5105 goto done;
5106 }
5107 }
5108
5109 done:
5110 /* Add the new instruction to the sequence. */
5111 memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
5112 inst, sizeof (aarch64_inst));
5113
5114 /* Check if sequence is now full. */
5115 if (insn_sequence->next_insn >= insn_sequence->num_insns)
5116 {
5117 /* Sequence is full, but we don't have anything special to do for now,
5118 so clear and reset it. */
5119 init_insn_sequence (NULL, insn_sequence);
5120 }
5121 }
5122
5123 return res;
5124 }
5125
5126
5127 /* Return true if UVALUE cannot be moved into an SVE register using DUP
5128 (with any element size, not just ESIZE) and if using DUPM would
5129 therefore be OK. ESIZE is the number of bytes in the immediate. */
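/* For example (illustrative): with ESIZE == 2, the value 0xff00 can be
   created with DUP (immediate -1, LSL #8), so this returns FALSE, whereas
   0x00ff cannot be created with DUP at any element size, so this returns
   TRUE and DUPM should be used instead.  */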
5130
5131 bfd_boolean
5132 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
5133 {
5134 int64_t svalue = uvalue;
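/* UPPER is a mask of the bits above the low ESIZE * 8 bits.  Shifting twice
   by ESIZE * 4 keeps each shift count below 64, so the expression is still
   well defined (and yields 0) when ESIZE is 8.  */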
5135 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
5136
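/* UVALUE must be a zero- or sign-extension of its low ESIZE * 8 bits,
   i.e. the bits selected by UPPER must be all zeros or all ones.  */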
5137 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
5138 return FALSE;
5139 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
5140 {
5141 svalue = (int32_t) uvalue;
5142 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
5143 {
5144 svalue = (int16_t) uvalue;
5145 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
5146 return FALSE;
5147 }
5148 }
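/* SVALUE now holds the immediate sign-extended from the narrowest element
   size that replicates it.  DUP accepts a signed 8-bit immediate, optionally
   shifted left by 8, so only values outside that range need DUPM.  */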
5149 if ((svalue & 0xff) == 0)
5150 svalue /= 256;
5151 return svalue < -128 || svalue >= 128;
5152 }
5153
5154 /* Include the opcode description table as well as the operand description
5155 table. */
5156 #define VERIFIER(x) verify_##x
5157 #include "aarch64-tbl.h"