[binutils][aarch64] New SVE_SHRIMM_UNPRED_22 operand.
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include "bfd_stdint.h"
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
34 #ifdef DEBUG_AARCH64
35 int debug_dump = FALSE;
36 #endif /* DEBUG_AARCH64 */
37
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array[32] = {
41 /* 0-7. */
42 "pow2",
43 "vl1",
44 "vl2",
45 "vl3",
46 "vl4",
47 "vl5",
48 "vl6",
49 "vl7",
50 /* 8-15. */
51 "vl8",
52 "vl16",
53 "vl32",
54 "vl64",
55 "vl128",
56 "vl256",
57 0,
58 0,
59 /* 16-23. */
60 0,
61 0,
62 0,
63 0,
64 0,
65 0,
66 0,
67 0,
68 /* 24-31. */
69 0,
70 0,
71 0,
72 0,
73 0,
74 "mul4",
75 "mul3",
76 "all"
77 };
78
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array[16] = {
82 /* 0-7. */
83 "pldl1keep",
84 "pldl1strm",
85 "pldl2keep",
86 "pldl2strm",
87 "pldl3keep",
88 "pldl3strm",
89 0,
90 0,
91 /* 8-15. */
92 "pstl1keep",
93 "pstl1strm",
94 "pstl2keep",
95 "pstl2strm",
96 "pstl3keep",
97 "pstl3strm",
98 0,
99 0
100 };
101
102 /* Helper functions to determine which operand is used to encode/decode
103    the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
121 enum data_pattern
122 {
123 DP_UNKNOWN,
124 DP_VECTOR_3SAME,
125 DP_VECTOR_LONG,
126 DP_VECTOR_WIDE,
127 DP_VECTOR_ACROSS_LANES,
128 };
129
130 static const char significant_operand_index [] =
131 {
132 0, /* DP_UNKNOWN, by default using operand 0. */
133 0, /* DP_VECTOR_3SAME */
134 1, /* DP_VECTOR_LONG */
135 2, /* DP_VECTOR_WIDE */
136 1, /* DP_VECTOR_ACROSS_LANES */
137 };
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that avoids calling
190    get_data_pattern each time an operand needs to be selected.  We could
191    either cache the calculated result or statically generate the data;
192    however, it is not obvious that the optimization would bring a significant
193    benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
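/* For illustration (added note, not from the original source): for a
   widening instruction such as UADDL <Vd>.8H, <Vn>.8B, <Vm>.8B the
   qualifier sequence (8H, 8B, 8B) matches DP_VECTOR_LONG above, so
   operand 1 (<Vn>.8B) is the one used to encode/decode size:Q.  */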
201 \f
202 const aarch64_field fields[] =
203 {
204 { 0, 0 }, /* NIL. */
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_2: in rmif instructions. */
244 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_2: in rmif instructions. */
246 { 10, 4 }, /* imm4_3: in addg/subg instructions. */
247 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
248 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
249 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
250 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
251 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
252 { 5, 14 }, /* imm14: in test bit and branch instructions. */
253 { 5, 16 }, /* imm16: in exception instructions. */
254 { 0, 26 }, /* imm26: in unconditional branch instructions. */
255 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
256 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
257 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
258 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
259 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
260 { 22, 1 }, /* N: in logical (immediate) instructions. */
261 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
262 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
263 { 31, 1 }, /* sf: in integer data processing instructions. */
264 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
265 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
266 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
267 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
268 { 31, 1 }, /* b5: in the test bit and branch instructions. */
269 { 19, 5 }, /* b40: in the test bit and branch instructions. */
270 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
271 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
272 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
273 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
274 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
275 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
276 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
277 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
278 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
279 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
280 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
281 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
282 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
283 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
284 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
285 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
286 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
287 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
288 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
289 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
290 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
291 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
292 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
293 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
294 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
295 { 5, 1 }, /* SVE_i1: single-bit immediate. */
296 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
297 { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */
298 { 19, 2 }, /* SVE_i3h2: two high bits of 3-bit immediate, bits [20,19]. */
299 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
300 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
301 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
302 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
303 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
304 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
305 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
306 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
307 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
308 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
309 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
310 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
311 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
312 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
313 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
314 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
315 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
316 { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
317 { 30, 1 }, /* SVE_sz2: 1-bit element size select. */
318 { 16, 4 }, /* SVE_tsz: triangular size select. */
319 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
320 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
321 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
322 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
323 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
324 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
325 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
326 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
327 { 12, 2 }, /* SM3: Indexed element SM3 2-bit index immediate. */
328 { 22, 1 }, /* sz: 1-bit element size select. */
329 };
330
331 enum aarch64_operand_class
332 aarch64_get_operand_class (enum aarch64_opnd type)
333 {
334 return aarch64_operands[type].op_class;
335 }
336
337 const char *
338 aarch64_get_operand_name (enum aarch64_opnd type)
339 {
340 return aarch64_operands[type].name;
341 }
342
343 /* Get the operand description string.
344    This is usually used for diagnostic purposes. */
345 const char *
346 aarch64_get_operand_desc (enum aarch64_opnd type)
347 {
348 return aarch64_operands[type].desc;
349 }
350
351 /* Table of all conditional affixes. */
352 const aarch64_cond aarch64_conds[16] =
353 {
354 {{"eq", "none"}, 0x0},
355 {{"ne", "any"}, 0x1},
356 {{"cs", "hs", "nlast"}, 0x2},
357 {{"cc", "lo", "ul", "last"}, 0x3},
358 {{"mi", "first"}, 0x4},
359 {{"pl", "nfrst"}, 0x5},
360 {{"vs"}, 0x6},
361 {{"vc"}, 0x7},
362 {{"hi", "pmore"}, 0x8},
363 {{"ls", "plast"}, 0x9},
364 {{"ge", "tcont"}, 0xa},
365 {{"lt", "tstop"}, 0xb},
366 {{"gt"}, 0xc},
367 {{"le"}, 0xd},
368 {{"al"}, 0xe},
369 {{"nv"}, 0xf},
370 };
371
372 const aarch64_cond *
373 get_cond_from_value (aarch64_insn value)
374 {
375 assert (value < 16);
376 return &aarch64_conds[(unsigned int) value];
377 }
378
379 const aarch64_cond *
380 get_inverted_cond (const aarch64_cond *cond)
381 {
382 return &aarch64_conds[cond->value ^ 0x1];
383 }
384
385 /* Table describing the operand extension/shifting operators; indexed by
386 enum aarch64_modifier_kind.
387
388 The value column provides the most common values for encoding modifiers,
389 which enables table-driven encoding/decoding for the modifiers. */
390 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
391 {
392 {"none", 0x0},
393 {"msl", 0x0},
394 {"ror", 0x3},
395 {"asr", 0x2},
396 {"lsr", 0x1},
397 {"lsl", 0x0},
398 {"uxtb", 0x0},
399 {"uxth", 0x1},
400 {"uxtw", 0x2},
401 {"uxtx", 0x3},
402 {"sxtb", 0x4},
403 {"sxth", 0x5},
404 {"sxtw", 0x6},
405 {"sxtx", 0x7},
406 {"mul", 0x0},
407 {"mul vl", 0x0},
408 {NULL, 0},
409 };
410
411 enum aarch64_modifier_kind
412 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
413 {
414 return desc - aarch64_operand_modifiers;
415 }
416
417 aarch64_insn
418 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
419 {
420 return aarch64_operand_modifiers[kind].value;
421 }
422
423 enum aarch64_modifier_kind
424 aarch64_get_operand_modifier_from_value (aarch64_insn value,
425 bfd_boolean extend_p)
426 {
427 if (extend_p == TRUE)
428 return AARCH64_MOD_UXTB + value;
429 else
430 return AARCH64_MOD_LSL - value;
431 }
432
433 bfd_boolean
434 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
435 {
436 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
437 ? TRUE : FALSE;
438 }
439
440 static inline bfd_boolean
441 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
442 {
443 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
444 ? TRUE : FALSE;
445 }
446
447 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
448 {
449 { "#0x00", 0x0 },
450 { "oshld", 0x1 },
451 { "oshst", 0x2 },
452 { "osh", 0x3 },
453 { "#0x04", 0x4 },
454 { "nshld", 0x5 },
455 { "nshst", 0x6 },
456 { "nsh", 0x7 },
457 { "#0x08", 0x8 },
458 { "ishld", 0x9 },
459 { "ishst", 0xa },
460 { "ish", 0xb },
461 { "#0x0c", 0xc },
462 { "ld", 0xd },
463 { "st", 0xe },
464 { "sy", 0xf },
465 };
466
467 /* Table describing the operands supported by the aliases of the HINT
468 instruction.
469
470 The name column is the operand that is accepted for the alias. The value
471 column is the hint number of the alias. The list of operands is terminated
472 by NULL in the name column. */
473
474 const struct aarch64_name_value_pair aarch64_hint_options[] =
475 {
476 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
477 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
478 { "csync", HINT_OPD_CSYNC }, /* PSB CSYNC. */
479 { "c", HINT_OPD_C }, /* BTI C. */
480 { "j", HINT_OPD_J }, /* BTI J. */
481 { "jc", HINT_OPD_JC }, /* BTI JC. */
482 { NULL, HINT_OPD_NULL },
483 };
484
485 /* op -> op: load = 0 instruction = 1 store = 2
486 l -> level: 1-3
487 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
488 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
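/* Worked examples (added for illustration; see the table below):
     B (0, 1, 0) == 0x00  -> "pldl1keep"
     B (1, 2, 1) == 0x0b  -> "plil2strm"
     B (2, 3, 1) == 0x15  -> "pstl3strm".  */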
489 const struct aarch64_name_value_pair aarch64_prfops[32] =
490 {
491 { "pldl1keep", B(0, 1, 0) },
492 { "pldl1strm", B(0, 1, 1) },
493 { "pldl2keep", B(0, 2, 0) },
494 { "pldl2strm", B(0, 2, 1) },
495 { "pldl3keep", B(0, 3, 0) },
496 { "pldl3strm", B(0, 3, 1) },
497 { NULL, 0x06 },
498 { NULL, 0x07 },
499 { "plil1keep", B(1, 1, 0) },
500 { "plil1strm", B(1, 1, 1) },
501 { "plil2keep", B(1, 2, 0) },
502 { "plil2strm", B(1, 2, 1) },
503 { "plil3keep", B(1, 3, 0) },
504 { "plil3strm", B(1, 3, 1) },
505 { NULL, 0x0e },
506 { NULL, 0x0f },
507 { "pstl1keep", B(2, 1, 0) },
508 { "pstl1strm", B(2, 1, 1) },
509 { "pstl2keep", B(2, 2, 0) },
510 { "pstl2strm", B(2, 2, 1) },
511 { "pstl3keep", B(2, 3, 0) },
512 { "pstl3strm", B(2, 3, 1) },
513 { NULL, 0x16 },
514 { NULL, 0x17 },
515 { NULL, 0x18 },
516 { NULL, 0x19 },
517 { NULL, 0x1a },
518 { NULL, 0x1b },
519 { NULL, 0x1c },
520 { NULL, 0x1d },
521 { NULL, 0x1e },
522 { NULL, 0x1f },
523 };
524 #undef B
525 \f
526 /* Utilities on value constraint. */
527
528 static inline int
529 value_in_range_p (int64_t value, int low, int high)
530 {
531 return (value >= low && value <= high) ? 1 : 0;
532 }
533
534 /* Return true if VALUE is a multiple of ALIGN. */
535 static inline int
536 value_aligned_p (int64_t value, int align)
537 {
538 return (value % align) == 0;
539 }
540
541 /* Return non-zero if the signed VALUE fits in a field of WIDTH bits. */
542 static inline int
543 value_fit_signed_field_p (int64_t value, unsigned width)
544 {
545 assert (width < 32);
546 if (width < sizeof (value) * 8)
547 {
548 int64_t lim = (int64_t)1 << (width - 1);
549 if (value >= -lim && value < lim)
550 return 1;
551 }
552 return 0;
553 }
554
555 /* Return non-zero if the unsigned VALUE fits in a field of WIDTH bits. */
556 static inline int
557 value_fit_unsigned_field_p (int64_t value, unsigned width)
558 {
559 assert (width < 32);
560 if (width < sizeof (value) * 8)
561 {
562 int64_t lim = (int64_t)1 << width;
563 if (value >= 0 && value < lim)
564 return 1;
565 }
566 return 0;
567 }
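/* For example (added for illustration): with WIDTH == 5,
   value_fit_unsigned_field_p accepts 0..31 and value_fit_signed_field_p
   accepts -16..15.  */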
568
569 /* Return 1 if OPERAND is SP or WSP. */
570 int
571 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
572 {
573 return ((aarch64_get_operand_class (operand->type)
574 == AARCH64_OPND_CLASS_INT_REG)
575 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
576 && operand->reg.regno == 31);
577 }
578
579 /* Return 1 if OPERAND is XZR or WZR. */
580 int
581 aarch64_zero_register_p (const aarch64_opnd_info *operand)
582 {
583 return ((aarch64_get_operand_class (operand->type)
584 == AARCH64_OPND_CLASS_INT_REG)
585 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
586 && operand->reg.regno == 31);
587 }
588
589 /* Return true if the operand *OPERAND, which has the operand code
590    OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
591    qualified by the qualifier TARGET. */
592
593 static inline int
594 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
595 aarch64_opnd_qualifier_t target)
596 {
597 switch (operand->qualifier)
598 {
599 case AARCH64_OPND_QLF_W:
600 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
601 return 1;
602 break;
603 case AARCH64_OPND_QLF_X:
604 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
605 return 1;
606 break;
607 case AARCH64_OPND_QLF_WSP:
608 if (target == AARCH64_OPND_QLF_W
609 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
610 return 1;
611 break;
612 case AARCH64_OPND_QLF_SP:
613 if (target == AARCH64_OPND_QLF_X
614 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
615 return 1;
616 break;
617 default:
618 break;
619 }
620
621 return 0;
622 }
623
624 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
625 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
626
627    Return NIL if more than one expected qualifier is found. */
628
629 aarch64_opnd_qualifier_t
630 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
631 int idx,
632 const aarch64_opnd_qualifier_t known_qlf,
633 int known_idx)
634 {
635 int i, saved_i;
636
637 /* Special case.
638
639 When the known qualifier is NIL, we have to assume that there is only
640 one qualifier sequence in the *QSEQ_LIST and return the corresponding
641 qualifier directly. One scenario is that for instruction
642 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
643 which has only one possible valid qualifier sequence
644 NIL, S_D
645 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
646 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
647
648 Because the qualifier NIL has dual roles in the qualifier sequence:
649    it can mean no qualifier for the operand, or that the qualifier sequence is
650 not in use (when all qualifiers in the sequence are NILs), we have to
651 handle this special case here. */
652 if (known_qlf == AARCH64_OPND_NIL)
653 {
654 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
655 return qseq_list[0][idx];
656 }
657
658 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
659 {
660 if (qseq_list[i][known_idx] == known_qlf)
661 {
662 if (saved_i != -1)
663 /* More than one sequence is found to have KNOWN_QLF at
664 KNOWN_IDX. */
665 return AARCH64_OPND_NIL;
666 saved_i = i;
667 }
668 }
669
670 return qseq_list[saved_i][idx];
671 }
672
673 enum operand_qualifier_kind
674 {
675 OQK_NIL,
676 OQK_OPD_VARIANT,
677 OQK_VALUE_IN_RANGE,
678 OQK_MISC,
679 };
680
681 /* Operand qualifier description. */
682 struct operand_qualifier_data
683 {
684 /* The usage of the three data fields depends on the qualifier kind. */
685 int data0;
686 int data1;
687 int data2;
688 /* Description. */
689 const char *desc;
690 /* Kind. */
691 enum operand_qualifier_kind kind;
692 };
693
694 /* Indexed by the operand qualifier enumerators. */
695 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
696 {
697 {0, 0, 0, "NIL", OQK_NIL},
698
699 /* Operand variant qualifiers.
700 First 3 fields:
701 element size, number of elements and common value for encoding. */
702
703 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
704 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
705 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
706 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
707
708 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
709 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
710 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
711 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
712 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
713 {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
714
715 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
716 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
717 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
718 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
719 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
720 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
721 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
722 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
723 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
724 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
725 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
726
727 {0, 0, 0, "z", OQK_OPD_VARIANT},
728 {0, 0, 0, "m", OQK_OPD_VARIANT},
729
730 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
731 {16, 0, 0, "tag", OQK_OPD_VARIANT},
732
733 /* Qualifiers constraining the value range.
734 First 3 fields:
735    Lower bound, upper bound, unused. */
736
737 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
738 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
739 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
740 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
741 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
742 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
743 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
744
745 /* Qualifiers for miscellaneous purposes.
746 First 3 fields:
747 unused, unused and unused. */
748
749 {0, 0, 0, "lsl", 0},
750 {0, 0, 0, "msl", 0},
751
752 {0, 0, 0, "retrieving", 0},
753 };
754
755 static inline bfd_boolean
756 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
757 {
758 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
759 ? TRUE : FALSE;
760 }
761
762 static inline bfd_boolean
763 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
764 {
765 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
766 ? TRUE : FALSE;
767 }
768
769 const char*
770 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
771 {
772 return aarch64_opnd_qualifiers[qualifier].desc;
773 }
774
775 /* Given an operand qualifier, return the expected data element size
776 of a qualified operand. */
777 unsigned char
778 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
779 {
780 assert (operand_variant_qualifier_p (qualifier) == TRUE);
781 return aarch64_opnd_qualifiers[qualifier].data0;
782 }
783
784 unsigned char
785 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
786 {
787 assert (operand_variant_qualifier_p (qualifier) == TRUE);
788 return aarch64_opnd_qualifiers[qualifier].data1;
789 }
790
791 aarch64_insn
792 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
793 {
794 assert (operand_variant_qualifier_p (qualifier) == TRUE);
795 return aarch64_opnd_qualifiers[qualifier].data2;
796 }
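/* Worked example (added for illustration, values taken from the table
   above): for AARCH64_OPND_QLF_V_4S the element size is 4 bytes, the
   number of elements is 4 and the standard encoding value is 0x5.  */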
797
798 static int
799 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
800 {
801 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
802 return aarch64_opnd_qualifiers[qualifier].data0;
803 }
804
805 static int
806 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
807 {
808 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
809 return aarch64_opnd_qualifiers[qualifier].data1;
810 }
811
812 #ifdef DEBUG_AARCH64
813 void
814 aarch64_verbose (const char *str, ...)
815 {
816 va_list ap;
817 va_start (ap, str);
818 printf ("#### ");
819 vprintf (str, ap);
820 printf ("\n");
821 va_end (ap);
822 }
823
824 static inline void
825 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
826 {
827 int i;
828 printf ("#### \t");
829 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
830 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
831 printf ("\n");
832 }
833
834 static void
835 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
836 const aarch64_opnd_qualifier_t *qualifier)
837 {
838 int i;
839 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
840
841 aarch64_verbose ("dump_match_qualifiers:");
842 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
843 curr[i] = opnd[i].qualifier;
844 dump_qualifier_sequence (curr);
845 aarch64_verbose ("against");
846 dump_qualifier_sequence (qualifier);
847 }
848 #endif /* DEBUG_AARCH64 */
849
850 /* This function checks if the given opcode OPCODE is a destructive
851    instruction based on the usage of its registers.  It does not recognize
852    unary destructive instructions. */
853 bfd_boolean
854 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
855 {
856 int i = 0;
857 const enum aarch64_opnd *opnds = opcode->operands;
858
859 if (opnds[0] == AARCH64_OPND_NIL)
860 return FALSE;
861
862 while (opnds[++i] != AARCH64_OPND_NIL)
863 if (opnds[i] == opnds[0])
864 return TRUE;
865
866 return FALSE;
867 }
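/* For illustration (added note): an opcode whose operand list repeats its
   first operand code, e.g. { ..._Zd, ..., ..._Zd, ... }, is reported as
   destructive, whereas a plain three-operand list such as { Rd, Rn, Rm }
   is not.  */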
868
869 /* TODO: improve this; we could have an extra field at run time to
870    store the number of operands rather than calculating it every time. */
871
872 int
873 aarch64_num_of_operands (const aarch64_opcode *opcode)
874 {
875 int i = 0;
876 const enum aarch64_opnd *opnds = opcode->operands;
877 while (opnds[i++] != AARCH64_OPND_NIL)
878 ;
879 --i;
880 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
881 return i;
882 }
883
884 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
885    If it succeeds, fill the found sequence in *RET and return 1; otherwise return 0.
886
887    N.B. on entry, it is very likely that only some operands in *INST
888    have had their qualifiers established.
889
890 If STOP_AT is not -1, the function will only try to match
891 the qualifier sequence for operands before and including the operand
892 of index STOP_AT; and on success *RET will only be filled with the first
893 (STOP_AT+1) qualifiers.
894
895    A couple of examples of the matching algorithm:
896
897 X,W,NIL should match
898 X,W,NIL
899
900 NIL,NIL should match
901 X ,NIL
902
903 Apart from serving the main encoding routine, this can also be called
904 during or after the operand decoding. */
905
906 int
907 aarch64_find_best_match (const aarch64_inst *inst,
908 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
909 int stop_at, aarch64_opnd_qualifier_t *ret)
910 {
911 int found = 0;
912 int i, num_opnds;
913 const aarch64_opnd_qualifier_t *qualifiers;
914
915 num_opnds = aarch64_num_of_operands (inst->opcode);
916 if (num_opnds == 0)
917 {
918 DEBUG_TRACE ("SUCCEED: no operand");
919 return 1;
920 }
921
922 if (stop_at < 0 || stop_at >= num_opnds)
923 stop_at = num_opnds - 1;
924
925 /* For each pattern. */
926 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
927 {
928 int j;
929 qualifiers = *qualifiers_list;
930
931 /* Start as positive. */
932 found = 1;
933
934 DEBUG_TRACE ("%d", i);
935 #ifdef DEBUG_AARCH64
936 if (debug_dump)
937 dump_match_qualifiers (inst->operands, qualifiers);
938 #endif
939
940 /* Most opcodes have far fewer patterns in the list.
941    The first NIL qualifier indicates the end of the list. */
942 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
943 {
944 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
945 if (i)
946 found = 0;
947 break;
948 }
949
950 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
951 {
952 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
953 {
954 /* Either the operand does not have a qualifier, or the qualifier
955    for the operand needs to be deduced from the qualifier
956    sequence.
957    In the latter case, any constraint checking related to
958 the obtained qualifier should be done later in
959 operand_general_constraint_met_p. */
960 continue;
961 }
962 else if (*qualifiers != inst->operands[j].qualifier)
963 {
964 /* Unless the target qualifier can also qualify the operand
965    (which already has a non-nil qualifier), non-equal
966    qualifiers generally do not match. */
967 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
968 continue;
969 else
970 {
971 found = 0;
972 break;
973 }
974 }
975 else
976 continue; /* Equal qualifiers are certainly matched. */
977 }
978
979 /* Qualifiers established. */
980 if (found == 1)
981 break;
982 }
983
984 if (found == 1)
985 {
986 /* Fill the result in *RET. */
987 int j;
988 qualifiers = *qualifiers_list;
989
990 DEBUG_TRACE ("complete qualifiers using list %d", i);
991 #ifdef DEBUG_AARCH64
992 if (debug_dump)
993 dump_qualifier_sequence (qualifiers);
994 #endif
995
996 for (j = 0; j <= stop_at; ++j, ++qualifiers)
997 ret[j] = *qualifiers;
998 for (; j < AARCH64_MAX_OPND_NUM; ++j)
999 ret[j] = AARCH64_OPND_QLF_NIL;
1000
1001 DEBUG_TRACE ("SUCCESS");
1002 return 1;
1003 }
1004
1005 DEBUG_TRACE ("FAIL");
1006 return 0;
1007 }
1008
1009 /* Operand qualifier matching and resolving.
1010
1011 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1012 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1013
1014    If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
1015 succeeds. */
1016
1017 static int
1018 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
1019 {
1020 int i, nops;
1021 aarch64_opnd_qualifier_seq_t qualifiers;
1022
1023 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1024 qualifiers))
1025 {
1026 DEBUG_TRACE ("matching FAIL");
1027 return 0;
1028 }
1029
1030 if (inst->opcode->flags & F_STRICT)
1031 {
1032 /* Require an exact qualifier match, even for NIL qualifiers. */
1033 nops = aarch64_num_of_operands (inst->opcode);
1034 for (i = 0; i < nops; ++i)
1035 if (inst->operands[i].qualifier != qualifiers[i])
1036 return FALSE;
1037 }
1038
1039 /* Update the qualifiers. */
1040 if (update_p == TRUE)
1041 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1042 {
1043 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1044 break;
1045 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1046 "update %s with %s for operand %d",
1047 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1048 aarch64_get_qualifier_name (qualifiers[i]), i);
1049 inst->operands[i].qualifier = qualifiers[i];
1050 }
1051
1052 DEBUG_TRACE ("matching SUCCESS");
1053 return 1;
1054 }
1055
1056 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1057 register by MOVZ.
1058
1059 IS32 indicates whether value is a 32-bit immediate or not.
1060 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1061 amount will be returned in *SHIFT_AMOUNT. */
1062
1063 bfd_boolean
1064 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1065 {
1066 int amount;
1067
1068 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1069
1070 if (is32)
1071 {
1072 /* Allow all zeros or all ones in top 32-bits, so that
1073 32-bit constant expressions like ~0x80000000 are
1074 permitted. */
1075 uint64_t ext = value;
1076 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1077 /* Immediate out of range. */
1078 return FALSE;
1079 value &= (int64_t) 0xffffffff;
1080 }
1081
1082 /* first, try movz then movn */
1083 amount = -1;
1084 if ((value & ((int64_t) 0xffff << 0)) == value)
1085 amount = 0;
1086 else if ((value & ((int64_t) 0xffff << 16)) == value)
1087 amount = 16;
1088 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1089 amount = 32;
1090 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1091 amount = 48;
1092
1093 if (amount == -1)
1094 {
1095 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1096 return FALSE;
1097 }
1098
1099 if (shift_amount != NULL)
1100 *shift_amount = amount;
1101
1102 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1103
1104 return TRUE;
1105 }
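/* Worked examples (added for illustration): 0xffff and 0x12340000 are
   accepted with shift amounts 0 and 16 respectively, while 0x12345678 is
   rejected because its set bits span more than one 16-bit chunk.  */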
1106
1107 /* Build the accepted values for immediate logical SIMD instructions.
1108
1109 The standard encodings of the immediate value are:
1110 N imms immr SIMD size R S
1111 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1112 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1113 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1114 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1115 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1116 0 11110s 00000r 2 UInt(r) UInt(s)
1117 where all-ones value of S is reserved.
1118
1119 Let's call E the SIMD size.
1120
1121 The immediate value is: S+1 bits '1' rotated to the right by R.
1122
1123 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1124 (remember S != E - 1). */
1125
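/* A worked example (added for illustration): with E = 32, S = 7 and R = 0
   the element value is eight consecutive ones, i.e. 0x000000ff, which is
   replicated to 0x000000ff000000ff; its standard encoding is N = 0,
   immr = 0b000000, imms = 0b000111.  */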
1126 #define TOTAL_IMM_NB 5334
1127
1128 typedef struct
1129 {
1130 uint64_t imm;
1131 aarch64_insn encoding;
1132 } simd_imm_encoding;
1133
1134 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1135
1136 static int
1137 simd_imm_encoding_cmp(const void *i1, const void *i2)
1138 {
1139 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1140 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1141
1142 if (imm1->imm < imm2->imm)
1143 return -1;
1144 if (imm1->imm > imm2->imm)
1145 return +1;
1146 return 0;
1147 }
1148
1149 /* immediate bitfield standard encoding
1150 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1151 1 ssssss rrrrrr 64 rrrrrr ssssss
1152 0 0sssss 0rrrrr 32 rrrrr sssss
1153 0 10ssss 00rrrr 16 rrrr ssss
1154 0 110sss 000rrr 8 rrr sss
1155 0 1110ss 0000rr 4 rr ss
1156 0 11110s 00000r 2 r s */
1157 static inline int
1158 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1159 {
1160 return (is64 << 12) | (r << 6) | s;
1161 }
1162
1163 static void
1164 build_immediate_table (void)
1165 {
1166 uint32_t log_e, e, s, r, s_mask;
1167 uint64_t mask, imm;
1168 int nb_imms;
1169 int is64;
1170
1171 nb_imms = 0;
1172 for (log_e = 1; log_e <= 6; log_e++)
1173 {
1174 /* Get element size. */
1175 e = 1u << log_e;
1176 if (log_e == 6)
1177 {
1178 is64 = 1;
1179 mask = 0xffffffffffffffffull;
1180 s_mask = 0;
1181 }
1182 else
1183 {
1184 is64 = 0;
1185 mask = (1ull << e) - 1;
1186 /* log_e s_mask
1187 1 ((1 << 4) - 1) << 2 = 111100
1188 2 ((1 << 3) - 1) << 3 = 111000
1189 3 ((1 << 2) - 1) << 4 = 110000
1190 4 ((1 << 1) - 1) << 5 = 100000
1191 5 ((1 << 0) - 1) << 6 = 000000 */
1192 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1193 }
1194 for (s = 0; s < e - 1; s++)
1195 for (r = 0; r < e; r++)
1196 {
1197 /* s+1 consecutive bits to 1 (s < 63) */
1198 imm = (1ull << (s + 1)) - 1;
1199 /* rotate right by r */
1200 if (r != 0)
1201 imm = (imm >> r) | ((imm << (e - r)) & mask);
1202 /* replicate the constant depending on SIMD size */
1203 switch (log_e)
1204 {
1205 case 1: imm = (imm << 2) | imm;
1206 /* Fall through. */
1207 case 2: imm = (imm << 4) | imm;
1208 /* Fall through. */
1209 case 3: imm = (imm << 8) | imm;
1210 /* Fall through. */
1211 case 4: imm = (imm << 16) | imm;
1212 /* Fall through. */
1213 case 5: imm = (imm << 32) | imm;
1214 /* Fall through. */
1215 case 6: break;
1216 default: abort ();
1217 }
1218 simd_immediates[nb_imms].imm = imm;
1219 simd_immediates[nb_imms].encoding =
1220 encode_immediate_bitfield(is64, s | s_mask, r);
1221 nb_imms++;
1222 }
1223 }
1224 assert (nb_imms == TOTAL_IMM_NB);
1225 qsort(simd_immediates, nb_imms,
1226 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1227 }
1228
1229 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1230 be accepted by logical (immediate) instructions
1231 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1232
1233 ESIZE is the number of bytes in the decoded immediate value.
1234 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1235 VALUE will be returned in *ENCODING. */
1236
1237 bfd_boolean
1238 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1239 {
1240 simd_imm_encoding imm_enc;
1241 const simd_imm_encoding *imm_encoding;
1242 static bfd_boolean initialized = FALSE;
1243 uint64_t upper;
1244 int i;
1245
1246 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1247 value, esize);
1248
1249 if (!initialized)
1250 {
1251 build_immediate_table ();
1252 initialized = TRUE;
1253 }
1254
1255 /* Allow all zeros or all ones in top bits, so that
1256 constant expressions like ~1 are permitted. */
1257 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1258 if ((value & ~upper) != value && (value | upper) != value)
1259 return FALSE;
1260
1261 /* Replicate to a full 64-bit value. */
1262 value &= ~upper;
1263 for (i = esize * 8; i < 64; i *= 2)
1264 value |= (value << i);
1265
1266 imm_enc.imm = value;
1267 imm_encoding = (const simd_imm_encoding *)
1268 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1269 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1270 if (imm_encoding == NULL)
1271 {
1272 DEBUG_TRACE ("exit with FALSE");
1273 return FALSE;
1274 }
1275 if (encoding != NULL)
1276 *encoding = imm_encoding->encoding;
1277 DEBUG_TRACE ("exit with TRUE");
1278 return TRUE;
1279 }
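/* A minimal usage sketch (added for illustration; "enc" is a hypothetical
   local variable):

     aarch64_insn enc;
     if (aarch64_logical_immediate_p (0x5555555555555555ULL, 8, &enc))
       ...   -- accepted: one set bit per 2-bit element, replicated.

   By contrast, 0x123456789abcdef0ULL is rejected because it is not a
   repeating, rotated run of ones.  */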
1280
1281 /* If 64-bit immediate IMM is in the format of
1282 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1283 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1284 of value "abcdefgh". Otherwise return -1. */
1285 int
1286 aarch64_shrink_expanded_imm8 (uint64_t imm)
1287 {
1288 int i, ret;
1289 uint32_t byte;
1290
1291 ret = 0;
1292 for (i = 0; i < 8; i++)
1293 {
1294 byte = (imm >> (8 * i)) & 0xff;
1295 if (byte == 0xff)
1296 ret |= 1 << i;
1297 else if (byte != 0x00)
1298 return -1;
1299 }
1300 return ret;
1301 }
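/* Worked example (added for illustration):
   aarch64_shrink_expanded_imm8 (0x00ff00ff00ff00ffULL) returns 0x55;
   any input containing a byte other than 0x00 or 0xff yields -1.  */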
1302
1303 /* Utility inline functions for operand_general_constraint_met_p. */
1304
1305 static inline void
1306 set_error (aarch64_operand_error *mismatch_detail,
1307 enum aarch64_operand_error_kind kind, int idx,
1308 const char* error)
1309 {
1310 if (mismatch_detail == NULL)
1311 return;
1312 mismatch_detail->kind = kind;
1313 mismatch_detail->index = idx;
1314 mismatch_detail->error = error;
1315 }
1316
1317 static inline void
1318 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1319 const char* error)
1320 {
1321 if (mismatch_detail == NULL)
1322 return;
1323 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1324 }
1325
1326 static inline void
1327 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1328 int idx, int lower_bound, int upper_bound,
1329 const char* error)
1330 {
1331 if (mismatch_detail == NULL)
1332 return;
1333 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1334 mismatch_detail->data[0] = lower_bound;
1335 mismatch_detail->data[1] = upper_bound;
1336 }
1337
1338 static inline void
1339 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1340 int idx, int lower_bound, int upper_bound)
1341 {
1342 if (mismatch_detail == NULL)
1343 return;
1344 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1345 _("immediate value"));
1346 }
1347
1348 static inline void
1349 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1350 int idx, int lower_bound, int upper_bound)
1351 {
1352 if (mismatch_detail == NULL)
1353 return;
1354 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1355 _("immediate offset"));
1356 }
1357
1358 static inline void
1359 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1360 int idx, int lower_bound, int upper_bound)
1361 {
1362 if (mismatch_detail == NULL)
1363 return;
1364 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1365 _("register number"));
1366 }
1367
1368 static inline void
1369 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1370 int idx, int lower_bound, int upper_bound)
1371 {
1372 if (mismatch_detail == NULL)
1373 return;
1374 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1375 _("register element index"));
1376 }
1377
1378 static inline void
1379 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1380 int idx, int lower_bound, int upper_bound)
1381 {
1382 if (mismatch_detail == NULL)
1383 return;
1384 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1385 _("shift amount"));
1386 }
1387
1388 /* Report that the MUL modifier in operand IDX should be in the range
1389 [LOWER_BOUND, UPPER_BOUND]. */
1390 static inline void
1391 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1392 int idx, int lower_bound, int upper_bound)
1393 {
1394 if (mismatch_detail == NULL)
1395 return;
1396 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1397 _("multiplier"));
1398 }
1399
1400 static inline void
1401 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1402 int alignment)
1403 {
1404 if (mismatch_detail == NULL)
1405 return;
1406 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1407 mismatch_detail->data[0] = alignment;
1408 }
1409
1410 static inline void
1411 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1412 int expected_num)
1413 {
1414 if (mismatch_detail == NULL)
1415 return;
1416 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1417 mismatch_detail->data[0] = expected_num;
1418 }
1419
1420 static inline void
1421 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1422 const char* error)
1423 {
1424 if (mismatch_detail == NULL)
1425 return;
1426 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1427 }
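/* Usage sketch (added for illustration): set_imm_out_of_range_error
   (mismatch_detail, 1, 0, 31) records an AARCH64_OPDE_OUT_OF_RANGE error
   for operand 1 with data[0] = 0, data[1] = 31 and the static string
   "immediate value"; a consumer such as the assembler can later turn this
   into a user-visible diagnostic.  */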
1428
1429 /* General constraint checking based on operand code.
1430
1431 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1432 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1433
1434 This function has to be called after the qualifiers for all operands
1435 have been resolved.
1436
1437    The mismatch error message is returned in *MISMATCH_DETAIL upon request,
1438    i.e. when MISMATCH_DETAIL is non-NULL.  This avoids generating
1439    error messages during disassembly, where they are not
1440    wanted.  We avoid the dynamic construction of error message strings
1441 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1442 use a combination of error code, static string and some integer data to
1443 represent an error. */
1444
1445 static int
1446 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1447 enum aarch64_opnd type,
1448 const aarch64_opcode *opcode,
1449 aarch64_operand_error *mismatch_detail)
1450 {
1451 unsigned num, modifiers, shift;
1452 unsigned char size;
1453 int64_t imm, min_value, max_value;
1454 uint64_t uvalue, mask;
1455 const aarch64_opnd_info *opnd = opnds + idx;
1456 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1457
1458 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1459
1460 switch (aarch64_operands[type].op_class)
1461 {
1462 case AARCH64_OPND_CLASS_INT_REG:
1463 /* Check pair reg constraints for cas* instructions. */
1464 if (type == AARCH64_OPND_PAIRREG)
1465 {
1466 assert (idx == 1 || idx == 3);
1467 if (opnds[idx - 1].reg.regno % 2 != 0)
1468 {
1469 set_syntax_error (mismatch_detail, idx - 1,
1470 _("reg pair must start from even reg"));
1471 return 0;
1472 }
1473 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1474 {
1475 set_syntax_error (mismatch_detail, idx,
1476 _("reg pair must be contiguous"));
1477 return 0;
1478 }
1479 break;
1480 }
1481
1482 /* <Xt> may be optional in some IC and TLBI instructions. */
1483 if (type == AARCH64_OPND_Rt_SYS)
1484 {
1485 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1486 == AARCH64_OPND_CLASS_SYSTEM));
1487 if (opnds[1].present
1488 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1489 {
1490 set_other_error (mismatch_detail, idx, _("extraneous register"));
1491 return 0;
1492 }
1493 if (!opnds[1].present
1494 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1495 {
1496 set_other_error (mismatch_detail, idx, _("missing register"));
1497 return 0;
1498 }
1499 }
1500 switch (qualifier)
1501 {
1502 case AARCH64_OPND_QLF_WSP:
1503 case AARCH64_OPND_QLF_SP:
1504 if (!aarch64_stack_pointer_p (opnd))
1505 {
1506 set_other_error (mismatch_detail, idx,
1507 _("stack pointer register expected"));
1508 return 0;
1509 }
1510 break;
1511 default:
1512 break;
1513 }
1514 break;
1515
1516 case AARCH64_OPND_CLASS_SVE_REG:
1517 switch (type)
1518 {
1519 case AARCH64_OPND_SVE_Zm3_INDEX:
1520 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1521 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1522 case AARCH64_OPND_SVE_Zm4_INDEX:
1523 size = get_operand_fields_width (get_operand_from_code (type));
1524 shift = get_operand_specific_data (&aarch64_operands[type]);
1525 mask = (1 << shift) - 1;
1526 if (opnd->reg.regno > mask)
1527 {
1528 assert (mask == 7 || mask == 15);
1529 set_other_error (mismatch_detail, idx,
1530 mask == 15
1531 ? _("z0-z15 expected")
1532 : _("z0-z7 expected"));
1533 return 0;
1534 }
1535 mask = (1 << (size - shift)) - 1;
1536 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1537 {
1538 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1539 return 0;
1540 }
1541 break;
1542
1543 case AARCH64_OPND_SVE_Zn_INDEX:
1544 size = aarch64_get_qualifier_esize (opnd->qualifier);
1545 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1546 {
1547 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1548 0, 64 / size - 1);
1549 return 0;
1550 }
1551 break;
1552
1553 case AARCH64_OPND_SVE_ZnxN:
1554 case AARCH64_OPND_SVE_ZtxN:
1555 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1556 {
1557 set_other_error (mismatch_detail, idx,
1558 _("invalid register list"));
1559 return 0;
1560 }
1561 break;
1562
1563 default:
1564 break;
1565 }
1566 break;
1567
1568 case AARCH64_OPND_CLASS_PRED_REG:
1569 if (opnd->reg.regno >= 8
1570 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1571 {
1572 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1573 return 0;
1574 }
1575 break;
1576
1577 case AARCH64_OPND_CLASS_COND:
1578 if (type == AARCH64_OPND_COND1
1579 && (opnds[idx].cond->value & 0xe) == 0xe)
1580 {
1581 /* Don't allow AL or NV. */
1582 set_syntax_error (mismatch_detail, idx, NULL);
1583 }
1584 break;
1585
1586 case AARCH64_OPND_CLASS_ADDRESS:
1587 /* Check writeback. */
1588 switch (opcode->iclass)
1589 {
1590 case ldst_pos:
1591 case ldst_unscaled:
1592 case ldstnapair_offs:
1593 case ldstpair_off:
1594 case ldst_unpriv:
1595 if (opnd->addr.writeback == 1)
1596 {
1597 set_syntax_error (mismatch_detail, idx,
1598 _("unexpected address writeback"));
1599 return 0;
1600 }
1601 break;
1602 case ldst_imm10:
1603 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1604 {
1605 set_syntax_error (mismatch_detail, idx,
1606 _("unexpected address writeback"));
1607 return 0;
1608 }
1609 break;
1610 case ldst_imm9:
1611 case ldstpair_indexed:
1612 case asisdlsep:
1613 case asisdlsop:
1614 if (opnd->addr.writeback == 0)
1615 {
1616 set_syntax_error (mismatch_detail, idx,
1617 _("address writeback expected"));
1618 return 0;
1619 }
1620 break;
1621 default:
1622 assert (opnd->addr.writeback == 0);
1623 break;
1624 }
1625 switch (type)
1626 {
1627 case AARCH64_OPND_ADDR_SIMM7:
1628 /* Scaled signed 7-bit immediate offset. */
1629 /* Get the size of the data element that is accessed, which may be
1630 different from that of the source register size,
1631 e.g. in strb/ldrb. */
1632 size = aarch64_get_qualifier_esize (opnd->qualifier);
1633 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1634 {
1635 set_offset_out_of_range_error (mismatch_detail, idx,
1636 -64 * size, 63 * size);
1637 return 0;
1638 }
1639 if (!value_aligned_p (opnd->addr.offset.imm, size))
1640 {
1641 set_unaligned_error (mismatch_detail, idx, size);
1642 return 0;
1643 }
1644 break;
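/* For illustration (worked values, added note): for
   LDP <Xt1>, <Xt2>, [<Xn|SP>, #imm] the element size is 8, so in the
   AARCH64_OPND_ADDR_SIMM7 case above #imm must be a multiple of 8 in the
   range [-512, 504].  */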
1645 case AARCH64_OPND_ADDR_OFFSET:
1646 case AARCH64_OPND_ADDR_SIMM9:
1647 /* Unscaled signed 9-bit immediate offset. */
1648 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1649 {
1650 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1651 return 0;
1652 }
1653 break;
1654
1655 case AARCH64_OPND_ADDR_SIMM9_2:
1656 /* Unscaled signed 9-bit immediate offset, which has to be negative
1657 or unaligned. */
1658 size = aarch64_get_qualifier_esize (qualifier);
1659 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1660 && !value_aligned_p (opnd->addr.offset.imm, size))
1661 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1662 return 1;
1663 set_other_error (mismatch_detail, idx,
1664 _("negative or unaligned offset expected"));
1665 return 0;
1666
1667 case AARCH64_OPND_ADDR_SIMM10:
1668 /* Scaled signed 10-bit immediate offset. */
1669 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1670 {
1671 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1672 return 0;
1673 }
1674 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1675 {
1676 set_unaligned_error (mismatch_detail, idx, 8);
1677 return 0;
1678 }
1679 break;
1680
1681 case AARCH64_OPND_ADDR_SIMM11:
1682 /* Signed 11-bit immediate offset (multiple of 16). */
1683 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1684 {
1685 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1686 return 0;
1687 }
1688
1689 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1690 {
1691 set_unaligned_error (mismatch_detail, idx, 16);
1692 return 0;
1693 }
1694 break;
1695
1696 case AARCH64_OPND_ADDR_SIMM13:
1697 /* Signed 13-bit immediate offset (multiple of 16). */
1698 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1699 {
1700 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1701 return 0;
1702 }
1703
1704 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1705 {
1706 set_unaligned_error (mismatch_detail, idx, 16);
1707 return 0;
1708 }
1709 break;
1710
1711 case AARCH64_OPND_SIMD_ADDR_POST:
1712 /* AdvSIMD load/store multiple structures, post-index. */
1713 assert (idx == 1);
1714 if (opnd->addr.offset.is_reg)
1715 {
1716 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1717 return 1;
1718 else
1719 {
1720 set_other_error (mismatch_detail, idx,
1721 _("invalid register offset"));
1722 return 0;
1723 }
1724 }
1725 else
1726 {
1727 const aarch64_opnd_info *prev = &opnds[idx-1];
1728 unsigned num_bytes; /* total number of bytes transferred. */
1729 /* The opcode dependent area stores the number of elements in
1730 each structure to be loaded/stored. */
1731 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1732 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1733 /* Special handling of loading a single structure to all lanes. */
1734 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1735 * aarch64_get_qualifier_esize (prev->qualifier);
1736 else
1737 num_bytes = prev->reglist.num_regs
1738 * aarch64_get_qualifier_esize (prev->qualifier)
1739 * aarch64_get_qualifier_nelem (prev->qualifier);
1740 if ((int) num_bytes != opnd->addr.offset.imm)
1741 {
1742 set_other_error (mismatch_detail, idx,
1743 _("invalid post-increment amount"));
1744 return 0;
1745 }
1746 }
1747 break;
1748
1749 case AARCH64_OPND_ADDR_REGOFF:
1750 /* Get the size of the data element that is accessed, which may be
1751 different from that of the source register size,
1752 e.g. in strb/ldrb. */
1753 size = aarch64_get_qualifier_esize (opnd->qualifier);
1754 /* There is either no shift, or a shift by the binary logarithm of SIZE. */
1755 if (opnd->shifter.amount != 0
1756 && opnd->shifter.amount != (int)get_logsz (size))
1757 {
1758 set_other_error (mismatch_detail, idx,
1759 _("invalid shift amount"));
1760 return 0;
1761 }
1762 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1763 operators. */
1764 switch (opnd->shifter.kind)
1765 {
1766 case AARCH64_MOD_UXTW:
1767 case AARCH64_MOD_LSL:
1768 case AARCH64_MOD_SXTW:
1769 case AARCH64_MOD_SXTX: break;
1770 default:
1771 set_other_error (mismatch_detail, idx,
1772 _("invalid extend/shift operator"));
1773 return 0;
1774 }
1775 break;
1776
1777 case AARCH64_OPND_ADDR_UIMM12:
1778 imm = opnd->addr.offset.imm;
1779 /* Get the size of the data element that is accessed, which may be
1780 different from that of the source register size,
1781 e.g. in strb/ldrb. */
1782 size = aarch64_get_qualifier_esize (qualifier);
1783 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1784 {
1785 set_offset_out_of_range_error (mismatch_detail, idx,
1786 0, 4095 * size);
1787 return 0;
1788 }
1789 if (!value_aligned_p (opnd->addr.offset.imm, size))
1790 {
1791 set_unaligned_error (mismatch_detail, idx, size);
1792 return 0;
1793 }
1794 break;
1795
1796 case AARCH64_OPND_ADDR_PCREL14:
1797 case AARCH64_OPND_ADDR_PCREL19:
1798 case AARCH64_OPND_ADDR_PCREL21:
1799 case AARCH64_OPND_ADDR_PCREL26:
1800 imm = opnd->imm.value;
1801 if (operand_need_shift_by_two (get_operand_from_code (type)))
1802 {
1803 /* The offset value in a PC-relative branch instruction is always
1804 4-byte aligned and is encoded without the lowest 2 bits. */
1805 if (!value_aligned_p (imm, 4))
1806 {
1807 set_unaligned_error (mismatch_detail, idx, 4);
1808 return 0;
1809 }
1810 /* Right shift by 2 so that we can carry out the following check
1811 canonically. */
1812 imm >>= 2;
1813 }
1814 size = get_operand_fields_width (get_operand_from_code (type));
1815 if (!value_fit_signed_field_p (imm, size))
1816 {
1817 set_other_error (mismatch_detail, idx,
1818 _("immediate out of range"));
1819 return 0;
1820 }
1821 break;
1822
1823 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1824 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1825 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1826 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1827 min_value = -8;
1828 max_value = 7;
1829 sve_imm_offset_vl:
1830 assert (!opnd->addr.offset.is_reg);
1831 assert (opnd->addr.preind);
1832 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1833 min_value *= num;
1834 max_value *= num;
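      /* E.g. for SVE_ADDR_RI_S4x2xVL, NUM is 2, so the immediate must be a
	 multiple of 2 in the range [-16, 14], written as
	 "[<Xn|SP>, #<imm>, mul vl]".  */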
1835 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1836 || (opnd->shifter.operator_present
1837 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1838 {
1839 set_other_error (mismatch_detail, idx,
1840 _("invalid addressing mode"));
1841 return 0;
1842 }
1843 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1844 {
1845 set_offset_out_of_range_error (mismatch_detail, idx,
1846 min_value, max_value);
1847 return 0;
1848 }
1849 if (!value_aligned_p (opnd->addr.offset.imm, num))
1850 {
1851 set_unaligned_error (mismatch_detail, idx, num);
1852 return 0;
1853 }
1854 break;
1855
1856 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1857 min_value = -32;
1858 max_value = 31;
1859 goto sve_imm_offset_vl;
1860
1861 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1862 min_value = -256;
1863 max_value = 255;
1864 goto sve_imm_offset_vl;
1865
1866 case AARCH64_OPND_SVE_ADDR_RI_U6:
1867 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1868 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1869 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1870 min_value = 0;
1871 max_value = 63;
1872 sve_imm_offset:
1873 assert (!opnd->addr.offset.is_reg);
1874 assert (opnd->addr.preind);
1875 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1876 min_value *= num;
1877 max_value *= num;
1878 if (opnd->shifter.operator_present
1879 || opnd->shifter.amount_present)
1880 {
1881 set_other_error (mismatch_detail, idx,
1882 _("invalid addressing mode"));
1883 return 0;
1884 }
1885 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1886 {
1887 set_offset_out_of_range_error (mismatch_detail, idx,
1888 min_value, max_value);
1889 return 0;
1890 }
1891 if (!value_aligned_p (opnd->addr.offset.imm, num))
1892 {
1893 set_unaligned_error (mismatch_detail, idx, num);
1894 return 0;
1895 }
1896 break;
1897
1898 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1899 min_value = -8;
1900 max_value = 7;
1901 goto sve_imm_offset;
1902
1903 case AARCH64_OPND_SVE_ADDR_ZX:
1904 /* Everything is already ensured by parse_operands or
1905 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
1906 argument type). */
1907 assert (opnd->addr.offset.is_reg);
1908 assert (opnd->addr.preind);
1909 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
1910 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
1911 assert (opnd->shifter.operator_present == 0);
1912 break;
1913
1914 case AARCH64_OPND_SVE_ADDR_R:
1915 case AARCH64_OPND_SVE_ADDR_RR:
1916 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1917 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1918 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1919 case AARCH64_OPND_SVE_ADDR_RX:
1920 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1921 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1922 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1923 case AARCH64_OPND_SVE_ADDR_RZ:
1924 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1925 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1926 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1927 modifiers = 1 << AARCH64_MOD_LSL;
1928 sve_rr_operand:
1929 assert (opnd->addr.offset.is_reg);
1930 assert (opnd->addr.preind);
1931 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1932 && opnd->addr.offset.regno == 31)
1933 {
1934 set_other_error (mismatch_detail, idx,
1935 _("index register xzr is not allowed"));
1936 return 0;
1937 }
1938 if (((1 << opnd->shifter.kind) & modifiers) == 0
1939 || (opnd->shifter.amount
1940 != get_operand_specific_data (&aarch64_operands[type])))
1941 {
1942 set_other_error (mismatch_detail, idx,
1943 _("invalid addressing mode"));
1944 return 0;
1945 }
1946 break;
1947
1948 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1949 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1950 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1951 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1952 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1953 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1954 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1955 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1956 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1957 goto sve_rr_operand;
1958
1959 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1960 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1961 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1962 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1963 min_value = 0;
1964 max_value = 31;
1965 goto sve_imm_offset;
1966
1967 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1968 modifiers = 1 << AARCH64_MOD_LSL;
1969 sve_zz_operand:
1970 assert (opnd->addr.offset.is_reg);
1971 assert (opnd->addr.preind);
1972 if (((1 << opnd->shifter.kind) & modifiers) == 0
1973 || opnd->shifter.amount < 0
1974 || opnd->shifter.amount > 3)
1975 {
1976 set_other_error (mismatch_detail, idx,
1977 _("invalid addressing mode"));
1978 return 0;
1979 }
1980 break;
1981
1982 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1983 modifiers = (1 << AARCH64_MOD_SXTW);
1984 goto sve_zz_operand;
1985
1986 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1987 modifiers = 1 << AARCH64_MOD_UXTW;
1988 goto sve_zz_operand;
1989
1990 default:
1991 break;
1992 }
1993 break;
1994
1995 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1996 if (type == AARCH64_OPND_LEt)
1997 {
1998 /* Get the upper bound for the element index. */
1999 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2000 if (!value_in_range_p (opnd->reglist.index, 0, num))
2001 {
2002 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2003 return 0;
2004 }
2005 }
2006 /* The opcode dependent area stores the number of elements in
2007 each structure to be loaded/stored. */
2008 num = get_opcode_dependent_value (opcode);
2009 switch (type)
2010 {
2011 case AARCH64_OPND_LVt:
2012 assert (num >= 1 && num <= 4);
2013 	  /* Except for LD1/ST1, the number of registers must equal the
2014 	     number of structure elements.  */
2015 if (num != 1 && opnd->reglist.num_regs != num)
2016 {
2017 set_reg_list_error (mismatch_detail, idx, num);
2018 return 0;
2019 }
2020 break;
2021 case AARCH64_OPND_LVt_AL:
2022 case AARCH64_OPND_LEt:
2023 assert (num >= 1 && num <= 4);
2024 	  /* The number of registers must equal the number of structure
2025 	     elements.  */
2026 if (opnd->reglist.num_regs != num)
2027 {
2028 set_reg_list_error (mismatch_detail, idx, num);
2029 return 0;
2030 }
2031 break;
2032 default:
2033 break;
2034 }
2035 break;
2036
2037 case AARCH64_OPND_CLASS_IMMEDIATE:
2038 /* Constraint check on immediate operand. */
2039 imm = opnd->imm.value;
2040 /* E.g. imm_0_31 constrains value to be 0..31. */
2041 if (qualifier_value_in_range_constraint_p (qualifier)
2042 && !value_in_range_p (imm, get_lower_bound (qualifier),
2043 get_upper_bound (qualifier)))
2044 {
2045 set_imm_out_of_range_error (mismatch_detail, idx,
2046 get_lower_bound (qualifier),
2047 get_upper_bound (qualifier));
2048 return 0;
2049 }
2050
2051 switch (type)
2052 {
2053 case AARCH64_OPND_AIMM:
2054 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2055 {
2056 set_other_error (mismatch_detail, idx,
2057 _("invalid shift operator"));
2058 return 0;
2059 }
2060 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2061 {
2062 set_other_error (mismatch_detail, idx,
2063 _("shift amount must be 0 or 12"));
2064 return 0;
2065 }
2066 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2067 {
2068 set_other_error (mismatch_detail, idx,
2069 _("immediate out of range"));
2070 return 0;
2071 }
2072 break;
2073
2074 case AARCH64_OPND_HALF:
2075 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2076 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2077 {
2078 set_other_error (mismatch_detail, idx,
2079 _("invalid shift operator"));
2080 return 0;
2081 }
2082 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
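	  /* For MOVZ/MOVN/MOVK the checks below allow shift amounts of 0 or
	     16 for the 32-bit form (e.g. "movz w0, #1, lsl #16") and 0, 16,
	     32 or 48 for the 64-bit form.  */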
2083 if (!value_aligned_p (opnd->shifter.amount, 16))
2084 {
2085 set_other_error (mismatch_detail, idx,
2086 _("shift amount must be a multiple of 16"));
2087 return 0;
2088 }
2089 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2090 {
2091 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2092 0, size * 8 - 16);
2093 return 0;
2094 }
2095 if (opnd->imm.value < 0)
2096 {
2097 set_other_error (mismatch_detail, idx,
2098 _("negative immediate value not allowed"));
2099 return 0;
2100 }
2101 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2102 {
2103 set_other_error (mismatch_detail, idx,
2104 _("immediate out of range"));
2105 return 0;
2106 }
2107 break;
2108
2109 case AARCH64_OPND_IMM_MOV:
2110 {
2111 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2112 imm = opnd->imm.value;
2113 assert (idx == 1);
2114 switch (opcode->op)
2115 {
2116 case OP_MOV_IMM_WIDEN:
2117 imm = ~imm;
2118 /* Fall through. */
2119 case OP_MOV_IMM_WIDE:
2120 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2121 {
2122 set_other_error (mismatch_detail, idx,
2123 _("immediate out of range"));
2124 return 0;
2125 }
2126 break;
2127 case OP_MOV_IMM_LOG:
2128 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2129 {
2130 set_other_error (mismatch_detail, idx,
2131 _("immediate out of range"));
2132 return 0;
2133 }
2134 break;
2135 default:
2136 assert (0);
2137 return 0;
2138 }
2139 }
2140 break;
2141
2142 case AARCH64_OPND_NZCV:
2143 case AARCH64_OPND_CCMP_IMM:
2144 case AARCH64_OPND_EXCEPTION:
2145 case AARCH64_OPND_TME_UIMM16:
2146 case AARCH64_OPND_UIMM4:
2147 case AARCH64_OPND_UIMM4_ADDG:
2148 case AARCH64_OPND_UIMM7:
2149 case AARCH64_OPND_UIMM3_OP1:
2150 case AARCH64_OPND_UIMM3_OP2:
2151 case AARCH64_OPND_SVE_UIMM3:
2152 case AARCH64_OPND_SVE_UIMM7:
2153 case AARCH64_OPND_SVE_UIMM8:
2154 case AARCH64_OPND_SVE_UIMM8_53:
2155 size = get_operand_fields_width (get_operand_from_code (type));
2156 assert (size < 32);
2157 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2158 {
2159 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2160 (1 << size) - 1);
2161 return 0;
2162 }
2163 break;
2164
2165 case AARCH64_OPND_UIMM10:
2166 	  /* Scaled unsigned 10-bit immediate offset.  */
2167 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2168 {
2169 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2170 return 0;
2171 }
2172
2173 if (!value_aligned_p (opnd->imm.value, 16))
2174 {
2175 set_unaligned_error (mismatch_detail, idx, 16);
2176 return 0;
2177 }
2178 break;
2179
2180 case AARCH64_OPND_SIMM5:
2181 case AARCH64_OPND_SVE_SIMM5:
2182 case AARCH64_OPND_SVE_SIMM5B:
2183 case AARCH64_OPND_SVE_SIMM6:
2184 case AARCH64_OPND_SVE_SIMM8:
2185 size = get_operand_fields_width (get_operand_from_code (type));
2186 assert (size < 32);
2187 if (!value_fit_signed_field_p (opnd->imm.value, size))
2188 {
2189 set_imm_out_of_range_error (mismatch_detail, idx,
2190 -(1 << (size - 1)),
2191 (1 << (size - 1)) - 1);
2192 return 0;
2193 }
2194 break;
2195
2196 case AARCH64_OPND_WIDTH:
2197 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2198 && opnds[0].type == AARCH64_OPND_Rd);
2199 size = get_upper_bound (qualifier);
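	  /* E.g. for "ubfx w0, w1, #<lsb>, #<width>" the check below
	     enforces <lsb> + <width> <= 32.  */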
2200 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2201 /* lsb+width <= reg.size */
2202 {
2203 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2204 size - opnds[idx-1].imm.value);
2205 return 0;
2206 }
2207 break;
2208
2209 case AARCH64_OPND_LIMM:
2210 case AARCH64_OPND_SVE_LIMM:
2211 {
2212 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2213 uint64_t uimm = opnd->imm.value;
2214 if (opcode->op == OP_BIC)
2215 uimm = ~uimm;
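	/* Only "bitmask" immediates are accepted: a rotated run of
	   consecutive ones replicated across the register at a power-of-two
	   element size, e.g. 0xff00ff00ff00ff00 but not 0x1234.  */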
2216 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2217 {
2218 set_other_error (mismatch_detail, idx,
2219 _("immediate out of range"));
2220 return 0;
2221 }
2222 }
2223 break;
2224
2225 case AARCH64_OPND_IMM0:
2226 case AARCH64_OPND_FPIMM0:
2227 if (opnd->imm.value != 0)
2228 {
2229 set_other_error (mismatch_detail, idx,
2230 _("immediate zero expected"));
2231 return 0;
2232 }
2233 break;
2234
2235 case AARCH64_OPND_IMM_ROT1:
2236 case AARCH64_OPND_IMM_ROT2:
2237 case AARCH64_OPND_SVE_IMM_ROT2:
2238 if (opnd->imm.value != 0
2239 && opnd->imm.value != 90
2240 && opnd->imm.value != 180
2241 && opnd->imm.value != 270)
2242 {
2243 set_other_error (mismatch_detail, idx,
2244 _("rotate expected to be 0, 90, 180 or 270"));
2245 return 0;
2246 }
2247 break;
2248
2249 case AARCH64_OPND_IMM_ROT3:
2250 case AARCH64_OPND_SVE_IMM_ROT1:
2251 case AARCH64_OPND_SVE_IMM_ROT3:
2252 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2253 {
2254 set_other_error (mismatch_detail, idx,
2255 _("rotate expected to be 90 or 270"));
2256 return 0;
2257 }
2258 break;
2259
2260 case AARCH64_OPND_SHLL_IMM:
2261 assert (idx == 2);
2262 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2263 if (opnd->imm.value != size)
2264 {
2265 set_other_error (mismatch_detail, idx,
2266 _("invalid shift amount"));
2267 return 0;
2268 }
2269 break;
2270
2271 case AARCH64_OPND_IMM_VLSL:
2272 size = aarch64_get_qualifier_esize (qualifier);
2273 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2274 {
2275 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2276 size * 8 - 1);
2277 return 0;
2278 }
2279 break;
2280
2281 case AARCH64_OPND_IMM_VLSR:
2282 size = aarch64_get_qualifier_esize (qualifier);
2283 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2284 {
2285 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2286 return 0;
2287 }
2288 break;
2289
2290 case AARCH64_OPND_SIMD_IMM:
2291 case AARCH64_OPND_SIMD_IMM_SFT:
2292 /* Qualifier check. */
2293 switch (qualifier)
2294 {
2295 case AARCH64_OPND_QLF_LSL:
2296 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2297 {
2298 set_other_error (mismatch_detail, idx,
2299 _("invalid shift operator"));
2300 return 0;
2301 }
2302 break;
2303 case AARCH64_OPND_QLF_MSL:
2304 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2305 {
2306 set_other_error (mismatch_detail, idx,
2307 _("invalid shift operator"));
2308 return 0;
2309 }
2310 break;
2311 case AARCH64_OPND_QLF_NIL:
2312 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2313 {
2314 set_other_error (mismatch_detail, idx,
2315 _("shift is not permitted"));
2316 return 0;
2317 }
2318 break;
2319 default:
2320 assert (0);
2321 return 0;
2322 }
2323 /* Is the immediate valid? */
2324 assert (idx == 1);
2325 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2326 {
2327 /* uimm8 or simm8 */
2328 if (!value_in_range_p (opnd->imm.value, -128, 255))
2329 {
2330 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2331 return 0;
2332 }
2333 }
2334 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2335 {
2336 /* uimm64 is not
2337 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2338 ffffffffgggggggghhhhhhhh'. */
2339 set_other_error (mismatch_detail, idx,
2340 _("invalid value for immediate"));
2341 return 0;
2342 }
2343 /* Is the shift amount valid? */
2344 switch (opnd->shifter.kind)
2345 {
2346 case AARCH64_MOD_LSL:
2347 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2348 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2349 {
2350 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2351 (size - 1) * 8);
2352 return 0;
2353 }
2354 if (!value_aligned_p (opnd->shifter.amount, 8))
2355 {
2356 set_unaligned_error (mismatch_detail, idx, 8);
2357 return 0;
2358 }
2359 break;
2360 case AARCH64_MOD_MSL:
2361 	      /* Only 8 and 16 are valid shift amounts.  */
2362 	      if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2363 		{
2364 		  set_other_error (mismatch_detail, idx,
2365 				   _("shift amount must be 8 or 16"));
2366 return 0;
2367 }
2368 break;
2369 default:
2370 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2371 {
2372 set_other_error (mismatch_detail, idx,
2373 _("invalid shift operator"));
2374 return 0;
2375 }
2376 break;
2377 }
2378 break;
2379
2380 case AARCH64_OPND_FPIMM:
2381 case AARCH64_OPND_SIMD_FPIMM:
2382 case AARCH64_OPND_SVE_FPIMM8:
2383 if (opnd->imm.is_fp == 0)
2384 {
2385 set_other_error (mismatch_detail, idx,
2386 _("floating-point immediate expected"));
2387 return 0;
2388 }
2389 /* The value is expected to be an 8-bit floating-point constant with
2390 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2391 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2392 instruction). */
2393 if (!value_in_range_p (opnd->imm.value, 0, 255))
2394 {
2395 set_other_error (mismatch_detail, idx,
2396 _("immediate out of range"));
2397 return 0;
2398 }
2399 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2400 {
2401 set_other_error (mismatch_detail, idx,
2402 _("invalid shift operator"));
2403 return 0;
2404 }
2405 break;
2406
2407 case AARCH64_OPND_SVE_AIMM:
2408 min_value = 0;
2409 sve_aimm:
2410 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2411 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2412 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2413 uvalue = opnd->imm.value;
2414 shift = opnd->shifter.amount;
2415 if (size == 1)
2416 {
2417 if (shift != 0)
2418 {
2419 set_other_error (mismatch_detail, idx,
2420 _("no shift amount allowed for"
2421 " 8-bit constants"));
2422 return 0;
2423 }
2424 }
2425 else
2426 {
2427 if (shift != 0 && shift != 8)
2428 {
2429 set_other_error (mismatch_detail, idx,
2430 _("shift amount must be 0 or 8"));
2431 return 0;
2432 }
2433 if (shift == 0 && (uvalue & 0xff) == 0)
2434 {
2435 shift = 8;
2436 uvalue = (int64_t) uvalue / 256;
2437 }
2438 }
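	  /* E.g. with .H elements, "#0x1100" is accepted and normalized to
	     "#0x11, lsl #8" before the range checks below.  */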
2439 mask >>= shift;
2440 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2441 {
2442 set_other_error (mismatch_detail, idx,
2443 _("immediate too big for element size"));
2444 return 0;
2445 }
2446 uvalue = (uvalue - min_value) & mask;
2447 if (uvalue > 0xff)
2448 {
2449 set_other_error (mismatch_detail, idx,
2450 _("invalid arithmetic immediate"));
2451 return 0;
2452 }
2453 break;
2454
2455 case AARCH64_OPND_SVE_ASIMM:
2456 min_value = -128;
2457 goto sve_aimm;
2458
2459 case AARCH64_OPND_SVE_I1_HALF_ONE:
2460 assert (opnd->imm.is_fp);
2461 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2462 {
2463 set_other_error (mismatch_detail, idx,
2464 _("floating-point value must be 0.5 or 1.0"));
2465 return 0;
2466 }
2467 break;
2468
2469 case AARCH64_OPND_SVE_I1_HALF_TWO:
2470 assert (opnd->imm.is_fp);
2471 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2472 {
2473 set_other_error (mismatch_detail, idx,
2474 _("floating-point value must be 0.5 or 2.0"));
2475 return 0;
2476 }
2477 break;
2478
2479 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2480 assert (opnd->imm.is_fp);
2481 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2482 {
2483 set_other_error (mismatch_detail, idx,
2484 _("floating-point value must be 0.0 or 1.0"));
2485 return 0;
2486 }
2487 break;
2488
2489 case AARCH64_OPND_SVE_INV_LIMM:
2490 {
2491 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2492 uint64_t uimm = ~opnd->imm.value;
2493 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2494 {
2495 set_other_error (mismatch_detail, idx,
2496 _("immediate out of range"));
2497 return 0;
2498 }
2499 }
2500 break;
2501
2502 case AARCH64_OPND_SVE_LIMM_MOV:
2503 {
2504 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2505 uint64_t uimm = opnd->imm.value;
2506 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2507 {
2508 set_other_error (mismatch_detail, idx,
2509 _("immediate out of range"));
2510 return 0;
2511 }
2512 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2513 {
2514 set_other_error (mismatch_detail, idx,
2515 _("invalid replicated MOV immediate"));
2516 return 0;
2517 }
2518 }
2519 break;
2520
2521 case AARCH64_OPND_SVE_PATTERN_SCALED:
2522 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2523 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2524 {
2525 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2526 return 0;
2527 }
2528 break;
2529
2530 case AARCH64_OPND_SVE_SHLIMM_PRED:
2531 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2532 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2533 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2534 {
2535 set_imm_out_of_range_error (mismatch_detail, idx,
2536 0, 8 * size - 1);
2537 return 0;
2538 }
2539 break;
2540
2541 case AARCH64_OPND_SVE_SHRIMM_PRED:
2542 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2543 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2544 {
2545 unsigned int index =
2546 (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2547 size = aarch64_get_qualifier_esize (opnds[idx - index].qualifier);
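	    /* E.g. a right shift of .S elements accepts #1-#32, whereas the
	       left-shift forms above accept #0-#31.  */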
2548 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2549 {
2550 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2551 return 0;
2552 }
2553 break;
2554 }
2555
2556 default:
2557 break;
2558 }
2559 break;
2560
2561 case AARCH64_OPND_CLASS_SYSTEM:
2562 switch (type)
2563 {
2564 case AARCH64_OPND_PSTATEFIELD:
2565 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2566 /* MSR UAO, #uimm4
2567 MSR PAN, #uimm4
2568 MSR SSBS,#uimm4
2569 The immediate must be #0 or #1. */
2570 if ((opnd->pstatefield == 0x03 /* UAO. */
2571 || opnd->pstatefield == 0x04 /* PAN. */
2572 || opnd->pstatefield == 0x19 /* SSBS. */
2573 || opnd->pstatefield == 0x1a) /* DIT. */
2574 && opnds[1].imm.value > 1)
2575 {
2576 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2577 return 0;
2578 }
2579 /* MSR SPSel, #uimm4
2580 Uses uimm4 as a control value to select the stack pointer: if
2581 bit 0 is set it selects the current exception level's stack
2582 	     pointer, if bit 0 is clear it selects the shared EL0 stack pointer.
2583 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2584 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2585 {
2586 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2587 return 0;
2588 }
2589 break;
2590 default:
2591 break;
2592 }
2593 break;
2594
2595 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2596 /* Get the upper bound for the element index. */
2597 if (opcode->op == OP_FCMLA_ELEM)
2598 /* FCMLA index range depends on the vector size of other operands
2599 	   and is halved because complex numbers take two elements.  */
2600 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2601 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2602 else
2603 num = 16;
2604 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2605 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2606
2607 /* Index out-of-range. */
2608 if (!value_in_range_p (opnd->reglane.index, 0, num))
2609 {
2610 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2611 return 0;
2612 }
2613 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2614 	 <Vm> is the vector register (V0-V31) or (V0-V15), whose
2615 number is encoded in "size:M:Rm":
2616 size <Vm>
2617 00 RESERVED
2618 01 0:Rm
2619 10 M:Rm
2620 11 RESERVED */
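      /* Hence with the S_H qualifier only V0-V15 are valid for <Vm>;
	 the check below rejects V16-V31.  */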
2621 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2622 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2623 {
2624 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2625 return 0;
2626 }
2627 break;
2628
2629 case AARCH64_OPND_CLASS_MODIFIED_REG:
2630 assert (idx == 1 || idx == 2);
2631 switch (type)
2632 {
2633 case AARCH64_OPND_Rm_EXT:
2634 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2635 && opnd->shifter.kind != AARCH64_MOD_LSL)
2636 {
2637 set_other_error (mismatch_detail, idx,
2638 _("extend operator expected"));
2639 return 0;
2640 }
2641 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2642 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2643 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2644 case. */
2645 if (!aarch64_stack_pointer_p (opnds + 0)
2646 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2647 {
2648 if (!opnd->shifter.operator_present)
2649 {
2650 set_other_error (mismatch_detail, idx,
2651 _("missing extend operator"));
2652 return 0;
2653 }
2654 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2655 {
2656 set_other_error (mismatch_detail, idx,
2657 _("'LSL' operator not allowed"));
2658 return 0;
2659 }
2660 }
2661 assert (opnd->shifter.operator_present /* Default to LSL. */
2662 || opnd->shifter.kind == AARCH64_MOD_LSL);
2663 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2664 {
2665 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2666 return 0;
2667 }
2668 /* In the 64-bit form, the final register operand is written as Wm
2669 for all but the (possibly omitted) UXTX/LSL and SXTX
2670 operators.
2671 N.B. GAS allows X register to be used with any operator as a
2672 programming convenience. */
2673 if (qualifier == AARCH64_OPND_QLF_X
2674 && opnd->shifter.kind != AARCH64_MOD_LSL
2675 && opnd->shifter.kind != AARCH64_MOD_UXTX
2676 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2677 {
2678 set_other_error (mismatch_detail, idx, _("W register expected"));
2679 return 0;
2680 }
2681 break;
2682
2683 case AARCH64_OPND_Rm_SFT:
2684 /* ROR is not available to the shifted register operand in
2685 arithmetic instructions. */
2686 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2687 {
2688 set_other_error (mismatch_detail, idx,
2689 _("shift operator expected"));
2690 return 0;
2691 }
2692 if (opnd->shifter.kind == AARCH64_MOD_ROR
2693 && opcode->iclass != log_shift)
2694 {
2695 set_other_error (mismatch_detail, idx,
2696 _("'ROR' operator not allowed"));
2697 return 0;
2698 }
2699 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2700 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2701 {
2702 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2703 return 0;
2704 }
2705 break;
2706
2707 default:
2708 break;
2709 }
2710 break;
2711
2712 default:
2713 break;
2714 }
2715
2716 return 1;
2717 }
2718
2719 /* Main entrypoint for the operand constraint checking.
2720
2721    Return 1 if the operands of *INST meet the constraints applied by the
2722    operand codes and operand qualifiers; otherwise return 0, and if MISMATCH_DETAIL is
2723 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2724 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2725 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2726 error kind when it is notified that an instruction does not pass the check).
2727
2728 Un-determined operand qualifiers may get established during the process. */
2729
2730 int
2731 aarch64_match_operands_constraint (aarch64_inst *inst,
2732 aarch64_operand_error *mismatch_detail)
2733 {
2734 int i;
2735
2736 DEBUG_TRACE ("enter");
2737
2738 /* Check for cases where a source register needs to be the same as the
2739 destination register. Do this before matching qualifiers since if
2740 an instruction has both invalid tying and invalid qualifiers,
2741 the error about qualifiers would suggest several alternative
2742 instructions that also have invalid tying. */
2743 i = inst->opcode->tied_operand;
2744 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2745 {
2746 if (mismatch_detail)
2747 {
2748 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2749 mismatch_detail->index = i;
2750 mismatch_detail->error = NULL;
2751 }
2752 return 0;
2753 }
2754
2755   /* Match operands' qualifiers.
2756      *INST has already had qualifiers established for some, if not all, of
2757      its operands; we need to find out whether these established
2758      qualifiers match one of the qualifier sequences in
2759      INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
2760      the corresponding qualifier in that sequence.
2761      Only basic operand constraint checking is done here; the more
2762      thorough constraint checking will be carried out by
2763      operand_general_constraint_met_p, which has to be called after this
2764      in order to get all of the operands' qualifiers established.  */
2765 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2766 {
2767 DEBUG_TRACE ("FAIL on operand qualifier matching");
2768 if (mismatch_detail)
2769 {
2770 	  /* Return an error type to indicate that it is a qualifier
2771 	     matching failure; we don't care which operand it is, as there
2772 	     is enough information in the opcode table to reproduce it.  */
2773 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2774 mismatch_detail->index = -1;
2775 mismatch_detail->error = NULL;
2776 }
2777 return 0;
2778 }
2779
2780 /* Match operands' constraint. */
2781 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2782 {
2783 enum aarch64_opnd type = inst->opcode->operands[i];
2784 if (type == AARCH64_OPND_NIL)
2785 break;
2786 if (inst->operands[i].skip)
2787 {
2788 DEBUG_TRACE ("skip the incomplete operand %d", i);
2789 continue;
2790 }
2791 if (operand_general_constraint_met_p (inst->operands, i, type,
2792 inst->opcode, mismatch_detail) == 0)
2793 {
2794 DEBUG_TRACE ("FAIL on operand %d", i);
2795 return 0;
2796 }
2797 }
2798
2799 DEBUG_TRACE ("PASS");
2800
2801 return 1;
2802 }
2803
2804 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2805 Also updates the TYPE of each INST->OPERANDS with the corresponding
2806 value of OPCODE->OPERANDS.
2807
2808 Note that some operand qualifiers may need to be manually cleared by
2809    the caller before it further calls aarch64_opcode_encode;
2810    doing so helps the qualifier matching facilities work
2811    properly.  */
2812
2813 const aarch64_opcode*
2814 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2815 {
2816 int i;
2817 const aarch64_opcode *old = inst->opcode;
2818
2819 inst->opcode = opcode;
2820
2821 /* Update the operand types. */
2822 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2823 {
2824 inst->operands[i].type = opcode->operands[i];
2825 if (opcode->operands[i] == AARCH64_OPND_NIL)
2826 break;
2827 }
2828
2829 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2830
2831 return old;
2832 }
2833
2834 int
2835 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2836 {
2837 int i;
2838 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2839 if (operands[i] == operand)
2840 return i;
2841 else if (operands[i] == AARCH64_OPND_NIL)
2842 break;
2843 return -1;
2844 }
2845 \f
2846 /* R0...R30, followed by FOR31. */
2847 #define BANK(R, FOR31) \
2848 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
2849 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2850 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2851 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
2852 /* [0][0] 32-bit integer regs with sp Wn
2853 [0][1] 64-bit integer regs with sp Xn sf=1
2854 [1][0] 32-bit integer regs with #0 Wn
2855 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2856 static const char *int_reg[2][2][32] = {
2857 #define R32(X) "w" #X
2858 #define R64(X) "x" #X
2859 { BANK (R32, "wsp"), BANK (R64, "sp") },
2860 { BANK (R32, "wzr"), BANK (R64, "xzr") }
2861 #undef R64
2862 #undef R32
2863 };
2864
2865 /* Names of the SVE vector registers, first with .S suffixes,
2866 then with .D suffixes. */
2867
2868 static const char *sve_reg[2][32] = {
2869 #define ZS(X) "z" #X ".s"
2870 #define ZD(X) "z" #X ".d"
2871 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2872 #undef ZD
2873 #undef ZS
2874 };
2875 #undef BANK
2876
2877 /* Return the integer register name.
2878    If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg.  */
2879
2880 static inline const char *
2881 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2882 {
2883 const int has_zr = sp_reg_p ? 0 : 1;
2884 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2885 return int_reg[has_zr][is_64][regno];
2886 }
2887
2888 /* Like get_int_reg_name, but IS_64 is always 1. */
2889
2890 static inline const char *
2891 get_64bit_int_reg_name (int regno, int sp_reg_p)
2892 {
2893 const int has_zr = sp_reg_p ? 0 : 1;
2894 return int_reg[has_zr][1][regno];
2895 }
2896
2897 /* Get the name of the integer offset register in OPND, using the shift type
2898 to decide whether it's a word or doubleword. */
2899
2900 static inline const char *
2901 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2902 {
2903 switch (opnd->shifter.kind)
2904 {
2905 case AARCH64_MOD_UXTW:
2906 case AARCH64_MOD_SXTW:
2907 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2908
2909 case AARCH64_MOD_LSL:
2910 case AARCH64_MOD_SXTX:
2911 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2912
2913 default:
2914 abort ();
2915 }
2916 }
2917
2918 /* Get the name of the SVE vector offset register in OPND, using the operand
2919 qualifier to decide whether the suffix should be .S or .D. */
2920
2921 static inline const char *
2922 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2923 {
2924 assert (qualifier == AARCH64_OPND_QLF_S_S
2925 || qualifier == AARCH64_OPND_QLF_S_D);
2926 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2927 }
2928
2929 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2930
2931 typedef union
2932 {
2933 uint64_t i;
2934 double d;
2935 } double_conv_t;
2936
2937 typedef union
2938 {
2939 uint32_t i;
2940 float f;
2941 } single_conv_t;
2942
2943 typedef union
2944 {
2945 uint32_t i;
2946 float f;
2947 } half_conv_t;
2948
2949 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2950 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2951 (depending on the type of the instruction). IMM8 will be expanded to a
2952 single-precision floating-point value (SIZE == 4) or a double-precision
2953 floating-point value (SIZE == 8). A half-precision floating-point value
2954 (SIZE == 2) is expanded to a single-precision floating-point value. The
2955 expanded value is returned. */
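/* For example, IMM8 == 0x70 (a=0, b=1, cdefgh=110000) expands to
   0x3f800000 when SIZE is 4, i.e. the single-precision value 1.0.  */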
2956
2957 static uint64_t
2958 expand_fp_imm (int size, uint32_t imm8)
2959 {
2960 uint64_t imm = 0;
2961 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2962
2963 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2964 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2965 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2966 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2967 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
2968 if (size == 8)
2969 {
2970 imm = (imm8_7 << (63-32)) /* imm8<7> */
2971 	    | ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>) */
2972 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2973 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2974 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2975 imm <<= 32;
2976 }
2977 else if (size == 4 || size == 2)
2978 {
2979 imm = (imm8_7 << 31) /* imm8<7> */
2980 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2981 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2982 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2983 }
2984 else
2985 {
2986 /* An unsupported size. */
2987 assert (0);
2988 }
2989
2990 return imm;
2991 }
2992
2993 /* Produce the string representation of the register list operand *OPND
2994    in the buffer pointed to by BUF of size SIZE.  PREFIX is the part of
2995 the register name that comes before the register number, such as "v". */
2996 static void
2997 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2998 const char *prefix)
2999 {
3000 const int num_regs = opnd->reglist.num_regs;
3001 const int first_reg = opnd->reglist.first_regno;
3002 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
3003 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
3004 char tb[8]; /* Temporary buffer. */
3005
3006 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
3007 assert (num_regs >= 1 && num_regs <= 4);
3008
3009 /* Prepare the index if any. */
3010 if (opnd->reglist.has_index)
3011 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3012 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
3013 else
3014 tb[0] = '\0';
3015
3016 /* The hyphenated form is preferred for disassembly if there are
3017 more than two registers in the list, and the register numbers
3018 are monotonically increasing in increments of one. */
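  /* E.g. four registers starting at v4 are printed as "{v4.8h-v7.8h}",
     whereas a list that wraps around the register file, such as
     v31, v0, v1, falls back to the explicit form below.  */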
3019 if (num_regs > 2 && last_reg > first_reg)
3020 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
3021 prefix, last_reg, qlf_name, tb);
3022 else
3023 {
3024 const int reg0 = first_reg;
3025 const int reg1 = (first_reg + 1) & 0x1f;
3026 const int reg2 = (first_reg + 2) & 0x1f;
3027 const int reg3 = (first_reg + 3) & 0x1f;
3028
3029 switch (num_regs)
3030 {
3031 case 1:
3032 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
3033 break;
3034 case 2:
3035 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
3036 prefix, reg1, qlf_name, tb);
3037 break;
3038 case 3:
3039 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
3040 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3041 prefix, reg2, qlf_name, tb);
3042 break;
3043 case 4:
3044 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
3045 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3046 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
3047 break;
3048 }
3049 }
3050 }
3051
3052 /* Print the register+immediate address in OPND to BUF, which has SIZE
3053 characters. BASE is the name of the base register. */
3054
3055 static void
3056 print_immediate_offset_address (char *buf, size_t size,
3057 const aarch64_opnd_info *opnd,
3058 const char *base)
3059 {
3060 if (opnd->addr.writeback)
3061 {
3062 if (opnd->addr.preind)
3063 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
3064 else
3065 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
3066 }
3067 else
3068 {
3069 if (opnd->shifter.operator_present)
3070 {
3071 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3072 snprintf (buf, size, "[%s, #%d, mul vl]",
3073 base, opnd->addr.offset.imm);
3074 }
3075 else if (opnd->addr.offset.imm)
3076 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
3077 else
3078 snprintf (buf, size, "[%s]", base);
3079 }
3080 }
3081
3082 /* Produce the string representation of the register offset address operand
3083    *OPND in the buffer pointed to by BUF of size SIZE.  BASE and OFFSET are
3084 the names of the base and offset registers. */
3085 static void
3086 print_register_offset_address (char *buf, size_t size,
3087 const aarch64_opnd_info *opnd,
3088 const char *base, const char *offset)
3089 {
3090 char tb[16]; /* Temporary buffer. */
3091 bfd_boolean print_extend_p = TRUE;
3092 bfd_boolean print_amount_p = TRUE;
3093 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3094
3095 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3096 || !opnd->shifter.amount_present))
3097 {
3098 	  /* Don't print the shift/extend amount when the amount is zero and
3099 	     it is not the special case of an 8-bit load/store instruction.  */
3100 print_amount_p = FALSE;
3101 /* Likewise, no need to print the shift operator LSL in such a
3102 situation. */
3103 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3104 print_extend_p = FALSE;
3105 }
3106
3107 /* Prepare for the extend/shift. */
3108 if (print_extend_p)
3109 {
3110 if (print_amount_p)
3111 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3112 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3113 (opnd->shifter.amount % 100));
3114 else
3115 snprintf (tb, sizeof (tb), ", %s", shift_name);
3116 }
3117 else
3118 tb[0] = '\0';
3119
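  /* The result is e.g. "[x0, x1]", "[x0, x1, lsl #3]" or
     "[x0, w1, uxtw #2]", depending on the operand.  */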
3120 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3121 }
3122
3123 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3124 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3125 PC, PCREL_P and ADDRESS are used to pass in and return information about
3126 the PC-relative address calculation, where the PC value is passed in
3127    PC.  If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
3128 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3129 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3130
3131 The function serves both the disassembler and the assembler diagnostics
3132 issuer, which is the reason why it lives in this file. */
3133
3134 void
3135 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3136 const aarch64_opcode *opcode,
3137 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3138 bfd_vma *address, char** notes)
3139 {
3140 unsigned int i, num_conds;
3141 const char *name = NULL;
3142 const aarch64_opnd_info *opnd = opnds + idx;
3143 enum aarch64_modifier_kind kind;
3144 uint64_t addr, enum_value;
3145
3146 buf[0] = '\0';
3147 if (pcrel_p)
3148 *pcrel_p = 0;
3149
3150 switch (opnd->type)
3151 {
3152 case AARCH64_OPND_Rd:
3153 case AARCH64_OPND_Rn:
3154 case AARCH64_OPND_Rm:
3155 case AARCH64_OPND_Rt:
3156 case AARCH64_OPND_Rt2:
3157 case AARCH64_OPND_Rs:
3158 case AARCH64_OPND_Ra:
3159 case AARCH64_OPND_Rt_SYS:
3160 case AARCH64_OPND_PAIRREG:
3161 case AARCH64_OPND_SVE_Rm:
3162 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3163 the <ic_op>, therefore we use opnd->present to override the
3164 generic optional-ness information. */
3165 if (opnd->type == AARCH64_OPND_Rt_SYS)
3166 {
3167 if (!opnd->present)
3168 break;
3169 }
3170 /* Omit the operand, e.g. RET. */
3171 else if (optional_operand_p (opcode, idx)
3172 && (opnd->reg.regno
3173 == get_optional_operand_default_value (opcode)))
3174 break;
3175 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3176 || opnd->qualifier == AARCH64_OPND_QLF_X);
3177 snprintf (buf, size, "%s",
3178 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3179 break;
3180
3181 case AARCH64_OPND_Rd_SP:
3182 case AARCH64_OPND_Rn_SP:
3183 case AARCH64_OPND_Rt_SP:
3184 case AARCH64_OPND_SVE_Rn_SP:
3185 case AARCH64_OPND_Rm_SP:
3186 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3187 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3188 || opnd->qualifier == AARCH64_OPND_QLF_X
3189 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3190 snprintf (buf, size, "%s",
3191 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3192 break;
3193
3194 case AARCH64_OPND_Rm_EXT:
3195 kind = opnd->shifter.kind;
3196 assert (idx == 1 || idx == 2);
3197 if ((aarch64_stack_pointer_p (opnds)
3198 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3199 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3200 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3201 && kind == AARCH64_MOD_UXTW)
3202 || (opnd->qualifier == AARCH64_OPND_QLF_X
3203 && kind == AARCH64_MOD_UXTX)))
3204 {
3205 /* 'LSL' is the preferred form in this case. */
3206 kind = AARCH64_MOD_LSL;
3207 if (opnd->shifter.amount == 0)
3208 {
3209 /* Shifter omitted. */
3210 snprintf (buf, size, "%s",
3211 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3212 break;
3213 }
3214 }
3215 if (opnd->shifter.amount)
3216 snprintf (buf, size, "%s, %s #%" PRIi64,
3217 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3218 aarch64_operand_modifiers[kind].name,
3219 opnd->shifter.amount);
3220 else
3221 snprintf (buf, size, "%s, %s",
3222 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3223 aarch64_operand_modifiers[kind].name);
3224 break;
3225
3226 case AARCH64_OPND_Rm_SFT:
3227 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3228 || opnd->qualifier == AARCH64_OPND_QLF_X);
3229 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3230 snprintf (buf, size, "%s",
3231 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3232 else
3233 snprintf (buf, size, "%s, %s #%" PRIi64,
3234 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3235 aarch64_operand_modifiers[opnd->shifter.kind].name,
3236 opnd->shifter.amount);
3237 break;
3238
3239 case AARCH64_OPND_Fd:
3240 case AARCH64_OPND_Fn:
3241 case AARCH64_OPND_Fm:
3242 case AARCH64_OPND_Fa:
3243 case AARCH64_OPND_Ft:
3244 case AARCH64_OPND_Ft2:
3245 case AARCH64_OPND_Sd:
3246 case AARCH64_OPND_Sn:
3247 case AARCH64_OPND_Sm:
3248 case AARCH64_OPND_SVE_VZn:
3249 case AARCH64_OPND_SVE_Vd:
3250 case AARCH64_OPND_SVE_Vm:
3251 case AARCH64_OPND_SVE_Vn:
3252 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3253 opnd->reg.regno);
3254 break;
3255
3256 case AARCH64_OPND_Va:
3257 case AARCH64_OPND_Vd:
3258 case AARCH64_OPND_Vn:
3259 case AARCH64_OPND_Vm:
3260 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3261 aarch64_get_qualifier_name (opnd->qualifier));
3262 break;
3263
3264 case AARCH64_OPND_Ed:
3265 case AARCH64_OPND_En:
3266 case AARCH64_OPND_Em:
3267 case AARCH64_OPND_Em16:
3268 case AARCH64_OPND_SM3_IMM2:
3269 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3270 aarch64_get_qualifier_name (opnd->qualifier),
3271 opnd->reglane.index);
3272 break;
3273
3274 case AARCH64_OPND_VdD1:
3275 case AARCH64_OPND_VnD1:
3276 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3277 break;
3278
3279 case AARCH64_OPND_LVn:
3280 case AARCH64_OPND_LVt:
3281 case AARCH64_OPND_LVt_AL:
3282 case AARCH64_OPND_LEt:
3283 print_register_list (buf, size, opnd, "v");
3284 break;
3285
3286 case AARCH64_OPND_SVE_Pd:
3287 case AARCH64_OPND_SVE_Pg3:
3288 case AARCH64_OPND_SVE_Pg4_5:
3289 case AARCH64_OPND_SVE_Pg4_10:
3290 case AARCH64_OPND_SVE_Pg4_16:
3291 case AARCH64_OPND_SVE_Pm:
3292 case AARCH64_OPND_SVE_Pn:
3293 case AARCH64_OPND_SVE_Pt:
3294 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3295 snprintf (buf, size, "p%d", opnd->reg.regno);
3296 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3297 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3298 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3299 aarch64_get_qualifier_name (opnd->qualifier));
3300 else
3301 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3302 aarch64_get_qualifier_name (opnd->qualifier));
3303 break;
3304
3305 case AARCH64_OPND_SVE_Za_5:
3306 case AARCH64_OPND_SVE_Za_16:
3307 case AARCH64_OPND_SVE_Zd:
3308 case AARCH64_OPND_SVE_Zm_5:
3309 case AARCH64_OPND_SVE_Zm_16:
3310 case AARCH64_OPND_SVE_Zn:
3311 case AARCH64_OPND_SVE_Zt:
3312 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3313 snprintf (buf, size, "z%d", opnd->reg.regno);
3314 else
3315 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3316 aarch64_get_qualifier_name (opnd->qualifier));
3317 break;
3318
3319 case AARCH64_OPND_SVE_ZnxN:
3320 case AARCH64_OPND_SVE_ZtxN:
3321 print_register_list (buf, size, opnd, "z");
3322 break;
3323
3324 case AARCH64_OPND_SVE_Zm3_INDEX:
3325 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3326 case AARCH64_OPND_SVE_Zm3_11_INDEX:
3327 case AARCH64_OPND_SVE_Zm4_INDEX:
3328 case AARCH64_OPND_SVE_Zn_INDEX:
3329 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3330 aarch64_get_qualifier_name (opnd->qualifier),
3331 opnd->reglane.index);
3332 break;
3333
3334 case AARCH64_OPND_CRn:
3335 case AARCH64_OPND_CRm:
3336 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3337 break;
3338
3339 case AARCH64_OPND_IDX:
3340 case AARCH64_OPND_MASK:
3341 case AARCH64_OPND_IMM:
3342 case AARCH64_OPND_IMM_2:
3343 case AARCH64_OPND_WIDTH:
3344 case AARCH64_OPND_UIMM3_OP1:
3345 case AARCH64_OPND_UIMM3_OP2:
3346 case AARCH64_OPND_BIT_NUM:
3347 case AARCH64_OPND_IMM_VLSL:
3348 case AARCH64_OPND_IMM_VLSR:
3349 case AARCH64_OPND_SHLL_IMM:
3350 case AARCH64_OPND_IMM0:
3351 case AARCH64_OPND_IMMR:
3352 case AARCH64_OPND_IMMS:
3353 case AARCH64_OPND_FBITS:
3354 case AARCH64_OPND_TME_UIMM16:
3355 case AARCH64_OPND_SIMM5:
3356 case AARCH64_OPND_SVE_SHLIMM_PRED:
3357 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3358 case AARCH64_OPND_SVE_SHRIMM_PRED:
3359 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3360 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3361 case AARCH64_OPND_SVE_SIMM5:
3362 case AARCH64_OPND_SVE_SIMM5B:
3363 case AARCH64_OPND_SVE_SIMM6:
3364 case AARCH64_OPND_SVE_SIMM8:
3365 case AARCH64_OPND_SVE_UIMM3:
3366 case AARCH64_OPND_SVE_UIMM7:
3367 case AARCH64_OPND_SVE_UIMM8:
3368 case AARCH64_OPND_SVE_UIMM8_53:
3369 case AARCH64_OPND_IMM_ROT1:
3370 case AARCH64_OPND_IMM_ROT2:
3371 case AARCH64_OPND_IMM_ROT3:
3372 case AARCH64_OPND_SVE_IMM_ROT1:
3373 case AARCH64_OPND_SVE_IMM_ROT2:
3374 case AARCH64_OPND_SVE_IMM_ROT3:
3375 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3376 break;
3377
3378 case AARCH64_OPND_SVE_I1_HALF_ONE:
3379 case AARCH64_OPND_SVE_I1_HALF_TWO:
3380 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3381 {
3382 single_conv_t c;
3383 c.i = opnd->imm.value;
3384 snprintf (buf, size, "#%.1f", c.f);
3385 break;
3386 }
3387
3388 case AARCH64_OPND_SVE_PATTERN:
3389 if (optional_operand_p (opcode, idx)
3390 && opnd->imm.value == get_optional_operand_default_value (opcode))
3391 break;
3392 enum_value = opnd->imm.value;
3393 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3394 if (aarch64_sve_pattern_array[enum_value])
3395 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3396 else
3397 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3398 break;
3399
3400 case AARCH64_OPND_SVE_PATTERN_SCALED:
3401 if (optional_operand_p (opcode, idx)
3402 && !opnd->shifter.operator_present
3403 && opnd->imm.value == get_optional_operand_default_value (opcode))
3404 break;
3405 enum_value = opnd->imm.value;
3406 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3407 if (aarch64_sve_pattern_array[opnd->imm.value])
3408 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3409 else
3410 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3411 if (opnd->shifter.operator_present)
3412 {
3413 size_t len = strlen (buf);
3414 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3415 aarch64_operand_modifiers[opnd->shifter.kind].name,
3416 opnd->shifter.amount);
3417 }
3418 break;
3419
3420 case AARCH64_OPND_SVE_PRFOP:
3421 enum_value = opnd->imm.value;
3422 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3423 if (aarch64_sve_prfop_array[enum_value])
3424 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3425 else
3426 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3427 break;
3428
3429 case AARCH64_OPND_IMM_MOV:
3430 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3431 {
3432 case 4: /* e.g. MOV Wd, #<imm32>. */
3433 {
3434 int imm32 = opnd->imm.value;
3435 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3436 }
3437 break;
3438 case 8: /* e.g. MOV Xd, #<imm64>. */
3439 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3440 opnd->imm.value, opnd->imm.value);
3441 break;
3442 default: assert (0);
3443 }
3444 break;
3445
3446 case AARCH64_OPND_FPIMM0:
3447 snprintf (buf, size, "#0.0");
3448 break;
3449
3450 case AARCH64_OPND_LIMM:
3451 case AARCH64_OPND_AIMM:
3452 case AARCH64_OPND_HALF:
3453 case AARCH64_OPND_SVE_INV_LIMM:
3454 case AARCH64_OPND_SVE_LIMM:
3455 case AARCH64_OPND_SVE_LIMM_MOV:
3456 if (opnd->shifter.amount)
3457 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3458 opnd->shifter.amount);
3459 else
3460 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3461 break;
3462
3463 case AARCH64_OPND_SIMD_IMM:
3464 case AARCH64_OPND_SIMD_IMM_SFT:
3465 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3466 || opnd->shifter.kind == AARCH64_MOD_NONE)
3467 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3468 else
3469 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3470 aarch64_operand_modifiers[opnd->shifter.kind].name,
3471 opnd->shifter.amount);
3472 break;
3473
3474 case AARCH64_OPND_SVE_AIMM:
3475 case AARCH64_OPND_SVE_ASIMM:
3476 if (opnd->shifter.amount)
3477 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3478 opnd->shifter.amount);
3479 else
3480 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3481 break;
3482
3483 case AARCH64_OPND_FPIMM:
3484 case AARCH64_OPND_SIMD_FPIMM:
3485 case AARCH64_OPND_SVE_FPIMM8:
3486 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3487 {
3488 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3489 {
3490 half_conv_t c;
3491 c.i = expand_fp_imm (2, opnd->imm.value);
3492 snprintf (buf, size, "#%.18e", c.f);
3493 }
3494 break;
3495 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3496 {
3497 single_conv_t c;
3498 c.i = expand_fp_imm (4, opnd->imm.value);
3499 snprintf (buf, size, "#%.18e", c.f);
3500 }
3501 break;
3502 	case 8: /* e.g. FMOV <Dd>, #<imm>.  */
3503 {
3504 double_conv_t c;
3505 c.i = expand_fp_imm (8, opnd->imm.value);
3506 snprintf (buf, size, "#%.18e", c.d);
3507 }
3508 break;
3509 default: assert (0);
3510 }
3511 break;
3512
3513 case AARCH64_OPND_CCMP_IMM:
3514 case AARCH64_OPND_NZCV:
3515 case AARCH64_OPND_EXCEPTION:
3516 case AARCH64_OPND_UIMM4:
3517 case AARCH64_OPND_UIMM4_ADDG:
3518 case AARCH64_OPND_UIMM7:
3519 case AARCH64_OPND_UIMM10:
3520 if (optional_operand_p (opcode, idx) == TRUE
3521 && (opnd->imm.value ==
3522 (int64_t) get_optional_operand_default_value (opcode)))
3523 /* Omit the operand, e.g. DCPS1. */
3524 break;
3525 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3526 break;
3527
3528 case AARCH64_OPND_COND:
3529 case AARCH64_OPND_COND1:
3530 snprintf (buf, size, "%s", opnd->cond->names[0]);
3531 num_conds = ARRAY_SIZE (opnd->cond->names);
3532 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3533 {
3534 size_t len = strlen (buf);
3535 if (i == 1)
3536 snprintf (buf + len, size - len, " // %s = %s",
3537 opnd->cond->names[0], opnd->cond->names[i]);
3538 else
3539 snprintf (buf + len, size - len, ", %s",
3540 opnd->cond->names[i]);
3541 }
3542 break;
3543
3544 case AARCH64_OPND_ADDR_ADRP:
3545 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3546 + opnd->imm.value;
3547 if (pcrel_p)
3548 *pcrel_p = 1;
3549 if (address)
3550 *address = addr;
3551       /* This is not necessary during disassembly, as print_address_func
3552 	 in the disassemble_info will take care of the printing.  But some
3553 	 other callers may still be interested in getting the string in *BUF,
3554 	 so here we do snprintf regardless.  */
3555 snprintf (buf, size, "#0x%" PRIx64, addr);
3556 break;
3557
3558 case AARCH64_OPND_ADDR_PCREL14:
3559 case AARCH64_OPND_ADDR_PCREL19:
3560 case AARCH64_OPND_ADDR_PCREL21:
3561 case AARCH64_OPND_ADDR_PCREL26:
3562 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3563 if (pcrel_p)
3564 *pcrel_p = 1;
3565 if (address)
3566 *address = addr;
3567       /* This is not necessary during disassembly, as print_address_func
3568 	 in the disassemble_info will take care of the printing.  But some
3569 	 other callers may still be interested in getting the string in *BUF,
3570 	 so here we do snprintf regardless.  */
3571 snprintf (buf, size, "#0x%" PRIx64, addr);
3572 break;
3573
3574 case AARCH64_OPND_ADDR_SIMPLE:
3575 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3576 case AARCH64_OPND_SIMD_ADDR_POST:
3577 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3578 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3579 {
3580 if (opnd->addr.offset.is_reg)
3581 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3582 else
3583 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3584 }
3585 else
3586 snprintf (buf, size, "[%s]", name);
3587 break;
3588
3589 case AARCH64_OPND_ADDR_REGOFF:
3590 case AARCH64_OPND_SVE_ADDR_R:
3591 case AARCH64_OPND_SVE_ADDR_RR:
3592 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3593 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3594 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3595 case AARCH64_OPND_SVE_ADDR_RX:
3596 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3597 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3598 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3599 print_register_offset_address
3600 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3601 get_offset_int_reg_name (opnd));
3602 break;
3603
3604 case AARCH64_OPND_SVE_ADDR_ZX:
3605 print_register_offset_address
3606 (buf, size, opnd,
3607 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3608 get_64bit_int_reg_name (opnd->addr.offset.regno, 0));
3609 break;
3610
3611 case AARCH64_OPND_SVE_ADDR_RZ:
3612 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3613 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3614 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3615 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3616 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3617 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3618 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3619 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3620 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3621 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3622 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3623 print_register_offset_address
3624 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3625 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3626 break;
3627
3628 case AARCH64_OPND_ADDR_SIMM7:
3629 case AARCH64_OPND_ADDR_SIMM9:
3630 case AARCH64_OPND_ADDR_SIMM9_2:
3631 case AARCH64_OPND_ADDR_SIMM10:
3632 case AARCH64_OPND_ADDR_SIMM11:
3633 case AARCH64_OPND_ADDR_SIMM13:
3634 case AARCH64_OPND_ADDR_OFFSET:
3635 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3636 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3637 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3638 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3639 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3640 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3641 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3642 case AARCH64_OPND_SVE_ADDR_RI_U6:
3643 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3644 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3645 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3646 print_immediate_offset_address
3647 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3648 break;
3649
3650 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3651 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3652 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3653 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3654 print_immediate_offset_address
3655 (buf, size, opnd,
3656 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3657 break;
3658
3659 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3660 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3661 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3662 print_register_offset_address
3663 (buf, size, opnd,
3664 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3665 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3666 break;
3667
3668 case AARCH64_OPND_ADDR_UIMM12:
3669 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3670 if (opnd->addr.offset.imm)
3671 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3672 else
3673 snprintf (buf, size, "[%s]", name);
3674 break;
3675
3676 case AARCH64_OPND_SYSREG:
3677 for (i = 0; aarch64_sys_regs[i].name; ++i)
3678 {
3679 bfd_boolean exact_match
3680 = (aarch64_sys_regs[i].flags & opnd->sysreg.flags)
3681 == opnd->sysreg.flags;
3682
3683 /* Try to find an exact match, but if that fails, return the first
3684 partial match that was found. */
3685 if (aarch64_sys_regs[i].value == opnd->sysreg.value
3686 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i])
3687 && (name == NULL || exact_match))
3688 {
3689 name = aarch64_sys_regs[i].name;
3690 if (exact_match)
3691 {
3692 if (notes)
3693 *notes = NULL;
3694 break;
3695 }
3696
3697 /* If we didn't match exactly, that means the presence of a flag
3698 indicates what we didn't want for this instruction. E.g. if
3699 F_REG_READ is there, that means we were looking for a write
3700 register. See aarch64_ext_sysreg. */
3701 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
3702 *notes = _("reading from a write-only register");
3703 else if (aarch64_sys_regs[i].flags & F_REG_READ)
3704 *notes = _("writing to a read-only register");
3705 }
3706 }
3707
3708 if (name)
3709 snprintf (buf, size, "%s", name);
3710 else
3711 {
3712 /* Implementation defined system register. */
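/* The packed value splits as op0:op1:CRn:CRm:op2 (2, 3, 4, 4 and 3 bits
   respectively, mirroring the CPENC layout further down); for example, a
   value of 0xc790 is printed as "s3_0_c15_c2_0". */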
3713 unsigned int value = opnd->sysreg.value;
3714 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3715 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3716 value & 0x7);
3717 }
3718 break;
3719
3720 case AARCH64_OPND_PSTATEFIELD:
3721 for (i = 0; aarch64_pstatefields[i].name; ++i)
3722 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3723 break;
3724 assert (aarch64_pstatefields[i].name);
3725 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3726 break;
3727
3728 case AARCH64_OPND_SYSREG_AT:
3729 case AARCH64_OPND_SYSREG_DC:
3730 case AARCH64_OPND_SYSREG_IC:
3731 case AARCH64_OPND_SYSREG_TLBI:
3732 case AARCH64_OPND_SYSREG_SR:
3733 snprintf (buf, size, "%s", opnd->sysins_op->name);
3734 break;
3735
3736 case AARCH64_OPND_BARRIER:
3737 snprintf (buf, size, "%s", opnd->barrier->name);
3738 break;
3739
3740 case AARCH64_OPND_BARRIER_ISB:
3741 /* Operand can be omitted, e.g. in DCPS1. */
3742 if (! optional_operand_p (opcode, idx)
3743 || (opnd->barrier->value
3744 != get_optional_operand_default_value (opcode)))
3745 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3746 break;
3747
3748 case AARCH64_OPND_PRFOP:
3749 if (opnd->prfop->name != NULL)
3750 snprintf (buf, size, "%s", opnd->prfop->name);
3751 else
3752 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3753 break;
3754
3755 case AARCH64_OPND_BARRIER_PSB:
3756 case AARCH64_OPND_BTI_TARGET:
3757 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
3758 snprintf (buf, size, "%s", opnd->hint_option->name);
3759 break;
3760
3761 default:
3762 assert (0);
3763 }
3764 }
3765 \f
3766 #define CPENC(op0,op1,crn,crm,op2) \
3767 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3768 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
3769 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3770 /* for 3.9.10 System Instructions */
3771 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
3772
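/* As a worked example (added for illustration, not in the original source):
   tpidr_el0 below is CPENC (3, 3, C13, C0, 2), which packs op0:op1:CRn:CRm:op2
   into one value:

     (3 << 19) | (3 << 16) | (13 << 12) | (0 << 8) | (2 << 5) = 0x1bd040
     0x1bd040 >> 5                                            = 0xde82

   i.e. 0xde82 == 0b11'011'1101'0000'010, the 2/3/4/4/3-bit fields of the
   MRS/MSR system register encoding laid end to end. */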
3773 #define C0 0
3774 #define C1 1
3775 #define C2 2
3776 #define C3 3
3777 #define C4 4
3778 #define C5 5
3779 #define C6 6
3780 #define C7 7
3781 #define C8 8
3782 #define C9 9
3783 #define C10 10
3784 #define C11 11
3785 #define C12 12
3786 #define C13 13
3787 #define C14 14
3788 #define C15 15
3789
3790 /* TODO: there is one more issue that needs to be resolved:
3791 1. handle cpu-implementation-defined system registers. */
3792 const aarch64_sys_reg aarch64_sys_regs [] =
3793 {
3794 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3795 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3796 { "elr_el1", CPEN_(0,C0,1), 0 },
3797 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3798 { "sp_el0", CPEN_(0,C1,0), 0 },
3799 { "spsel", CPEN_(0,C2,0), 0 },
3800 { "daif", CPEN_(3,C2,1), 0 },
3801 { "currentel", CPEN_(0,C2,2), F_REG_READ }, /* RO */
3802 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3803 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3804 { "nzcv", CPEN_(3,C2,0), 0 },
3805 { "ssbs", CPEN_(3,C2,6), F_ARCHEXT },
3806 { "fpcr", CPEN_(3,C4,0), 0 },
3807 { "fpsr", CPEN_(3,C4,1), 0 },
3808 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3809 { "dlr_el0", CPEN_(3,C5,1), 0 },
3810 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3811 { "elr_el2", CPEN_(4,C0,1), 0 },
3812 { "sp_el1", CPEN_(4,C1,0), 0 },
3813 { "spsr_irq", CPEN_(4,C3,0), 0 },
3814 { "spsr_abt", CPEN_(4,C3,1), 0 },
3815 { "spsr_und", CPEN_(4,C3,2), 0 },
3816 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3817 { "spsr_el3", CPEN_(6,C0,0), 0 },
3818 { "elr_el3", CPEN_(6,C0,1), 0 },
3819 { "sp_el2", CPEN_(6,C1,0), 0 },
3820 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3821 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3822 { "midr_el1", CPENC(3,0,C0,C0,0), F_REG_READ }, /* RO */
3823 { "ctr_el0", CPENC(3,3,C0,C0,1), F_REG_READ }, /* RO */
3824 { "mpidr_el1", CPENC(3,0,C0,C0,5), F_REG_READ }, /* RO */
3825 { "revidr_el1", CPENC(3,0,C0,C0,6), F_REG_READ }, /* RO */
3826 { "aidr_el1", CPENC(3,1,C0,C0,7), F_REG_READ }, /* RO */
3827 { "dczid_el0", CPENC(3,3,C0,C0,7), F_REG_READ }, /* RO */
3828 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), F_REG_READ }, /* RO */
3829 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), F_REG_READ }, /* RO */
3830 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), F_REG_READ }, /* RO */
3831 { "id_pfr2_el1", CPENC(3,0,C0,C3,4), F_ARCHEXT | F_REG_READ}, /* RO */
3832 { "id_afr0_el1", CPENC(3,0,C0,C1,3), F_REG_READ }, /* RO */
3833 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), F_REG_READ }, /* RO */
3834 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), F_REG_READ }, /* RO */
3835 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), F_REG_READ }, /* RO */
3836 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), F_REG_READ }, /* RO */
3837 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), F_REG_READ }, /* RO */
3838 { "id_isar0_el1", CPENC(3,0,C0,C2,0), F_REG_READ }, /* RO */
3839 { "id_isar1_el1", CPENC(3,0,C0,C2,1), F_REG_READ }, /* RO */
3840 { "id_isar2_el1", CPENC(3,0,C0,C2,2), F_REG_READ }, /* RO */
3841 { "id_isar3_el1", CPENC(3,0,C0,C2,3), F_REG_READ }, /* RO */
3842 { "id_isar4_el1", CPENC(3,0,C0,C2,4), F_REG_READ }, /* RO */
3843 { "id_isar5_el1", CPENC(3,0,C0,C2,5), F_REG_READ }, /* RO */
3844 { "mvfr0_el1", CPENC(3,0,C0,C3,0), F_REG_READ }, /* RO */
3845 { "mvfr1_el1", CPENC(3,0,C0,C3,1), F_REG_READ }, /* RO */
3846 { "mvfr2_el1", CPENC(3,0,C0,C3,2), F_REG_READ }, /* RO */
3847 { "ccsidr_el1", CPENC(3,1,C0,C0,0), F_REG_READ }, /* RO */
3848 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), F_REG_READ }, /* RO */
3849 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), F_REG_READ }, /* RO */
3850 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), F_REG_READ }, /* RO */
3851 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), F_REG_READ }, /* RO */
3852 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), F_REG_READ }, /* RO */
3853 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), F_REG_READ }, /* RO */
3854 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), F_REG_READ }, /* RO */
3855 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), F_REG_READ }, /* RO */
3856 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT | F_REG_READ }, /* RO */
3857 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), F_REG_READ }, /* RO */
3858 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), F_REG_READ }, /* RO */
3859 { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT | F_REG_READ }, /* RO */
3860 { "clidr_el1", CPENC(3,1,C0,C0,1), F_REG_READ }, /* RO */
3861 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 },
3862 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3863 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3864 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3865 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3866 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3867 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3868 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3869 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3870 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3871 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3872 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3873 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3874 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3875 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3876 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3877 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3878 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3879 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3880 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3881 { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3882 { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3883 { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3884 { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3885 { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3886 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3887 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3888 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3889 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3890 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3891 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3892 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3893 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3894 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3895 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3896 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3897 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3898 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3899 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3900 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3901 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3902 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3903 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3904 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3905 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3906 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3907 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3908 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3909 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3910 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3911 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3912 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3913 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3914 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3915 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3916 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3917 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3918 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3919 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3920 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3921 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT },
3922 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3923 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3924 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3925 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3926 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3927 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3928 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3929 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3930 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3931 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3932 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3933 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3934 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3935 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3936 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3937 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3938 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3939 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3940 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3941 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3942 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3943 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3944 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3945 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3946 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3947 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3948 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3949 { "rvbar_el1", CPENC(3,0,C12,C0,1), F_REG_READ }, /* RO */
3950 { "rvbar_el2", CPENC(3,4,C12,C0,1), F_REG_READ }, /* RO */
3951 { "rvbar_el3", CPENC(3,6,C12,C0,1), F_REG_READ }, /* RO */
3952 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3953 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3954 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3955 { "isr_el1", CPENC(3,0,C12,C1,0), F_REG_READ }, /* RO */
3956 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3957 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3958 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3959 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3960 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3961 { "rndr", CPENC(3,3,C2,C4,0), F_ARCHEXT | F_REG_READ }, /* RO */
3962 { "rndrrs", CPENC(3,3,C2,C4,1), F_ARCHEXT | F_REG_READ }, /* RO */
3963 { "tco", CPENC(3,3,C4,C2,7), F_ARCHEXT },
3964 { "tfsre0_el1", CPENC(3,0,C6,C6,1), F_ARCHEXT },
3965 { "tfsr_el1", CPENC(3,0,C6,C5,0), F_ARCHEXT },
3966 { "tfsr_el2", CPENC(3,4,C6,C5,0), F_ARCHEXT },
3967 { "tfsr_el3", CPENC(3,6,C6,C6,0), F_ARCHEXT },
3968 { "tfsr_el12", CPENC(3,5,C6,C6,0), F_ARCHEXT },
3969 { "rgsr_el1", CPENC(3,0,C1,C0,5), F_ARCHEXT },
3970 { "gcr_el1", CPENC(3,0,C1,C0,6), F_ARCHEXT },
3971 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3972 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RW */
3973 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3974 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3975 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3976 { "scxtnum_el0", CPENC(3,3,C13,C0,7), F_ARCHEXT },
3977 { "scxtnum_el1", CPENC(3,0,C13,C0,7), F_ARCHEXT },
3978 { "scxtnum_el2", CPENC(3,4,C13,C0,7), F_ARCHEXT },
3979 { "scxtnum_el12", CPENC(3,5,C13,C0,7), F_ARCHEXT },
3980 { "scxtnum_el3", CPENC(3,6,C13,C0,7), F_ARCHEXT },
3981 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3982 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RW */
3983 { "cntpct_el0", CPENC(3,3,C14,C0,1), F_REG_READ }, /* RO */
3984 { "cntvct_el0", CPENC(3,3,C14,C0,2), F_REG_READ }, /* RO */
3985 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3986 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3987 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3988 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3989 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3990 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3991 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3992 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3993 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3994 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3995 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3996 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3997 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3998 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3999 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
4000 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
4001 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
4002 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
4003 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
4004 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
4005 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
4006 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
4007 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
4008 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
4009 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
4010 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
4011 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
4012 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
4013 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
4014 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
4015 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), F_REG_READ }, /* r */
4016 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
4017 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
4018 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), F_REG_READ }, /* r */
4019 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), F_REG_WRITE }, /* w */
4020 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 },
4021 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 },
4022 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
4023 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
4024 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
4025 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
4026 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
4027 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
4028 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
4029 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
4030 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
4031 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
4032 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
4033 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
4034 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
4035 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
4036 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
4037 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
4038 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
4039 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
4040 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
4041 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
4042 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
4043 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
4044 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
4045 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
4046 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
4047 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
4048 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
4049 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
4050 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
4051 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
4052 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
4053 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
4054 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
4055 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
4056 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
4057 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
4058 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
4059 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
4060 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
4061 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
4062 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
4063 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
4064 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
4065 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
4066 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
4067 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
4068 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
4069 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
4070 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
4071 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
4072 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
4073 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
4074 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
4075 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
4076 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
4077 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
4078 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
4079 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
4080 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
4081 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
4082 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
4083 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
4084 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
4085 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
4086 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
4087 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
4088 { "mdrar_el1", CPENC(2,0,C1, C0, 0), F_REG_READ }, /* r */
4089 { "oslar_el1", CPENC(2,0,C1, C0, 4), F_REG_WRITE }, /* w */
4090 { "oslsr_el1", CPENC(2,0,C1, C1, 4), F_REG_READ }, /* r */
4091 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
4092 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
4093 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
4094 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
4095 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), F_REG_READ }, /* r */
4096 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
4097 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
4098 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
4099 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT | F_REG_READ }, /* ro */
4100 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
4101 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
4102 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
4103 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
4104 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
4105 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
4106 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* rw */
4107 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
4108 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
4109 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
4110 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
4111 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
4112 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
4113 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), F_REG_WRITE }, /* w */
4114 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
4115 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), F_REG_READ }, /* r */
4116 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), F_REG_READ }, /* r */
4117 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
4118 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
4119 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
4120 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
4121 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
4122 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
4123 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
4124 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
4125 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
4126 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
4127 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
4128 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
4129 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
4130 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
4131 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
4132 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
4133 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
4134 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
4135 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
4136 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
4137 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
4138 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
4139 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
4140 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
4141 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
4142 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
4143 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
4144 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
4145 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
4146 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
4147 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
4148 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
4149 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
4150 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
4151 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
4152 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
4153 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
4154 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
4155 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
4156 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
4157 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
4158 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
4159 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
4160 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
4161 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
4162 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
4163 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
4164 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
4165 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
4166 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
4167 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
4168 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
4169 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
4170 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
4171 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
4172 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
4173 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
4174 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
4175 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
4176 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
4177 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
4178 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
4179 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
4180 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
4181 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
4182 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
4183 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
4184 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
4185 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
4186 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
4187
4188 { "dit", CPEN_ (3, C2, 5), F_ARCHEXT },
4189 { "vstcr_el2", CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
4190 { "vsttbr_el2", CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
4191 { "cnthvs_tval_el2", CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
4192 { "cnthvs_cval_el2", CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
4193 { "cnthvs_ctl_el2", CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
4194 { "cnthps_tval_el2", CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
4195 { "cnthps_cval_el2", CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
4196 { "cnthps_ctl_el2", CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
4197 { "sder32_el2", CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
4198 { "vncr_el2", CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
4199 { 0, CPENC(0,0,0,0,0), 0 },
4200 };
4201
4202 bfd_boolean
4203 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4204 {
4205 return (reg->flags & F_DEPRECATED) != 0;
4206 }
4207
4208 bfd_boolean
4209 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
4210 const aarch64_sys_reg *reg)
4211 {
4212 if (!(reg->flags & F_ARCHEXT))
4213 return TRUE;
4214
4215 /* PAN. Values are from aarch64_sys_regs. */
4216 if (reg->value == CPEN_(0,C2,3)
4217 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4218 return FALSE;
4219
4220 /* SCXTNUM_ELx registers. */
4221 if ((reg->value == CPENC (3, 3, C13, C0, 7)
4222 || reg->value == CPENC (3, 0, C13, C0, 7)
4223 || reg->value == CPENC (3, 4, C13, C0, 7)
4224 || reg->value == CPENC (3, 6, C13, C0, 7)
4225 || reg->value == CPENC (3, 5, C13, C0, 7))
4226 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SCXTNUM))
4227 return FALSE;
4228
4229 /* ID_PFR2_EL1 register. */
4230 if (reg->value == CPENC(3, 0, C0, C3, 4)
4231 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_ID_PFR2))
4232 return FALSE;
4233
4234 /* SSBS. Values are from aarch64_sys_regs. */
4235 if (reg->value == CPEN_(3,C2,6)
4236 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
4237 return FALSE;
4238
4239 /* Virtualization host extensions: system registers. */
4240 if ((reg->value == CPENC (3, 4, C2, C0, 1)
4241 || reg->value == CPENC (3, 4, C13, C0, 1)
4242 || reg->value == CPENC (3, 4, C14, C3, 0)
4243 || reg->value == CPENC (3, 4, C14, C3, 1)
4244 || reg->value == CPENC (3, 4, C14, C3, 2))
4245 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4246 return FALSE;
4247
4248 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
4249 if ((reg->value == CPEN_ (5, C0, 0)
4250 || reg->value == CPEN_ (5, C0, 1)
4251 || reg->value == CPENC (3, 5, C1, C0, 0)
4252 || reg->value == CPENC (3, 5, C1, C0, 2)
4253 || reg->value == CPENC (3, 5, C2, C0, 0)
4254 || reg->value == CPENC (3, 5, C2, C0, 1)
4255 || reg->value == CPENC (3, 5, C2, C0, 2)
4256 || reg->value == CPENC (3, 5, C5, C1, 0)
4257 || reg->value == CPENC (3, 5, C5, C1, 1)
4258 || reg->value == CPENC (3, 5, C5, C2, 0)
4259 || reg->value == CPENC (3, 5, C6, C0, 0)
4260 || reg->value == CPENC (3, 5, C10, C2, 0)
4261 || reg->value == CPENC (3, 5, C10, C3, 0)
4262 || reg->value == CPENC (3, 5, C12, C0, 0)
4263 || reg->value == CPENC (3, 5, C13, C0, 1)
4264 || reg->value == CPENC (3, 5, C14, C1, 0))
4265 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4266 return FALSE;
4267
4268 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
4269 if ((reg->value == CPENC (3, 5, C14, C2, 0)
4270 || reg->value == CPENC (3, 5, C14, C2, 1)
4271 || reg->value == CPENC (3, 5, C14, C2, 2)
4272 || reg->value == CPENC (3, 5, C14, C3, 0)
4273 || reg->value == CPENC (3, 5, C14, C3, 1)
4274 || reg->value == CPENC (3, 5, C14, C3, 2))
4275 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4276 return FALSE;
4277
4278 /* ARMv8.2 features. */
4279
4280 /* ID_AA64MMFR2_EL1. */
4281 if (reg->value == CPENC (3, 0, C0, C7, 2)
4282 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4283 return FALSE;
4284
4285 /* PSTATE.UAO. */
4286 if (reg->value == CPEN_ (0, C2, 4)
4287 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4288 return FALSE;
4289
4290 /* RAS extension. */
4291
4292 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
4293 ERXMISC0_EL1 AND ERXMISC1_EL1. */
4294 if ((reg->value == CPENC (3, 0, C5, C3, 0)
4295 || reg->value == CPENC (3, 0, C5, C3, 1)
4296 || reg->value == CPENC (3, 0, C5, C3, 2)
4297 || reg->value == CPENC (3, 0, C5, C3, 3)
4298 || reg->value == CPENC (3, 0, C5, C4, 0)
4299 || reg->value == CPENC (3, 0, C5, C4, 1)
4300 || reg->value == CPENC (3, 0, C5, C4, 2)
4301 || reg->value == CPENC (3, 0, C5, C4, 3)
4302 || reg->value == CPENC (3, 0, C5, C5, 0)
4303 || reg->value == CPENC (3, 0, C5, C5, 1))
4304 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4305 return FALSE;
4306
4307 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
4308 if ((reg->value == CPENC (3, 4, C5, C2, 3)
4309 || reg->value == CPENC (3, 0, C12, C1, 1)
4310 || reg->value == CPENC (3, 4, C12, C1, 1))
4311 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4312 return FALSE;
4313
4314 /* Statistical Profiling extension. */
4315 if ((reg->value == CPENC (3, 0, C9, C10, 0)
4316 || reg->value == CPENC (3, 0, C9, C10, 1)
4317 || reg->value == CPENC (3, 0, C9, C10, 3)
4318 || reg->value == CPENC (3, 0, C9, C10, 7)
4319 || reg->value == CPENC (3, 0, C9, C9, 0)
4320 || reg->value == CPENC (3, 0, C9, C9, 2)
4321 || reg->value == CPENC (3, 0, C9, C9, 3)
4322 || reg->value == CPENC (3, 0, C9, C9, 4)
4323 || reg->value == CPENC (3, 0, C9, C9, 5)
4324 || reg->value == CPENC (3, 0, C9, C9, 6)
4325 || reg->value == CPENC (3, 0, C9, C9, 7)
4326 || reg->value == CPENC (3, 4, C9, C9, 0)
4327 || reg->value == CPENC (3, 5, C9, C9, 0))
4328 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
4329 return FALSE;
4330
4331 /* ARMv8.3 Pointer authentication keys. */
4332 if ((reg->value == CPENC (3, 0, C2, C1, 0)
4333 || reg->value == CPENC (3, 0, C2, C1, 1)
4334 || reg->value == CPENC (3, 0, C2, C1, 2)
4335 || reg->value == CPENC (3, 0, C2, C1, 3)
4336 || reg->value == CPENC (3, 0, C2, C2, 0)
4337 || reg->value == CPENC (3, 0, C2, C2, 1)
4338 || reg->value == CPENC (3, 0, C2, C2, 2)
4339 || reg->value == CPENC (3, 0, C2, C2, 3)
4340 || reg->value == CPENC (3, 0, C2, C3, 0)
4341 || reg->value == CPENC (3, 0, C2, C3, 1))
4342 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
4343 return FALSE;
4344
4345 /* SVE. */
4346 if ((reg->value == CPENC (3, 0, C0, C4, 4)
4347 || reg->value == CPENC (3, 0, C1, C2, 0)
4348 || reg->value == CPENC (3, 4, C1, C2, 0)
4349 || reg->value == CPENC (3, 6, C1, C2, 0)
4350 || reg->value == CPENC (3, 5, C1, C2, 0)
4351 || reg->value == CPENC (3, 0, C0, C0, 7))
4352 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
4353 return FALSE;
4354
4355 /* ARMv8.4 features. */
4356
4357 /* PSTATE.DIT. */
4358 if (reg->value == CPEN_ (3, C2, 5)
4359 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4360 return FALSE;
4361
4362 /* Virtualization extensions. */
4363 if ((reg->value == CPENC(3, 4, C2, C6, 2)
4364 || reg->value == CPENC(3, 4, C2, C6, 0)
4365 || reg->value == CPENC(3, 4, C14, C4, 0)
4366 || reg->value == CPENC(3, 4, C14, C4, 2)
4367 || reg->value == CPENC(3, 4, C14, C4, 1)
4368 || reg->value == CPENC(3, 4, C14, C5, 0)
4369 || reg->value == CPENC(3, 4, C14, C5, 2)
4370 || reg->value == CPENC(3, 4, C14, C5, 1)
4371 || reg->value == CPENC(3, 4, C1, C3, 1)
4372 || reg->value == CPENC(3, 4, C2, C2, 0))
4373 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4374 return FALSE;
4375
4376 /* ARMv8.4 TLB instructions. */
4377 if ((reg->value == CPENS (0, C8, C1, 0)
4378 || reg->value == CPENS (0, C8, C1, 1)
4379 || reg->value == CPENS (0, C8, C1, 2)
4380 || reg->value == CPENS (0, C8, C1, 3)
4381 || reg->value == CPENS (0, C8, C1, 5)
4382 || reg->value == CPENS (0, C8, C1, 7)
4383 || reg->value == CPENS (4, C8, C4, 0)
4384 || reg->value == CPENS (4, C8, C4, 4)
4385 || reg->value == CPENS (4, C8, C1, 1)
4386 || reg->value == CPENS (4, C8, C1, 5)
4387 || reg->value == CPENS (4, C8, C1, 6)
4388 || reg->value == CPENS (6, C8, C1, 1)
4389 || reg->value == CPENS (6, C8, C1, 5)
4390 || reg->value == CPENS (4, C8, C1, 0)
4391 || reg->value == CPENS (4, C8, C1, 4)
4392 || reg->value == CPENS (6, C8, C1, 0)
4393 || reg->value == CPENS (0, C8, C6, 1)
4394 || reg->value == CPENS (0, C8, C6, 3)
4395 || reg->value == CPENS (0, C8, C6, 5)
4396 || reg->value == CPENS (0, C8, C6, 7)
4397 || reg->value == CPENS (0, C8, C2, 1)
4398 || reg->value == CPENS (0, C8, C2, 3)
4399 || reg->value == CPENS (0, C8, C2, 5)
4400 || reg->value == CPENS (0, C8, C2, 7)
4401 || reg->value == CPENS (0, C8, C5, 1)
4402 || reg->value == CPENS (0, C8, C5, 3)
4403 || reg->value == CPENS (0, C8, C5, 5)
4404 || reg->value == CPENS (0, C8, C5, 7)
4405 || reg->value == CPENS (4, C8, C0, 2)
4406 || reg->value == CPENS (4, C8, C0, 6)
4407 || reg->value == CPENS (4, C8, C4, 2)
4408 || reg->value == CPENS (4, C8, C4, 6)
4409 || reg->value == CPENS (4, C8, C4, 3)
4410 || reg->value == CPENS (4, C8, C4, 7)
4411 || reg->value == CPENS (4, C8, C6, 1)
4412 || reg->value == CPENS (4, C8, C6, 5)
4413 || reg->value == CPENS (4, C8, C2, 1)
4414 || reg->value == CPENS (4, C8, C2, 5)
4415 || reg->value == CPENS (4, C8, C5, 1)
4416 || reg->value == CPENS (4, C8, C5, 5)
4417 || reg->value == CPENS (6, C8, C6, 1)
4418 || reg->value == CPENS (6, C8, C6, 5)
4419 || reg->value == CPENS (6, C8, C2, 1)
4420 || reg->value == CPENS (6, C8, C2, 5)
4421 || reg->value == CPENS (6, C8, C5, 1)
4422 || reg->value == CPENS (6, C8, C5, 5))
4423 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4424 return FALSE;
4425
4426 /* Random Number Instructions. For now they are available
4427 (and optional) only with ARMv8.5-A. */
4428 if ((reg->value == CPENC (3, 3, C2, C4, 0)
4429 || reg->value == CPENC (3, 3, C2, C4, 1))
4430 && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RNG)
4431 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_5)))
4432 return FALSE;
4433
4434 /* System Registers in ARMv8.5-A with AARCH64_FEATURE_MEMTAG. */
4435 if ((reg->value == CPENC (3, 3, C4, C2, 7)
4436 || reg->value == CPENC (3, 0, C6, C6, 1)
4437 || reg->value == CPENC (3, 0, C6, C5, 0)
4438 || reg->value == CPENC (3, 4, C6, C5, 0)
4439 || reg->value == CPENC (3, 6, C6, C6, 0)
4440 || reg->value == CPENC (3, 5, C6, C6, 0)
4441 || reg->value == CPENC (3, 0, C1, C0, 5)
4442 || reg->value == CPENC (3, 0, C1, C0, 6))
4443 && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG)))
4444 return FALSE;
4445
4446 return TRUE;
4447 }
4448
4449 /* The CPENC below is fairly misleading: the fields here are not in
4450 CPENC form, they are in op1:op2 form. The fields are encoded by
4451 ins_pstatefield, which just shifts the value by the width of the fields
4452 in a loop. So if you CPENC them, only the first value will be set and the
4453 rest are masked out to 0. As an example, with op1 = 3 and op2 = 2, CPENC
4454 would produce a value of 0b110000000001000000 (0x30040) while what you
4455 want is 0b011010 (0x1a). */
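/* For instance, "dit" below is 0x1a == 0b011'010: the two 3-bit fields are
   simply placed side by side, ready for ins_pstatefield to shift them into
   the instruction one field width at a time. (Illustration only, not in the
   original source.) */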
4456 const aarch64_sys_reg aarch64_pstatefields [] =
4457 {
4458 { "spsel", 0x05, 0 },
4459 { "daifset", 0x1e, 0 },
4460 { "daifclr", 0x1f, 0 },
4461 { "pan", 0x04, F_ARCHEXT },
4462 { "uao", 0x03, F_ARCHEXT },
4463 { "ssbs", 0x19, F_ARCHEXT },
4464 { "dit", 0x1a, F_ARCHEXT },
4465 { "tco", 0x1c, F_ARCHEXT },
4466 { 0, CPENC(0,0,0,0,0), 0 },
4467 };
4468
4469 bfd_boolean
4470 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4471 const aarch64_sys_reg *reg)
4472 {
4473 if (!(reg->flags & F_ARCHEXT))
4474 return TRUE;
4475
4476 /* PAN. Values are from aarch64_pstatefields. */
4477 if (reg->value == 0x04
4478 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4479 return FALSE;
4480
4481 /* UAO. Values are from aarch64_pstatefields. */
4482 if (reg->value == 0x03
4483 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4484 return FALSE;
4485
4486 /* SSBS. Values are from aarch64_pstatefields. */
4487 if (reg->value == 0x19
4488 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
4489 return FALSE;
4490
4491 /* DIT. Values are from aarch64_pstatefields. */
4492 if (reg->value == 0x1a
4493 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4494 return FALSE;
4495
4496 /* TCO. Values are from aarch64_pstatefields. */
4497 if (reg->value == 0x1c
4498 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
4499 return FALSE;
4500
4501 return TRUE;
4502 }
4503
4504 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
4505 {
4506 { "ialluis", CPENS(0,C7,C1,0), 0 },
4507 { "iallu", CPENS(0,C7,C5,0), 0 },
4508 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
4509 { 0, CPENS(0,0,0,0), 0 }
4510 };
4511
4512 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
4513 {
4514 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
4515 { "gva", CPENS (3, C7, C4, 3), F_HASXT | F_ARCHEXT },
4516 { "gzva", CPENS (3, C7, C4, 4), F_HASXT | F_ARCHEXT },
4517 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
4518 { "igvac", CPENS (0, C7, C6, 3), F_HASXT | F_ARCHEXT },
4519 { "igsw", CPENS (0, C7, C6, 4), F_HASXT | F_ARCHEXT },
4520 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
4521 { "igdvac", CPENS (0, C7, C6, 5), F_HASXT | F_ARCHEXT },
4522 { "igdsw", CPENS (0, C7, C6, 6), F_HASXT | F_ARCHEXT },
4523 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
4524 { "cgvac", CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
4525 { "cgdvac", CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
4526 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
4527 { "cgsw", CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
4528 { "cgdsw", CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
4529 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
4530 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
4531 { "cgvap", CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
4532 { "cgdvap", CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
4533 { "cvadp", CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
4534 { "cgvadp", CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
4535 { "cgdvadp", CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
4536 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
4537 { "cigvac", CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
4538 { "cigdvac", CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
4539 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
4540 { "cigsw", CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
4541 { "cigdsw", CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
4542 { 0, CPENS(0,0,0,0), 0 }
4543 };
4544
4545 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
4546 {
4547 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
4548 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
4549 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
4550 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
4551 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
4552 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
4553 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
4554 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
4555 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
4556 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
4557 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
4558 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
4559 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
4560 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
4561 { 0, CPENS(0,0,0,0), 0 }
4562 };
4563
4564 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
4565 {
4566 { "vmalle1", CPENS(0,C8,C7,0), 0 },
4567 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
4568 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
4569 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
4570 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
4571 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
4572 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
4573 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
4574 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
4575 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
4576 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
4577 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
4578 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
4579 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
4580 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
4581 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
4582 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
4583 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
4584 { "alle2", CPENS(4,C8,C7,0), 0 },
4585 { "alle2is", CPENS(4,C8,C3,0), 0 },
4586 { "alle1", CPENS(4,C8,C7,4), 0 },
4587 { "alle1is", CPENS(4,C8,C3,4), 0 },
4588 { "alle3", CPENS(6,C8,C7,0), 0 },
4589 { "alle3is", CPENS(6,C8,C3,0), 0 },
4590 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
4591 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
4592 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
4593 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
4594 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
4595 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
4596 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
4597 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
4598
4599 { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
4600 { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
4601 { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
4602 { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
4603 { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
4604 { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
4605 { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
4606 { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
4607 { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
4608 { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
4609 { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
4610 { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
4611 { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
4612 { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
4613 { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
4614 { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },
4615
4616 { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
4617 { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
4618 { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
4619 { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
4620 { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
4621 { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
4622 { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
4623 { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
4624 { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
4625 { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
4626 { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
4627 { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
4628 { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
4629 { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
4630 { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
4631 { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
4632 { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
4633 { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
4634 { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
4635 { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
4636 { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
4637 { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
4638 { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
4639 { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
4640 { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
4641 { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
4642 { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
4643 { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
4644 { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
4645 { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },
4646
4647 { 0, CPENS(0,0,0,0), 0 }
4648 };
4649
4650 const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
4651 {
4652 /* RCTX is somewhat unique in that it has different values
4653 (op2) based on the instruction in which it is used (cfp/dvp/cpp).
4654 Thus op2 is masked out and instead encoded directly in the
4655 aarch64_opcode_table entries for the respective instructions. */
4656 { "rctx", CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */
4657
4658 { 0, CPENS(0,0,0,0), 0 }
4659 };
4660
4661 bfd_boolean
4662 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4663 {
4664 return (sys_ins_reg->flags & F_HASXT) != 0;
4665 }
4666
4667 extern bfd_boolean
4668 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4669 const aarch64_sys_ins_reg *reg)
4670 {
4671 if (!(reg->flags & F_ARCHEXT))
4672 return TRUE;
4673
4674 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4675 if (reg->value == CPENS (3, C7, C12, 1)
4676 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4677 return FALSE;
4678
4679 /* DC CVADP. Values are from aarch64_sys_regs_dc. */
4680 if (reg->value == CPENS (3, C7, C13, 1)
4681 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
4682 return FALSE;
4683
4684 /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension. */
4685 if ((reg->value == CPENS (0, C7, C6, 3)
4686 || reg->value == CPENS (0, C7, C6, 4)
4687 || reg->value == CPENS (0, C7, C10, 4)
4688 || reg->value == CPENS (0, C7, C14, 4)
4689 || reg->value == CPENS (3, C7, C10, 3)
4690 || reg->value == CPENS (3, C7, C12, 3)
4691 || reg->value == CPENS (3, C7, C13, 3)
4692 || reg->value == CPENS (3, C7, C14, 3)
4693 || reg->value == CPENS (3, C7, C4, 3)
4694 || reg->value == CPENS (0, C7, C6, 5)
4695 || reg->value == CPENS (0, C7, C6, 6)
4696 || reg->value == CPENS (0, C7, C10, 6)
4697 || reg->value == CPENS (0, C7, C14, 6)
4698 || reg->value == CPENS (3, C7, C10, 5)
4699 || reg->value == CPENS (3, C7, C12, 5)
4700 || reg->value == CPENS (3, C7, C13, 5)
4701 || reg->value == CPENS (3, C7, C14, 5)
4702 || reg->value == CPENS (3, C7, C4, 4))
4703 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
4704 return FALSE;
4705
4706 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4707 if ((reg->value == CPENS (0, C7, C9, 0)
4708 || reg->value == CPENS (0, C7, C9, 1))
4709 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4710 return FALSE;
4711
4712 /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
4713 if (reg->value == CPENS (3, C7, C3, 0)
4714 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
4715 return FALSE;
4716
4717 return TRUE;
4718 }
4719
4720 #undef C0
4721 #undef C1
4722 #undef C2
4723 #undef C3
4724 #undef C4
4725 #undef C5
4726 #undef C6
4727 #undef C7
4728 #undef C8
4729 #undef C9
4730 #undef C10
4731 #undef C11
4732 #undef C12
4733 #undef C13
4734 #undef C14
4735 #undef C15
4736
4737 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
4738 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
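/* For example (illustrative only), BITS (insn, 9, 5) extracts the 5-bit
   field at bits 9..5 (the Rn field used by the LDPSW check below), and
   BIT (insn, 22) tests bit 22 (the load flag in that same check). */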
4739
4740 static enum err_type
4741 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
4742 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
4743 bfd_boolean encoding ATTRIBUTE_UNUSED,
4744 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4745 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4746 {
4747 int t = BITS (insn, 4, 0);
4748 int n = BITS (insn, 9, 5);
4749 int t2 = BITS (insn, 14, 10);
4750
4751 if (BIT (insn, 23))
4752 {
4753 /* Write back enabled. */
4754 if ((t == n || t2 == n) && n != 31)
4755 return ERR_UND;
4756 }
4757
4758 if (BIT (insn, 22))
4759 {
4760 /* Load */
4761 if (t == t2)
4762 return ERR_UND;
4763 }
4764
4765 return ERR_OK;
4766 }
4767
4768 /* Verifier for 3-operand vector-by-element instructions for which the
4769 condition `if sz:L == 11 then UNDEFINED` holds. */
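/* (Added note, not in the original source: in the double-precision forms of
   these instructions the element index is held in H alone, so L must be
   zero; sz:L == 0b11 is therefore the reserved, undefined combination.) */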
4770
4771 static enum err_type
4772 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
4773 bfd_vma pc ATTRIBUTE_UNUSED, bfd_boolean encoding,
4774 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4775 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4776 {
4777 const aarch64_insn undef_pattern = 0x3;
4778 aarch64_insn value;
4779
4780 assert (inst->opcode);
4781 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
4782 value = encoding ? inst->value : insn;
4783 assert (value);
4784
4785 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
4786 return ERR_UND;
4787
4788 return ERR_OK;
4789 }
4790
4791 /* Initialize an instruction sequence insn_sequence with the instruction INST.
4792 If INST is NULL, the given insn_sequence is cleared and left
4793 uninitialized. */
4794
4795 void
4796 init_insn_sequence (const struct aarch64_inst *inst,
4797 aarch64_instr_sequence *insn_sequence)
4798 {
4799 int num_req_entries = 0;
4800 insn_sequence->next_insn = 0;
4801 insn_sequence->num_insns = num_req_entries;
4802 if (insn_sequence->instr)
4803 XDELETE (insn_sequence->instr);
4804 insn_sequence->instr = NULL;
4805
4806 if (inst)
4807 {
4808 insn_sequence->instr = XNEW (aarch64_inst);
4809 memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
4810 }
4811
4812 /* Handle all the cases here. May need to think of something smarter than
4813 a giant if/else chain if this grows. At that time, a lookup table may be
4814 best. */
4815 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
4816 num_req_entries = 1;
4817
4818 if (insn_sequence->current_insns)
4819 XDELETEVEC (insn_sequence->current_insns);
4820 insn_sequence->current_insns = NULL;
4821
4822 if (num_req_entries != 0)
4823 {
4824 size_t size = num_req_entries * sizeof (aarch64_inst);
4825 insn_sequence->current_insns
4826 = (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
4827 memset (insn_sequence->current_insns, 0, size);
4828 }
4829 }
4830
4831
4832 /* This function verifies that the instruction INST adheres to its specified
4833 constraints. If it does then ERR_OK is returned; if not then ERR_VFI is
4834 returned and MISMATCH_DETAIL contains the reason why verification failed.
4835
4836 The function is called both during assembly and disassembly. If assembling
4837 then ENCODING will be TRUE, else FALSE. If disassembling, PC will be set
4838 and will contain the PC of the current instruction w.r.t. the section.
4839
4840 If ENCODING and PC=0 then you are at the start of a section. The constraints
4841 are verified against the given state insn_sequence which is updated as it
4842 transitions through the verification. */
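/* Illustration only (not part of the original source): during assembly a
   caller might invoke this roughly as

     aarch64_operand_error detail;
     if (verify_constraints (&inst, inst.value, 0, TRUE, &detail,
                             &insn_sequence) == ERR_VFI)
       handle_non_fatal_error (&detail);

   where `inst', `insn_sequence' and `handle_non_fatal_error' stand for
   whatever state and diagnostics the caller maintains. */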
4843
4844 enum err_type
4845 verify_constraints (const struct aarch64_inst *inst,
4846 const aarch64_insn insn ATTRIBUTE_UNUSED,
4847 bfd_vma pc,
4848 bfd_boolean encoding,
4849 aarch64_operand_error *mismatch_detail,
4850 aarch64_instr_sequence *insn_sequence)
4851 {
4852 assert (inst);
4853 assert (inst->opcode);
4854
4855 const struct aarch64_opcode *opcode = inst->opcode;
4856 if (!opcode->constraints && !insn_sequence->instr)
4857 return ERR_OK;
4858
4859 assert (insn_sequence);
4860
4861 enum err_type res = ERR_OK;
4862
4863 /* This instruction puts a constraint on the insn_sequence. */
4864 if (opcode->flags & F_SCAN)
4865 {
4866 if (insn_sequence->instr)
4867 {
4868 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4869 mismatch_detail->error = _("instruction opens new dependency "
4870 "sequence without ending previous one");
4871 mismatch_detail->index = -1;
4872 mismatch_detail->non_fatal = TRUE;
4873 res = ERR_VFI;
4874 }
4875
4876 init_insn_sequence (inst, insn_sequence);
4877 return res;
4878 }
4879
4880 /* Verify constraints on an existing sequence. */
4881 if (insn_sequence->instr)
4882 {
4883 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
4884 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
4885 closed a previous one that we should have. */
4886 if (!encoding && pc == 0)
4887 {
4888 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4889 mismatch_detail->error = _("previous `movprfx' sequence not closed");
4890 mismatch_detail->index = -1;
4891 mismatch_detail->non_fatal = TRUE;
4892 res = ERR_VFI;
4893 /* Reset the sequence. */
4894 init_insn_sequence (NULL, insn_sequence);
4895 return res;
4896 }
4897
4898 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
4899 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
4900 {
4901 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4902 instruction for better error messages. */
4903 if (!opcode->avariant
4904 || !(*opcode->avariant &
4905 (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
4906 {
4907 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4908 mismatch_detail->error = _("SVE instruction expected after "
4909 "`movprfx'");
4910 mismatch_detail->index = -1;
4911 mismatch_detail->non_fatal = TRUE;
4912 res = ERR_VFI;
4913 goto done;
4914 }
4915
4916 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4917 instruction that is allowed to be used with a MOVPRFX. */
4918 if (!(opcode->constraints & C_SCAN_MOVPRFX))
4919 {
4920 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4921 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
4922 "expected");
4923 mismatch_detail->index = -1;
4924 mismatch_detail->non_fatal = TRUE;
4925 res = ERR_VFI;
4926 goto done;
4927 }
4928
4929 /* Next check for usage of the predicate register. */
4930 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
4931 aarch64_opnd_info blk_pred, inst_pred;
4932 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
4933 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
4934 bfd_boolean predicated = FALSE;
4935 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
4936
4937 /* Determine if the movprfx instruction used is predicated or not. */
4938 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
4939 {
4940 predicated = TRUE;
4941 blk_pred = insn_sequence->instr->operands[1];
4942 }
4943
4944 unsigned char max_elem_size = 0;
4945 unsigned char current_elem_size;
4946 int num_op_used = 0, last_op_usage = 0;
4947 int i, inst_pred_idx = -1;
4948 int num_ops = aarch64_num_of_operands (opcode);
4949 for (i = 0; i < num_ops; i++)
4950 {
4951 aarch64_opnd_info inst_op = inst->operands[i];
4952 switch (inst_op.type)
4953 {
4954 case AARCH64_OPND_SVE_Zd:
4955 case AARCH64_OPND_SVE_Zm_5:
4956 case AARCH64_OPND_SVE_Zm_16:
4957 case AARCH64_OPND_SVE_Zn:
4958 case AARCH64_OPND_SVE_Zt:
4959 case AARCH64_OPND_SVE_Vm:
4960 case AARCH64_OPND_SVE_Vn:
4961 case AARCH64_OPND_Va:
4962 case AARCH64_OPND_Vn:
4963 case AARCH64_OPND_Vm:
4964 case AARCH64_OPND_Sn:
4965 case AARCH64_OPND_Sm:
4966 case AARCH64_OPND_Rn:
4967 case AARCH64_OPND_Rm:
4968 case AARCH64_OPND_Rn_SP:
4969 case AARCH64_OPND_Rt_SP:
4970 case AARCH64_OPND_Rm_SP:
4971 if (inst_op.reg.regno == blk_dest.reg.regno)
4972 {
4973 num_op_used++;
4974 last_op_usage = i;
4975 }
4976 current_elem_size
4977 = aarch64_get_qualifier_esize (inst_op.qualifier);
4978 if (current_elem_size > max_elem_size)
4979 max_elem_size = current_elem_size;
4980 break;
4981 case AARCH64_OPND_SVE_Pd:
4982 case AARCH64_OPND_SVE_Pg3:
4983 case AARCH64_OPND_SVE_Pg4_5:
4984 case AARCH64_OPND_SVE_Pg4_10:
4985 case AARCH64_OPND_SVE_Pg4_16:
4986 case AARCH64_OPND_SVE_Pm:
4987 case AARCH64_OPND_SVE_Pn:
4988 case AARCH64_OPND_SVE_Pt:
4989 inst_pred = inst_op;
4990 inst_pred_idx = i;
4991 break;
4992 default:
4993 break;
4994 }
4995 }
4996
4997 assert (max_elem_size != 0);
4998 aarch64_opnd_info inst_dest = inst->operands[0];
4999 	      /* Determine the size to compare against the movprfx size: the widest
5000 		 element size used if C_MAX_ELEM is set, otherwise the destination's
		 element size.  */
5001 current_elem_size
5002 = opcode->constraints & C_MAX_ELEM
5003 ? max_elem_size
5004 : aarch64_get_qualifier_esize (inst_dest.qualifier);
5005
5006 /* If movprfx is predicated do some extra checks. */
5007 if (predicated)
5008 {
5009 /* The instruction must be predicated. */
5010 if (inst_pred_idx < 0)
5011 {
5012 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5013 mismatch_detail->error = _("predicated instruction expected "
5014 "after `movprfx'");
5015 mismatch_detail->index = -1;
5016 mismatch_detail->non_fatal = TRUE;
5017 res = ERR_VFI;
5018 goto done;
5019 }
5020
5021 /* The instruction must have a merging predicate. */
5022 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
5023 {
5024 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5025 mismatch_detail->error = _("merging predicate expected due "
5026 "to preceding `movprfx'");
5027 mismatch_detail->index = inst_pred_idx;
5028 mismatch_detail->non_fatal = TRUE;
5029 res = ERR_VFI;
5030 goto done;
5031 }
5032
5033 		  /* The same predicate register must be used in the instruction.  */
5034 if (blk_pred.reg.regno != inst_pred.reg.regno)
5035 {
5036 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5037 mismatch_detail->error = _("predicate register differs "
5038 "from that in preceding "
5039 "`movprfx'");
5040 mismatch_detail->index = inst_pred_idx;
5041 mismatch_detail->non_fatal = TRUE;
5042 res = ERR_VFI;
5043 goto done;
5044 }
5045 }
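	      /* For example, "movprfx z0.s, p0/m, z1.s" may be followed by
		 "add z0.s, p0/m, z0.s, z2.s", but not by a form that uses a
		 different predicate register or a zeroing predicate.  */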
5046
5047 	      /* Destructive operations by definition use their destination register
5048 		 as a source as well, so they are allowed one extra use of that
		 register.  */
5049 int allowed_usage
5050 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
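	      /* For example, "add z1.s, p0/m, z1.s, z2.s" is destructive, so z1
		 may appear both as the destination and as the destructive source;
		 a non-destructive instruction may name the movprfx destination
		 only once, as its own destination.  */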
5051
5052 	      /* The movprfx destination register is not used at all.  */
5053 if (num_op_used == 0)
5054 {
5055 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5056 mismatch_detail->error = _("output register of preceding "
5057 "`movprfx' not used in current "
5058 "instruction");
5059 mismatch_detail->index = 0;
5060 mismatch_detail->non_fatal = TRUE;
5061 res = ERR_VFI;
5062 goto done;
5063 }
5064
5065 	      /* We now know the register is used; it must also be used as the output.  */
5066 if (blk_dest.reg.regno != inst_dest.reg.regno)
5067 {
5068 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5069 mismatch_detail->error = _("output register of preceding "
5070 "`movprfx' expected as output");
5071 mismatch_detail->index = 0;
5072 mismatch_detail->non_fatal = TRUE;
5073 res = ERR_VFI;
5074 goto done;
5075 }
5076
5077 	      /* The register is used more times than this type of opcode allows.  */
5078 if (num_op_used > allowed_usage)
5079 {
5080 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5081 mismatch_detail->error = _("output register of preceding "
5082 "`movprfx' used as input");
5083 mismatch_detail->index = last_op_usage;
5084 mismatch_detail->non_fatal = TRUE;
5085 res = ERR_VFI;
5086 goto done;
5087 }
5088
5089 	      /* All that is left is the qualifier checks: the register must have the
5090 		 same maximum element size as the preceding movprfx.  */
5091 if (inst_dest.qualifier
5092 && blk_dest.qualifier
5093 && current_elem_size
5094 != aarch64_get_qualifier_esize (blk_dest.qualifier))
5095 {
5096 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5097 mismatch_detail->error = _("register size not compatible with "
5098 "previous `movprfx'");
5099 mismatch_detail->index = 0;
5100 mismatch_detail->non_fatal = TRUE;
5101 res = ERR_VFI;
5102 goto done;
5103 }
5104 }
5105
5106 done:
5107 /* Add the new instruction to the sequence. */
5108 memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
5109 inst, sizeof (aarch64_inst));
5110
5111 	  /* Check whether the sequence is now full.  */
5112 if (insn_sequence->next_insn >= insn_sequence->num_insns)
5113 {
5114 /* Sequence is full, but we don't have anything special to do for now,
5115 so clear and reset it. */
5116 init_insn_sequence (NULL, insn_sequence);
5117 }
5118 }
5119
5120 return res;
5121 }
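
/* A minimal illustrative sketch of the register-reuse rule enforced by the
   movprfx checks above: a destructive instruction may name the movprfx
   destination twice (as its destination and as the destructive source),
   while any other instruction may name it only once, as its destination.
   The guard macro and the helper below are hypothetical and not part of the
   opcodes API; the real check also validates element sizes and the
   governing predicate.  */

#ifdef AARCH64_OPC_MOVPRFX_SKETCH
static bfd_boolean
movprfx_reuse_ok_sketch (int movprfx_dest, const int *regs, int num_regs,
			 bfd_boolean destructive)
{
  /* One extra use of the destination is allowed for destructive opcodes.  */
  int allowed = destructive ? 2 : 1;
  int uses = 0, i;

  for (i = 0; i < num_regs; i++)
    if (regs[i] == movprfx_dest)
      uses++;

  /* The movprfx destination must be used, must be operand 0 (the output) of
     the following instruction, and must not be used more often than allowed.  */
  return (uses != 0 && regs[0] == movprfx_dest && uses <= allowed)
	 ? TRUE : FALSE;
}
#endif /* AARCH64_OPC_MOVPRFX_SKETCH */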
5122
5123
5124 /* Return true if UVALUE cannot be moved into an SVE register using DUP
5125 (with any element size, not just ESIZE) and if using DUPM would
5126 therefore be OK.  ESIZE is the number of bytes in the immediate.  */
5127
5128 bfd_boolean
5129 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
5130 {
5131 int64_t svalue = uvalue;
  /* Shift in two steps so that ESIZE == 8 does not shift a 64-bit value by
     64 bits, which would be undefined behavior.  */
5132 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
5133
5134 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
5135 return FALSE;
5136 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
5137 {
5138 svalue = (int32_t) uvalue;
5139 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
5140 {
5141 svalue = (int16_t) uvalue;
5142 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
5143 return FALSE;
5144 }
5145 }
5146 if ((svalue & 0xff) == 0)
5147 svalue /= 256;
5148 return svalue < -128 || svalue >= 128;
5149 }
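
/* A minimal illustrative driver showing how the predicate above behaves for
   a few 16-bit immediates.  The guard macro and the function below are
   hypothetical; only aarch64_sve_dupm_mov_immediate_p comes from this file.  */

#ifdef AARCH64_OPC_DUPM_SKETCH
static void
aarch64_sve_dupm_sketch (void)
{
  /* 0x3333 replicates at byte granularity (DUP .B #0x33), so expect FALSE.  */
  printf ("0x3333: %d\n", (int) aarch64_sve_dupm_mov_immediate_p (0x3333, 2));

  /* 0x1200 is a byte immediate shifted left by 8 (DUP .H #0x12, LSL #8),
     so expect FALSE as well.  */
  printf ("0x1200: %d\n", (int) aarch64_sve_dupm_mov_immediate_p (0x1200, 2));

  /* 0x0ff0 is a valid 16-bit bitmask immediate but has two different bytes
     and a non-zero low byte, so DUP cannot encode it; expect TRUE, i.e. the
     MOV alias would use DUPM.  */
  printf ("0x0ff0: %d\n", (int) aarch64_sve_dupm_mov_immediate_p (0x0ff0, 2));
}
#endif /* AARCH64_OPC_DUPM_SKETCH */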
5150
5151 /* Include the opcode description table as well as the operand description
5152 table. */
5153 #define VERIFIER(x) verify_##x
5154 #include "aarch64-tbl.h"