fb1ec0bcc31116cb320fe2530bdc74995b0bf7dc
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2019 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
106
107 struct vector_type_el
108 {
109 enum vector_el_type type;
110 unsigned char defined;
111 unsigned width;
112 int64_t index;
113 };
114
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
116
117 struct reloc
118 {
119 bfd_reloc_code_real_type type;
120 expressionS exp;
121 int pc_rel;
122 enum aarch64_opnd opnd;
123 uint32_t flags;
124 unsigned need_libopcodes_p : 1;
125 };
126
127 struct aarch64_instruction
128 {
129 /* libopcodes structure for instruction intermediate representation. */
130 aarch64_inst base;
131 /* Record assembly errors found during the parsing. */
132 struct
133 {
134 enum aarch64_operand_error_kind kind;
135 const char *error;
136 } parsing_error;
137 /* The condition that appears in the assembly line. */
138 int cond;
139 /* Relocation information (including the GAS internal fixup). */
140 struct reloc reloc;
141 /* Need to generate an immediate in the literal pool. */
142 unsigned gen_lit_pool : 1;
143 };
144
145 typedef struct aarch64_instruction aarch64_instruction;
146
147 static aarch64_instruction inst;
148
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
/* Reset the per-instruction parsing error state in INST so that the
   next template match starts with no recorded diagnostic.  */
static inline void
clear_error (void)
{
  inst.parsing_error.kind = AARCH64_OPDE_NIL;
  inst.parsing_error.error = NULL;
}
182
/* Return TRUE if a parsing error has been recorded for the instruction
   currently being assembled.  */
static inline bfd_boolean
error_p (void)
{
  return inst.parsing_error.kind != AARCH64_OPDE_NIL;
}
188
/* Return the message text of the recorded parsing error, or NULL if
   none was supplied (the default operand description is used then).  */
static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}
194
/* Return the kind of the recorded parsing error; AARCH64_OPDE_NIL
   means no error has been recorded.  */
static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}
200
/* Record a parsing error of kind KIND with message ERROR.  ERROR may be
   NULL, in which case a message is later composed from the operand's
   description.  Note: unconditionally overwrites any earlier error.  */
static inline void
set_error (enum aarch64_operand_error_kind kind, const char *error)
{
  inst.parsing_error.kind = kind;
  inst.parsing_error.error = error;
}
207
/* Record ERROR as a recoverable error, i.e. one that does not rule out
   the current instruction template.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
213
/* Record a syntax error with no message text; the diagnostic reporter
   will compose one from the DESC field of the corresponding
   aarch64_operand entry.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
221
/* Record ERROR as a syntax error, overwriting any earlier error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
/* Record ERROR as a fatal syntax error, one that rules out further
   template matching for this mnemonic.  Overwrites any earlier error.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
240 \f
241 /* Return value for certain parsers when the parsing fails; those parsers
242 return the information of the parsed result, e.g. register number, on
243 success. */
244 #define PARSE_FAIL -1
245
246 /* This is an invalid condition code that means no conditional field is
247 present. */
248 #define COND_ALWAYS 0x10
249
250 typedef struct
251 {
252 const char *template;
253 unsigned long value;
254 } asm_barrier_opt;
255
256 typedef struct
257 {
258 const char *template;
259 uint32_t value;
260 } asm_nzcv;
261
262 struct reloc_entry
263 {
264 char *name;
265 bfd_reloc_code_real_type reloc;
266 };
267
268 /* Macros to define the register types and masks for the purpose
269 of parsing. */
270
271 #undef AARCH64_REG_TYPES
272 #define AARCH64_REG_TYPES \
273 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
274 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
275 BASIC_REG_TYPE(SP_32) /* wsp */ \
276 BASIC_REG_TYPE(SP_64) /* sp */ \
277 BASIC_REG_TYPE(Z_32) /* wzr */ \
278 BASIC_REG_TYPE(Z_64) /* xzr */ \
279 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
280 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
281 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
282 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
283 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
284 BASIC_REG_TYPE(VN) /* v[0-31] */ \
285 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
286 BASIC_REG_TYPE(PN) /* p[0-15] */ \
287 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
288 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
289 /* Typecheck: same, plus SVE registers. */ \
290 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
291 | REG_TYPE(ZN)) \
292 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
293 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Typecheck: same, plus SVE registers. */ \
296 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
298 | REG_TYPE(ZN)) \
299 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
300 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
301 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
302 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
303 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
306 /* Typecheck: any [BHSDQ]P FP. */ \
307 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
308 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
309 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
310 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
311 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
312 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
313 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
314 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
315 be used for SVE instructions, since Zn and Pn are valid symbols \
316 in other contexts. */ \
317 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
318 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
319 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
320 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
321 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
322 | REG_TYPE(ZN) | REG_TYPE(PN)) \
323 /* Any integer register; used for error messages only. */ \
324 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
325 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
326 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
327 /* Pseudo type to mark the end of the enumerator sequence. */ \
328 BASIC_REG_TYPE(MAX)
329
330 #undef BASIC_REG_TYPE
331 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
332 #undef MULTI_REG_TYPE
333 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
334
335 /* Register type enumerators. */
336 typedef enum aarch64_reg_type_
337 {
338 /* A list of REG_TYPE_*. */
339 AARCH64_REG_TYPES
340 } aarch64_reg_type;
341
342 #undef BASIC_REG_TYPE
343 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
344 #undef REG_TYPE
345 #define REG_TYPE(T) (1 << REG_TYPE_##T)
346 #undef MULTI_REG_TYPE
347 #define MULTI_REG_TYPE(T,V) V,
348
349 /* Structure for a hash table entry for a register. */
350 typedef struct
351 {
352 const char *name;
353 unsigned char number;
354 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
355 unsigned char builtin;
356 } reg_entry;
357
358 /* Values indexed by aarch64_reg_type to assist the type checking. */
359 static const unsigned reg_type_masks[] =
360 {
361 AARCH64_REG_TYPES
362 };
363
364 #undef BASIC_REG_TYPE
365 #undef REG_TYPE
366 #undef MULTI_REG_TYPE
367 #undef AARCH64_REG_TYPES
368
369 /* Diagnostics used when we don't get a register of the expected type.
370 Note: this has to synchronized with aarch64_reg_type definitions
371 above. */
372 static const char *
373 get_reg_expected_msg (aarch64_reg_type reg_type)
374 {
375 const char *msg;
376
377 switch (reg_type)
378 {
379 case REG_TYPE_R_32:
380 msg = N_("integer 32-bit register expected");
381 break;
382 case REG_TYPE_R_64:
383 msg = N_("integer 64-bit register expected");
384 break;
385 case REG_TYPE_R_N:
386 msg = N_("integer register expected");
387 break;
388 case REG_TYPE_R64_SP:
389 msg = N_("64-bit integer or SP register expected");
390 break;
391 case REG_TYPE_SVE_BASE:
392 msg = N_("base register expected");
393 break;
394 case REG_TYPE_R_Z:
395 msg = N_("integer or zero register expected");
396 break;
397 case REG_TYPE_SVE_OFFSET:
398 msg = N_("offset register expected");
399 break;
400 case REG_TYPE_R_SP:
401 msg = N_("integer or SP register expected");
402 break;
403 case REG_TYPE_R_Z_SP:
404 msg = N_("integer, zero or SP register expected");
405 break;
406 case REG_TYPE_FP_B:
407 msg = N_("8-bit SIMD scalar register expected");
408 break;
409 case REG_TYPE_FP_H:
410 msg = N_("16-bit SIMD scalar or floating-point half precision "
411 "register expected");
412 break;
413 case REG_TYPE_FP_S:
414 msg = N_("32-bit SIMD scalar or floating-point single precision "
415 "register expected");
416 break;
417 case REG_TYPE_FP_D:
418 msg = N_("64-bit SIMD scalar or floating-point double precision "
419 "register expected");
420 break;
421 case REG_TYPE_FP_Q:
422 msg = N_("128-bit SIMD scalar or floating-point quad precision "
423 "register expected");
424 break;
425 case REG_TYPE_R_Z_BHSDQ_V:
426 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
427 msg = N_("register expected");
428 break;
429 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
430 msg = N_("SIMD scalar or floating-point register expected");
431 break;
432 case REG_TYPE_VN: /* any V reg */
433 msg = N_("vector register expected");
434 break;
435 case REG_TYPE_ZN:
436 msg = N_("SVE vector register expected");
437 break;
438 case REG_TYPE_PN:
439 msg = N_("SVE predicate register expected");
440 break;
441 default:
442 as_fatal (_("invalid register type %d"), reg_type);
443 }
444 return msg;
445 }
446
447 /* Some well known registers that we refer to directly elsewhere. */
448 #define REG_SP 31
449 #define REG_ZR 31
450
451 /* Instructions take 4 bytes in the object file. */
452 #define INSN_SIZE 4
453
454 static struct hash_control *aarch64_ops_hsh;
455 static struct hash_control *aarch64_cond_hsh;
456 static struct hash_control *aarch64_shift_hsh;
457 static struct hash_control *aarch64_sys_regs_hsh;
458 static struct hash_control *aarch64_pstatefield_hsh;
459 static struct hash_control *aarch64_sys_regs_ic_hsh;
460 static struct hash_control *aarch64_sys_regs_dc_hsh;
461 static struct hash_control *aarch64_sys_regs_at_hsh;
462 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
463 static struct hash_control *aarch64_sys_regs_sr_hsh;
464 static struct hash_control *aarch64_reg_hsh;
465 static struct hash_control *aarch64_barrier_opt_hsh;
466 static struct hash_control *aarch64_nzcv_hsh;
467 static struct hash_control *aarch64_pldop_hsh;
468 static struct hash_control *aarch64_hint_opt_hsh;
469
470 /* Stuff needed to resolve the label ambiguity
471 As:
472 ...
473 label: <insn>
474 may differ from:
475 ...
476 label:
477 <insn> */
478
479 static symbolS *last_label_seen;
480
481 /* Literal pool structure. Held on a per-section
482 and per-sub-section basis. */
483
484 #define MAX_LITERAL_POOL_SIZE 1024
485 typedef struct literal_expression
486 {
487 expressionS exp;
488 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
489 LITTLENUM_TYPE * bignum;
490 } literal_expression;
491
492 typedef struct literal_pool
493 {
494 literal_expression literals[MAX_LITERAL_POOL_SIZE];
495 unsigned int next_free_entry;
496 unsigned int id;
497 symbolS *symbol;
498 segT section;
499 subsegT sub_section;
500 int size;
501 struct literal_pool *next;
502 } literal_pool;
503
504 /* Pointer to a linked list of literal pools. */
505 static literal_pool *list_of_pools = NULL;
506 \f
507 /* Pure syntax. */
508
509 /* This array holds the chars that always start a comment. If the
510 pre-processor is disabled, these aren't very useful. */
511 const char comment_chars[] = "";
512
513 /* This array holds the chars that only start a comment at the beginning of
514 a line. If the line seems to have the form '# 123 filename'
515 .line and .file directives will appear in the pre-processed output. */
516 /* Note that input_file.c hand checks for '#' at the beginning of the
517 first line of the input file. This is because the compiler outputs
518 #NO_APP at the beginning of its output. */
519 /* Also note that comments like this one will always work. */
520 const char line_comment_chars[] = "#";
521
522 const char line_separator_chars[] = ";";
523
524 /* Chars that can be used to separate mant
525 from exp in floating point numbers. */
526 const char EXP_CHARS[] = "eE";
527
528 /* Chars that mean this number is a floating point constant. */
529 /* As in 0f12.456 */
530 /* or 0d1.2345e12 */
531
532 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhH";
533
534 /* Prefix character that indicates the start of an immediate value. */
535 #define is_immediate_prefix(C) ((C) == '#')
536
537 /* Separator character handling. */
538
539 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
540
541 static inline bfd_boolean
542 skip_past_char (char **str, char c)
543 {
544 if (**str == c)
545 {
546 (*str)++;
547 return TRUE;
548 }
549 else
550 return FALSE;
551 }
552
553 #define skip_past_comma(str) skip_past_char (str, ',')
554
555 /* Arithmetic expressions (possibly involving symbols). */
556
557 static bfd_boolean in_my_get_expression_p = FALSE;
558
559 /* Third argument to my_get_expression. */
560 #define GE_NO_PREFIX 0
561 #define GE_OPT_PREFIX 1
562
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   PREFIX_MODE is GE_NO_PREFIX or GE_OPT_PREFIX; with GE_OPT_PREFIX a
   leading '#' immediate prefix is consumed if present.  When
   REJECT_ABSENT is non-zero, an O_absent result (nothing was parsed)
   also counts as failure.  On either outcome *STR is updated to the
   position where parsing stopped.  */

static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () works on input_line_pointer, so temporarily point it
     at our string.  in_my_get_expression_p tells md_operand () to mark
     unparsable sub-expressions as O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand ().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* An explicit '#' prefix strongly implies an immediate was
	 intended, so treat the failure as fatal rather than as a
	 template mismatch.  */
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
629
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  Simply defers to the generic IEEE helper,
   honouring the target's endianness.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
640
/* Hook called by the generic expression code for operands it cannot
   parse.  We handle all bad expressions here, so that we can report
   the faulty instruction in the error message: while inside
   my_get_expression, mark the result illegal and let the caller emit
   the diagnostic.  */
void
md_operand (expressionS * exp)
{
  if (in_my_get_expression_p)
    exp->X_op = O_illegal;
}
649
650 /* Immediate values. */
651
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which
   is set will be the most meaningful.  Avoid overwriting it with later
   (cascading) errors by calling this function: ERROR is recorded only
   when no error is pending yet.  */

static void
first_error (const char *error)
{
  if (! error_p ())
    set_syntax_error (error);
}
663
/* Similar to first_error, but this function accepts a printf-style
   formatted error message.  Has no effect if an error is already
   pending.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* Callers are expected to keep messages within the buffer size;
	 truncation or an encoding error trips this check in
	 development builds.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
688
689 /* Register parsing. */
690
691 /* Generic register parser which is called by other specialized
692 register parsers.
693 CCP points to what should be the beginning of a register name.
694 If it is indeed a valid register name, advance CCP over it and
695 return the reg_entry structure; otherwise return NULL.
696 It does not issue diagnostics. */
697
698 static reg_entry *
699 parse_reg (char **ccp)
700 {
701 char *start = *ccp;
702 char *p;
703 reg_entry *reg;
704
705 #ifdef REGISTER_PREFIX
706 if (*start != REGISTER_PREFIX)
707 return NULL;
708 start++;
709 #endif
710
711 p = start;
712 if (!ISALPHA (*p) || !is_name_beginner (*p))
713 return NULL;
714
715 do
716 p++;
717 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
718
719 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
720
721 if (!reg)
722 return NULL;
723
724 *ccp = p;
725 return reg;
726 }
727
728 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
729 return FALSE. */
730 static bfd_boolean
731 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
732 {
733 return (reg_type_masks[type] & (1 << reg->type)) != 0;
734 }
735
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit forms (including WSP and WZR) get the W qualifier.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit forms (including SP and XZR) get the X qualifier.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* Only accept an SVE Z register when the requested type allows
	 it, and then only with an explicit .s or .d element suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
793
/* Try to parse a base or offset register among the 32-bit and 64-bit
   integer forms (including SP/WSP and the zero registers).  Return the
   register entry on success, setting *QUALIFIER to the register
   qualifier (AARCH64_OPND_QLF_W or AARCH64_OPND_QLF_X).  Return null
   otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
805
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q

   *STR must point at the '.' introducing the qualifier; on success it
   is advanced past the qualifier.  For SVE Z and P registers only a
   bare element letter (no numeric width) is accepted.  */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE registers, and AdvSIMD scalar forms like "b"/"h", carry no
     numeric element count; record width 0 for them.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are only valid for SVE registers or as the
	 single-element form "1q".  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* A non-zero width must describe a legal total vector size: 64 or
     128 bits, or the half-width forms 2h and 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
891
892 /* *STR contains an SVE zero/merge predication suffix. Parse it into
893 *PARSED_TYPE and point *STR at the end of the suffix. */
894
895 static bfd_boolean
896 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
897 {
898 char *ptr = *str;
899
900 /* Skip '/'. */
901 gas_assert (*ptr == '/');
902 ptr++;
903 switch (TOLOWER (*ptr))
904 {
905 case 'z':
906 parsed_type->type = NT_zero;
907 break;
908 case 'm':
909 parsed_type->type = NT_merge;
910 break;
911 default:
912 if (*ptr != '\0' && *ptr != ',')
913 first_error_fmt (_("unexpected character `%c' in predication type"),
914 *ptr);
915 else
916 first_error (_("missing predication type"));
917 return FALSE;
918 }
919 parsed_type->width = 0;
920 *str = ptr + 1;
921 return TRUE;
922 }
923
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start from an "untyped, unindexed" description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the actual type of the register found.  */
  type = reg->type;

  /* Vn/Zn may take a ".<T>" suffix; Pn additionally a "/z" or "/m"
     predication suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* Failure is detected via the X_op check below.  */
      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1059
/* Parse register.

   Return the register number on success; return PARSE_FAIL otherwise.

   If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
   the register (e.g. NEON double or quad reg when either has been requested).

   If this is a NEON vector register with additional type information, fill
   in the struct pointed to by VECTYPE (if non-NULL).

   This parser does not handle register lists.  *CCP is advanced only on
   success.  */

static int
aarch64_reg_parse (char **ccp, aarch64_reg_type type,
		   aarch64_reg_type *rtype, struct vector_type_el *vectype)
{
  struct vector_type_el atype;
  char *str = *ccp;
  int reg = parse_typed_reg (&str, type, rtype, &atype,
			     /*in_reg_list= */ FALSE);

  if (reg == PARSE_FAIL)
    return PARSE_FAIL;

  if (vectype)
    *vectype = atype;

  *ccp = str;

  return reg;
}
1091
1092 static inline bfd_boolean
1093 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1094 {
1095 return
1096 e1.type == e2.type
1097 && e1.defined == e2.defined
1098 && e1.width == e2.width && e1.index == e2.index;
1099 }
1100
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |     0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  /* Errors are accumulated rather than aborting immediately, so that
     parsing can continue and PARSE_FAIL is returned at the end.  */
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  /* The register parsed on the previous iteration starts the
	     range.  */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  /* The range start itself was already encoded when it was
	     parsed; step past it before expanding the rest below.  */
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  /* All registers in the list must have the same shape/index.  */
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      if (! error)
	/* Pack each register number into a successive 5-bit field of
	   RET_VAL; a '-' range expands to every register it covers.  */
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  if (expect_index)
    {
      /* A trailing [<index>] applies to the whole list.  */
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low 2 bits: number of registers minus one; 5-bit fields above:
     the register numbers, first register lowest.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1261
1262 /* Directives: register aliases. */
1263
1264 static reg_entry *
1265 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1266 {
1267 reg_entry *new;
1268 const char *name;
1269
1270 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1271 {
1272 if (new->builtin)
1273 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1274 str);
1275
1276 /* Only warn about a redefinition if it's not defined as the
1277 same register. */
1278 else if (new->number != number || new->type != type)
1279 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1280
1281 return NULL;
1282 }
1283
1284 name = xstrdup (str);
1285 new = XNEW (reg_entry);
1286
1287 new->name = name;
1288 new->number = number;
1289 new->type = type;
1290 new->builtin = FALSE;
1291
1292 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1293 abort ();
1294
1295 return new;
1296 }
1297
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Still TRUE: the statement was a .req and has been consumed.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  /* Work on a NUL-terminated copy; the case-conversion loops below
     modify it in place.  */
  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only bother with the upper-case variant if it actually differs
	 from the name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1377
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line; a leading .req can
   therefore only be a syntax error.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1385
1386 /* The .unreq directive deletes an alias which was previously defined
1387 by .req. For example:
1388
1389 my_alias .req r11
1390 .unreq my_alias */
1391
1392 static void
1393 s_unreq (int a ATTRIBUTE_UNUSED)
1394 {
1395 char *name;
1396 char saved_char;
1397
1398 name = input_line_pointer;
1399
1400 while (*input_line_pointer != 0
1401 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1402 ++input_line_pointer;
1403
1404 saved_char = *input_line_pointer;
1405 *input_line_pointer = 0;
1406
1407 if (!*name)
1408 as_bad (_("invalid syntax for .unreq directive"));
1409 else
1410 {
1411 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1412
1413 if (!reg)
1414 as_bad (_("unknown register alias '%s'"), name);
1415 else if (reg->builtin)
1416 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1417 name);
1418 else
1419 {
1420 char *p;
1421 char *nbuf;
1422
1423 hash_delete (aarch64_reg_hsh, name, FALSE);
1424 free ((char *) reg->name);
1425 free (reg);
1426
1427 /* Also locate the all upper case and all lower case versions.
1428 Do not complain if we cannot find one or the other as it
1429 was probably deleted above. */
1430
1431 nbuf = strdup (name);
1432 for (p = nbuf; *p; p++)
1433 *p = TOUPPER (*p);
1434 reg = hash_find (aarch64_reg_hsh, nbuf);
1435 if (reg)
1436 {
1437 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1438 free ((char *) reg->name);
1439 free (reg);
1440 }
1441
1442 for (p = nbuf; *p; p++)
1443 *p = TOLOWER (*p);
1444 reg = hash_find (aarch64_reg_hsh, nbuf);
1445 if (reg)
1446 {
1447 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1448 free ((char *) reg->name);
1449 free (reg);
1450 }
1451
1452 free (nbuf);
1453 }
1454 }
1455
1456 *input_line_pointer = saved_char;
1457 demand_empty_rest_of_line ();
1458 }
1459
1460 /* Directives: Instruction set selection. */
1461
1462 #ifdef OBJ_ELF
1463 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1464 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1465 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1466 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1467
/* Create a new mapping symbol ($d for data, $x for code) for the
   transition to STATE, placed at offset VALUE within FRAG.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Discard the superseded symbol at offset 0.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* A previous symbol at the very same offset is replaced.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1523
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Mark BYTES bytes of FRAG, starting at offset VALUE, as data, and
   re-enter STATE immediately after them.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The symbol being removed sat at offset 0, so it must also
	     have been the frag's first mapping symbol.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* $d at the start of the padding, STATE's symbol right after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1551
1552 static void mapping_state_2 (enum mstate state, int max_chars);
1553
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* Special handling for the first transition out of MAP_UNDEFINED,
     which may require an initial $d symbol at the section start.  */
#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
         if we're at the first frag, check it's size > 0;
         if we're not at the first frag, then for sure
         the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1595
1596 /* Same as mapping_state, but MAX_CHARS bytes have already been
1597 allocated. Put the mapping symbol that far back. */
1598
1599 static void
1600 mapping_state_2 (enum mstate state, int max_chars)
1601 {
1602 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1603
1604 if (!SEG_NORMAL (now_seg))
1605 return;
1606
1607 if (mapstate == state)
1608 /* The mapping symbol has already been emitted.
1609 There is nothing else to do. */
1610 return;
1611
1612 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1613 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1614 }
1615 #else
1616 #define mapping_state(x) /* nothing */
1617 #define mapping_state_2(x, y) /* nothing */
1618 #endif
1619
1620 /* Directives: sectioning and alignment. */
1621
/* Implement the .bss directive: switch to the bss section and mark the
   transition as data for the mapping symbol machinery.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1631
/* Implement the .even directive: align the current position to a
   2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* 2**1 = 2-byte alignment.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1643
1644 /* Directives: Literal pools. */
1645
1646 static literal_pool *
1647 find_literal_pool (int size)
1648 {
1649 literal_pool *pool;
1650
1651 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1652 {
1653 if (pool->section == now_seg
1654 && pool->sub_section == now_subseg && pool->size == size)
1655 break;
1656 }
1657
1658 return pool;
1659 }
1660
1661 static literal_pool *
1662 find_or_make_literal_pool (int size)
1663 {
1664 /* Next literal pool ID number. */
1665 static unsigned int latest_pool_num = 1;
1666 literal_pool *pool;
1667
1668 pool = find_literal_pool (size);
1669
1670 if (pool == NULL)
1671 {
1672 /* Create a new pool. */
1673 pool = XNEW (literal_pool);
1674 if (!pool)
1675 return NULL;
1676
1677 /* Currently we always put the literal pool in the current text
1678 section. If we were generating "small" model code where we
1679 knew that all code and initialised data was within 1MB then
1680 we could output literals to mergeable, read-only data
1681 sections. */
1682
1683 pool->next_free_entry = 0;
1684 pool->section = now_seg;
1685 pool->sub_section = now_subseg;
1686 pool->size = size;
1687 pool->next = list_of_pools;
1688 pool->symbol = NULL;
1689
1690 /* Add it to the list. */
1691 list_of_pools = pool;
1692 }
1693
1694 /* New pools, and emptied pools, will have a NULL symbol. */
1695 if (pool->symbol == NULL)
1696 {
1697 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1698 (valueT) 0, &zero_address_frag);
1699 pool->id = latest_pool_num++;
1700 }
1701
1702 /* Done. */
1703 return pool;
1704 }
1705
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.

   On success *EXP is rewritten to an O_symbol expression referencing
   the pool's label symbol at the entry's byte offset, so the caller can
   emit a pool-relative reference.  */

static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Identical constants are shared...  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* ...and so are identical symbol references.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP as a reference to the pool entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1765
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Give the pre-created SYMBOLP its NAME, SEGMENT, value VALU and
   associated FRAG, then append it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its name.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1816
1817
/* Implement the .ltorg/.pool directive: dump every non-empty literal
   pool (4-, 8- and 16-byte entry sizes) for the current section at the
   current position, then mark the pools as empty so a later pool can be
   started.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Pools exist for entry sizes 4 (align 2), 8 (align 3) and
     16 (align 4) bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* \002 keeps the generated label out of the user's namespace.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Give the pool's pre-created label its final address.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      /* The saved copy is no longer needed once emitted.  */
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1876
1877 #ifdef OBJ_ELF
1878 /* Forward declarations for functions below, in the MD interface
1879 section. */
1880 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1881 static struct reloc_table_entry * find_reloc_table_entry (char **);
1882
1883 /* Directives: Data. */
1884 /* N.B. the support for relocation suffix in this directive needs to be
1885 implemented properly. */
1886
/* Implement .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8):
   emit a comma-separated list of NBYTES-wide data values, rejecting
   any :reloc_suffix: on symbolic operands (not yet implemented).  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbolic operand may carry a relocation suffix of the
	     form [#]:name: -- look for it.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1938
1939 /* Mark symbol that it follows a variant PCS convention. */
1940
1941 static void
1942 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
1943 {
1944 char *name;
1945 char c;
1946 symbolS *sym;
1947 asymbol *bfdsym;
1948 elf_symbol_type *elfsym;
1949
1950 c = get_symbol_name (&name);
1951 if (!*name)
1952 as_bad (_("Missing symbol name in directive"));
1953 sym = symbol_find_or_make (name);
1954 restore_line_pointer (c);
1955 demand_empty_rest_of_line ();
1956 bfdsym = symbol_get_bfdsym (sym);
1957 elfsym = elf_symbol_from (bfd_asymbol_bfd (bfdsym), bfdsym);
1958 gas_assert (elfsym);
1959 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
1960 }
1961 #endif /* OBJ_ELF */
1962
/* Output a 32-bit word, but mark as an instruction.  Implements the
   .inst directive: each comma-separated constant is emitted as a
   4-byte instruction word in instruction byte order.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* AArch64 instructions are always little-endian, so byte-swap
	 the word when assembling for a big-endian target.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2015
2016 static void
2017 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2018 {
2019 demand_empty_rest_of_line ();
2020 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2021 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2022 }
2023
2024 #ifdef OBJ_ELF
2025 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2026
2027 static void
2028 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2029 {
2030 expressionS exp;
2031
2032 expression (&exp);
2033 frag_grow (4);
2034 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2035 BFD_RELOC_AARCH64_TLSDESC_ADD);
2036
2037 demand_empty_rest_of_line ();
2038 }
2039
2040 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2041
2042 static void
2043 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2044 {
2045 expressionS exp;
2046
2047 /* Since we're just labelling the code, there's no need to define a
2048 mapping symbol. */
2049 expression (&exp);
2050 /* Make sure there is enough room in this frag for the following
2051 blr. This trick only works if the blr follows immediately after
2052 the .tlsdesc directive. */
2053 frag_grow (4);
2054 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2055 BFD_RELOC_AARCH64_TLSDESC_CALL);
2056
2057 demand_empty_rest_of_line ();
2058 }
2059
2060 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2061
2062 static void
2063 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2064 {
2065 expressionS exp;
2066
2067 expression (&exp);
2068 frag_grow (4);
2069 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2070 BFD_RELOC_AARCH64_TLSDESC_LDR);
2071
2072 demand_empty_rest_of_line ();
2073 }
2074 #endif /* OBJ_ELF */
2075
static void s_aarch64_arch (int);
static void s_aarch64_cpu (int);
static void s_aarch64_arch_extension (int);

/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* .pool is an alias for .ltorg.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The integer argument is the data size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* 'h' selects IEEE half-precision parsing in float_cons.  */
  {"float16", float_cons, 'h'},
  {0, 0, 0}
};
2112 \f
2113
2114 /* Check whether STR points to a register name followed by a comma or the
2115 end of line; REG_TYPE indicates which register types are checked
2116 against. Return TRUE if STR is such a register name; otherwise return
2117 FALSE. The function does not intend to produce any diagnostics, but since
2118 the register parser aarch64_reg_parse, which is called by this function,
2119 does produce diagnostics, we call clear_error to clear any diagnostics
2120 that may be generated by aarch64_reg_parse.
2121 Also, the function returns FALSE directly if there is any user error
2122 present at the function entry. This prevents the existing diagnostics
2123 state from being spoiled.
2124 The function currently serves parse_constant_immediate and
2125 parse_big_immediate only. */
2126 static bfd_boolean
2127 reg_name_p (char *str, aarch64_reg_type reg_type)
2128 {
2129 int reg;
2130
2131 /* Prevent the diagnostics state from being spoiled. */
2132 if (error_p ())
2133 return FALSE;
2134
2135 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2136
2137 /* Clear the parsing error that may be set by the reg parser. */
2138 clear_error ();
2139
2140 if (reg == PARSE_FAIL)
2141 return FALSE;
2142
2143 skip_whitespace (str);
2144 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2145 return TRUE;
2146
2147 return FALSE;
2148 }
2149
2150 /* Parser functions used exclusively in instruction operands. */
2151
2152 /* Parse an immediate expression which may not be constant.
2153
2154 To prevent the expression parser from pushing a register name
2155 into the symbol table as an undefined symbol, firstly a check is
2156 done to find out whether STR is a register of type REG_TYPE followed
2157 by a comma or the end of line. Return FALSE if STR is such a string. */
2158
2159 static bfd_boolean
2160 parse_immediate_expression (char **str, expressionS *exp,
2161 aarch64_reg_type reg_type)
2162 {
2163 if (reg_name_p (*str, reg_type))
2164 {
2165 set_recoverable_error (_("immediate operand required"));
2166 return FALSE;
2167 }
2168
2169 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2170
2171 if (exp->X_op == O_absent)
2172 {
2173 set_fatal_syntax_error (_("missing immediate expression"));
2174 return FALSE;
2175 }
2176
2177 return TRUE;
2178 }
2179
2180 /* Constant immediate-value read function for use in insn parsing.
2181 STR points to the beginning of the immediate (with the optional
2182 leading #); *VAL receives the value. REG_TYPE says which register
2183 names should be treated as registers rather than as symbolic immediates.
2184
2185 Return TRUE on success; otherwise return FALSE. */
2186
2187 static bfd_boolean
2188 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2189 {
2190 expressionS exp;
2191
2192 if (! parse_immediate_expression (str, &exp, reg_type))
2193 return FALSE;
2194
2195 if (exp.X_op != O_constant)
2196 {
2197 set_syntax_error (_("constant expression required"));
2198 return FALSE;
2199 }
2200
2201 *val = exp.X_add_number;
2202 return TRUE;
2203 }
2204
/* Fold the single-precision float bit pattern IMM down to the AArch64
   8-bit floating-point immediate encoding: the sign bit b[31] becomes
   b[7], and b[25:19] (low exponent bits plus top fraction bits) become
   b[6:0].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31] -> b[7].  */

  return sign | low7;
}
2211
2212 /* Return TRUE if the single-precision floating-point value encoded in IMM
2213 can be expressed in the AArch64 8-bit signed floating-point format with
2214 3-bit exponent and normalized 4 bits of precision; in other words, the
2215 floating-point value must be expressable as
2216 (+/-) n / 16 * power (2, r)
2217 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2218
2219 static bfd_boolean
2220 aarch64_imm_float_p (uint32_t imm)
2221 {
2222 /* If a single-precision floating-point value has the following bit
2223 pattern, it can be expressed in the AArch64 8-bit floating-point
2224 format:
2225
2226 3 32222222 2221111111111
2227 1 09876543 21098765432109876543210
2228 n Eeeeeexx xxxx0000000000000000000
2229
2230 where n, e and each x are either 0 or 1 independently, with
2231 E == ~ e. */
2232
2233 uint32_t pattern;
2234
2235 /* Prepare the pattern for 'Eeeeee'. */
2236 if (((imm >> 30) & 0x1) == 0)
2237 pattern = 0x3e000000;
2238 else
2239 pattern = 0x40000000;
2240
2241 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2242 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2243 }
2244
2245 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2246 as an IEEE float without any loss of precision. Store the value in
2247 *FPWORD if so. */
2248
static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  /* Narrowing assignment deliberately keeps only the low word; its top 3
     bits are the trailing significand bits of the float.  */
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'.  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111: the double's exponent must also be
     in range for a float (not Inf/NaN after narrowing).  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  /* Repack the fields into single-precision layout.  */
  *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));			/* 3 S bits.  */
  return TRUE;
}
2292
2293 /* Return true if we should treat OPERAND as a double-precision
2294 floating-point operand rather than a single-precision one. */
2295 static bfd_boolean
2296 double_precision_operand_p (const aarch64_opnd_info *operand)
2297 {
2298 /* Check for unsuffixed SVE registers, which are allowed
2299 for LDR and STR but not in instructions that require an
2300 immediate. We get better error messages if we arbitrarily
2301 pick one size, parse the immediate normally, and then
2302 report the match failure in the normal way. */
2303 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2304 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2305 }
2306
2307 /* Parse a floating-point immediate. Return TRUE on success and return the
2308 value in *IMMED in the format of IEEE754 single-precision encoding.
2309 *CCP points to the start of the string; DP_P is TRUE when the immediate
2310 is expected to be in double-precision (N.B. this only matters when
2311 hexadecimal representation is involved). REG_TYPE says which register
2312 names should be treated as registers rather than as symbolic immediates.
2313
2314 This routine accepts any IEEE float; it is up to the callers to reject
2315 invalid ones. */
2316
static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit encoding to 32 bits; reject it if any
	     precision would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      /* A single-precision encoding must fit in 32 bits.  */
      else if ((uint64_t) val > 0xffffffff)
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* Recoverable: the caller may retry this operand as a register.  */
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Not hexadecimal: let the generic float parser produce the
	 IEEE754 single-precision encoding.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2382
2383 /* Less-generic immediate-value read function with the possibility of loading
2384 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2385 instructions.
2386
2387 To prevent the expression parser from pushing a register name into the
2388 symbol table as an undefined symbol, a check is firstly done to find
2389 out whether STR is a register of type REG_TYPE followed by a comma or
2390 the end of line. Return FALSE if STR is such a register. */
2391
2392 static bfd_boolean
2393 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2394 {
2395 char *ptr = *str;
2396
2397 if (reg_name_p (ptr, reg_type))
2398 {
2399 set_syntax_error (_("immediate operand required"));
2400 return FALSE;
2401 }
2402
2403 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2404
2405 if (inst.reloc.exp.X_op == O_constant)
2406 *imm = inst.reloc.exp.X_add_number;
2407
2408 *str = ptr;
2409
2410 return TRUE;
2411 }
2412
2413 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2414 if NEED_LIBOPCODES is non-zero, the fixup will need
2415 assistance from the libopcodes. */
2416
2417 static inline void
2418 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2419 const aarch64_opnd_info *operand,
2420 int need_libopcodes_p)
2421 {
2422 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2423 reloc->opnd = operand->type;
2424 if (need_libopcodes_p)
2425 reloc->need_libopcodes_p = 1;
2426 };
2427
2428 /* Return TRUE if the instruction needs to be fixed up later internally by
2429 the GAS; otherwise return FALSE. */
2430
2431 static inline bfd_boolean
2432 aarch64_gas_internal_fixup_p (void)
2433 {
2434 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2435 }
2436
2437 /* Assign the immediate value to the relevant field in *OPERAND if
2438 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2439 needs an internal fixup in a later stage.
2440 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2441 IMM.VALUE that may get assigned with the constant. */
2442 static inline void
2443 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2444 aarch64_opnd_info *operand,
2445 int addr_off_p,
2446 int need_libopcodes_p,
2447 int skip_p)
2448 {
2449 if (reloc->exp.X_op == O_constant)
2450 {
2451 if (addr_off_p)
2452 operand->addr.offset.imm = reloc->exp.X_add_number;
2453 else
2454 operand->imm.value = reloc->exp.X_add_number;
2455 reloc->type = BFD_RELOC_UNUSED;
2456 }
2457 else
2458 {
2459 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2460 /* Tell libopcodes to ignore this operand or not. This is helpful
2461 when one of the operands needs to be fixed up later but we need
2462 libopcodes to check the other operands. */
2463 operand->skip = skip_p;
2464 }
2465 }
2466
2467 /* Relocation modifiers. Each entry in the table contains the textual
2468 name for the relocation which may be placed before a symbol used as
2469 a load/store offset, or add immediate. It must be surrounded by a
2470 leading and trailing colon, for example:
2471
2472 ldr x0, [x1, #:rello:varsym]
2473 add x0, x1, #:rello:varsym */
2474
struct reloc_table_entry
{
  const char *name;		/* Modifier name, without the colons.  */
  int pc_rel;			/* Non-zero for PC-relative relocations.  */
  bfd_reloc_code_real_type adr_type;	/* Reloc selected for ADR.  */
  bfd_reloc_code_real_type adrp_type;	/* Reloc selected for ADRP.  */
  bfd_reloc_code_real_type movw_type;	/* Reloc selected for MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;	/* Reloc selected for ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;	/* Reloc selected for LDR/STR offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* Reloc selected for LDR (literal).  */
};
2486
static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVK, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVK.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3014
3015 /* Given the address of a pointer pointing to the textual name of a
3016 relocation as may appear in assembler source, attempt to find its
3017 details in reloc_table. The pointer will be updated to the character
3018 after the trailing colon. On failure, NULL will be returned;
3019 otherwise return the reloc_table_entry. */
3020
3021 static struct reloc_table_entry *
3022 find_reloc_table_entry (char **str)
3023 {
3024 unsigned int i;
3025 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3026 {
3027 int length = strlen (reloc_table[i].name);
3028
3029 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3030 && (*str)[length] == ':')
3031 {
3032 *str += (length + 1);
3033 return &reloc_table[i];
3034 }
3035 }
3036
3037 return NULL;
3038 }
3039
3040 /* Mode argument to parse_shift and parser_shifter_operand. */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl": two tokens, handled specially
				   in parse_shift  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3054
3055 /* Parse a <shift> operator on an AArch64 data processing instruction.
3056 Return TRUE on success; otherwise return FALSE. */
3057 static bfd_boolean
3058 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3059 {
3060 const struct aarch64_name_value_pair *shift_op;
3061 enum aarch64_modifier_kind kind;
3062 expressionS exp;
3063 int exp_has_prefix;
3064 char *s = *str;
3065 char *p = s;
3066
3067 for (p = *str; ISALPHA (*p); p++)
3068 ;
3069
3070 if (p == *str)
3071 {
3072 set_syntax_error (_("shift expression expected"));
3073 return FALSE;
3074 }
3075
3076 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
3077
3078 if (shift_op == NULL)
3079 {
3080 set_syntax_error (_("shift operator expected"));
3081 return FALSE;
3082 }
3083
3084 kind = aarch64_get_operand_modifier (shift_op);
3085
3086 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3087 {
3088 set_syntax_error (_("invalid use of 'MSL'"));
3089 return FALSE;
3090 }
3091
3092 if (kind == AARCH64_MOD_MUL
3093 && mode != SHIFTED_MUL
3094 && mode != SHIFTED_MUL_VL)
3095 {
3096 set_syntax_error (_("invalid use of 'MUL'"));
3097 return FALSE;
3098 }
3099
3100 switch (mode)
3101 {
3102 case SHIFTED_LOGIC_IMM:
3103 if (aarch64_extend_operator_p (kind))
3104 {
3105 set_syntax_error (_("extending shift is not permitted"));
3106 return FALSE;
3107 }
3108 break;
3109
3110 case SHIFTED_ARITH_IMM:
3111 if (kind == AARCH64_MOD_ROR)
3112 {
3113 set_syntax_error (_("'ROR' shift is not permitted"));
3114 return FALSE;
3115 }
3116 break;
3117
3118 case SHIFTED_LSL:
3119 if (kind != AARCH64_MOD_LSL)
3120 {
3121 set_syntax_error (_("only 'LSL' shift is permitted"));
3122 return FALSE;
3123 }
3124 break;
3125
3126 case SHIFTED_MUL:
3127 if (kind != AARCH64_MOD_MUL)
3128 {
3129 set_syntax_error (_("only 'MUL' is permitted"));
3130 return FALSE;
3131 }
3132 break;
3133
3134 case SHIFTED_MUL_VL:
3135 /* "MUL VL" consists of two separate tokens. Require the first
3136 token to be "MUL" and look for a following "VL". */
3137 if (kind == AARCH64_MOD_MUL)
3138 {
3139 skip_whitespace (p);
3140 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3141 {
3142 p += 2;
3143 kind = AARCH64_MOD_MUL_VL;
3144 break;
3145 }
3146 }
3147 set_syntax_error (_("only 'MUL VL' is permitted"));
3148 return FALSE;
3149
3150 case SHIFTED_REG_OFFSET:
3151 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3152 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3153 {
3154 set_fatal_syntax_error
3155 (_("invalid shift for the register offset addressing mode"));
3156 return FALSE;
3157 }
3158 break;
3159
3160 case SHIFTED_LSL_MSL:
3161 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3162 {
3163 set_syntax_error (_("invalid shift operator"));
3164 return FALSE;
3165 }
3166 break;
3167
3168 default:
3169 abort ();
3170 }
3171
3172 /* Whitespace can appear here if the next thing is a bare digit. */
3173 skip_whitespace (p);
3174
3175 /* Parse shift amount. */
3176 exp_has_prefix = 0;
3177 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3178 exp.X_op = O_absent;
3179 else
3180 {
3181 if (is_immediate_prefix (*p))
3182 {
3183 p++;
3184 exp_has_prefix = 1;
3185 }
3186 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3187 }
3188 if (kind == AARCH64_MOD_MUL_VL)
3189 /* For consistency, give MUL VL the same shift amount as an implicit
3190 MUL #1. */
3191 operand->shifter.amount = 1;
3192 else if (exp.X_op == O_absent)
3193 {
3194 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3195 {
3196 set_syntax_error (_("missing shift amount"));
3197 return FALSE;
3198 }
3199 operand->shifter.amount = 0;
3200 }
3201 else if (exp.X_op != O_constant)
3202 {
3203 set_syntax_error (_("constant shift amount required"));
3204 return FALSE;
3205 }
3206 /* For parsing purposes, MUL #n has no inherent range. The range
3207 depends on the operand and will be checked by operand-specific
3208 routines. */
3209 else if (kind != AARCH64_MOD_MUL
3210 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3211 {
3212 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3213 return FALSE;
3214 }
3215 else
3216 {
3217 operand->shifter.amount = exp.X_add_number;
3218 operand->shifter.amount_present = 1;
3219 }
3220
3221 operand->shifter.operator_present = 1;
3222 operand->shifter.kind = kind;
3223
3224 *str = p;
3225 return TRUE;
3226 }
3227
3228 /* Parse a <shifter_operand> for a data processing instruction:
3229
3230 #<immediate>
3231 #<immediate>, LSL #imm
3232
3233 Validation of immediate operands is deferred to md_apply_fix.
3234
3235 Return TRUE on success; otherwise return FALSE. */
3236
static bfd_boolean
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the two immediate-capable shifter modes make sense here.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return FALSE;

  p = *str;

  /* Accept an immediate expression.  */
  if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
    return FALSE;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return FALSE;

  /* Not accept any shifter for logical immediate values.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return FALSE;
    }
  /* NOTE(review): if parse_shift fails just above, the comma has been
     consumed and a syntax error is pending even though TRUE is
     returned -- confirm callers handle this as intended.  */

  *str = p;
  return TRUE;
}
3268
3269 /* Parse a <shifter_operand> for a data processing instruction:
3270
3271 <Rm>
3272 <Rm>, <shift>
3273 #<immediate>
3274 #<immediate>, LSL #imm
3275
3276 where <shift> is handled by parse_shift above, and the last two
3277 cases are handled by the function above.
3278
3279 Validation of immediate operands is deferred to md_apply_fix.
3280
3281 Return TRUE on success; otherwise return FALSE. */
3282
3283 static bfd_boolean
3284 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3285 enum parse_shift_mode mode)
3286 {
3287 const reg_entry *reg;
3288 aarch64_opnd_qualifier_t qualifier;
3289 enum aarch64_operand_class opd_class
3290 = aarch64_get_operand_class (operand->type);
3291
3292 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3293 if (reg)
3294 {
3295 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3296 {
3297 set_syntax_error (_("unexpected register in the immediate operand"));
3298 return FALSE;
3299 }
3300
3301 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3302 {
3303 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3304 return FALSE;
3305 }
3306
3307 operand->reg.regno = reg->number;
3308 operand->qualifier = qualifier;
3309
3310 /* Accept optional shift operation on register. */
3311 if (! skip_past_comma (str))
3312 return TRUE;
3313
3314 if (! parse_shift (str, operand, mode))
3315 return FALSE;
3316
3317 return TRUE;
3318 }
3319 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3320 {
3321 set_syntax_error
3322 (_("integer register expected in the extended/shifted operand "
3323 "register"));
3324 return FALSE;
3325 }
3326
3327 /* We have a shifted immediate variable. */
3328 return parse_shifter_operand_imm (str, operand, mode);
3329 }
3330
3331 /* Return TRUE on success; return FALSE otherwise. */
3332
static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step past "#:" or ":".  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.
	 The immediate parser will re-consume the expression text.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3391
3392 /* Parse all forms of an address expression. Information is written
3393 to *OPERAND and/or inst.reloc.
3394
3395 The A64 instruction set has the following addressing modes:
3396
3397 Offset
3398 [base] // in SIMD ld/st structure
3399 [base{,#0}] // in ld/st exclusive
3400 [base{,#imm}]
3401 [base,Xm{,LSL #imm}]
3402 [base,Xm,SXTX {#imm}]
3403 [base,Wm,(S|U)XTW {#imm}]
3404 Pre-indexed
3405 [base]! // in ldraa/ldrab exclusive
3406 [base,#imm]!
3407 Post-indexed
3408 [base],#imm
3409 [base],Xm // in SIMD ld/st structure
3410 PC-relative (literal)
3411 label
3412 SVE:
3413 [base,#imm,MUL VL]
3414 [base,Zm.D{,LSL #imm}]
3415 [base,Zm.S,(S|U)XTW {#imm}]
3416 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3417 [Zn.S,#imm]
3418 [Zn.D,#imm]
3419 [Zn.S{, Xm}]
3420 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3421 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3422 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3423
3424 (As a convenience, the notation "=immediate" is permitted in conjunction
3425 with the pc-relative literal load instructions to automatically place an
3426 immediate value or symbolic address in a nearby literal pool and generate
3427 a hidden label which references it.)
3428
3429 Upon a successful parsing, the address structure in *OPERAND will be
3430 filled in the following way:
3431
3432 .base_regno = <base>
3433 .offset.is_reg // 1 if the offset is a register
3434 .offset.imm = <imm>
3435 .offset.regno = <Rm>
3436
3437 For different addressing modes defined in the A64 ISA:
3438
3439 Offset
3440 .pcrel=0; .preind=1; .postind=0; .writeback=0
3441 Pre-indexed
3442 .pcrel=0; .preind=1; .postind=0; .writeback=1
3443 Post-indexed
3444 .pcrel=0; .preind=0; .postind=1; .writeback=1
3445 PC-relative (literal)
3446 .pcrel=1; .preind=1; .postind=0; .writeback=0
3447
3448 The shift/extension information, if any, will be stored in .shifter.
3449 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3450 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3451 corresponding register.
3452
3453 BASE_TYPE says which types of base register should be accepted and
3454 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3455 is the type of shifter that is allowed for immediate offsets,
3456 or SHIFTED_NONE if none.
3457
3458 In all other respects, it is the caller's responsibility to check
3459 for addressing modes not supported by the instruction, and to set
3460 inst.reloc.type. */
3461
static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  /* Any expression parsed out of the address accumulates here.  */
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is the PC-relative form, i.e. =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  /* ADR takes the adr-specific relocation; everything else here
	     is a load-literal.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  /* Parse the base register; BASE_TYPE restricts which register classes
     are acceptable here.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return FALSE;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifters require a 64-bit (X) offset register...  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	      /* ...whose element size matches the base, except for the
		 SVE2 vector-plus-scalar [Zn.S, Xm] form.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return FALSE;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW requires a 32-bit (W) offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return FALSE;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return FALSE;
	    }
	}
    }

  /* A successful parse must consume the closing bracket.  */
  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  /* '!' after the brackets requests pre-index writeback.  */
  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return FALSE;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
            {
              /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
              operand->addr.offset.is_reg = 0;
              operand->addr.offset.imm = 0;
              operand->addr.preind = 1;
            }
          else
           {
	     /* Reject [Rn]!   */
	     set_syntax_error (_("missing offset in the pre-indexed address"));
	     return FALSE;
	   }
	}
       else
	{
          operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	   {
	     /* Bare [Zn.<T>] becomes [Zn.<T>, xzr].  */
	     operand->addr.offset.is_reg = 1;
	     operand->addr.offset.regno = REG_ZR;
	     *offset_qualifier = AARCH64_OPND_QLF_X;
	   }
	  else
	   {
	     /* Bare [Rn] becomes [Rn,#0].  */
	     inst.reloc.exp.X_op = O_constant;
	     inst.reloc.exp.X_add_number = 0;
	   }
	}
    }

  *str = p;
  return TRUE;
}
3762
3763 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3764 on success. */
3765 static bfd_boolean
3766 parse_address (char **str, aarch64_opnd_info *operand)
3767 {
3768 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3769 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3770 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3771 }
3772
3773 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3774 The arguments have the same meaning as for parse_address_main.
3775 Return TRUE on success. */
3776 static bfd_boolean
3777 parse_sve_address (char **str, aarch64_opnd_info *operand,
3778 aarch64_opnd_qualifier_t *base_qualifier,
3779 aarch64_opnd_qualifier_t *offset_qualifier)
3780 {
3781 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3782 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3783 SHIFTED_MUL_VL);
3784 }
3785
3786 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3787 Return TRUE on success; otherwise return FALSE. */
3788 static bfd_boolean
3789 parse_half (char **str, int *internal_fixup_p)
3790 {
3791 char *p = *str;
3792
3793 skip_past_char (&p, '#');
3794
3795 gas_assert (internal_fixup_p);
3796 *internal_fixup_p = 0;
3797
3798 if (*p == ':')
3799 {
3800 struct reloc_table_entry *entry;
3801
3802 /* Try to parse a relocation. Anything else is an error. */
3803 ++p;
3804 if (!(entry = find_reloc_table_entry (&p)))
3805 {
3806 set_syntax_error (_("unknown relocation modifier"));
3807 return FALSE;
3808 }
3809
3810 if (entry->movw_type == 0)
3811 {
3812 set_syntax_error
3813 (_("this relocation modifier is not allowed on this instruction"));
3814 return FALSE;
3815 }
3816
3817 inst.reloc.type = entry->movw_type;
3818 }
3819 else
3820 *internal_fixup_p = 1;
3821
3822 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3823 return FALSE;
3824
3825 *str = p;
3826 return TRUE;
3827 }
3828
3829 /* Parse an operand for an ADRP instruction:
3830 ADRP <Xd>, <label>
3831 Return TRUE on success; otherwise return FALSE. */
3832
3833 static bfd_boolean
3834 parse_adrp (char **str)
3835 {
3836 char *p;
3837
3838 p = *str;
3839 if (*p == ':')
3840 {
3841 struct reloc_table_entry *entry;
3842
3843 /* Try to parse a relocation. Anything else is an error. */
3844 ++p;
3845 if (!(entry = find_reloc_table_entry (&p)))
3846 {
3847 set_syntax_error (_("unknown relocation modifier"));
3848 return FALSE;
3849 }
3850
3851 if (entry->adrp_type == 0)
3852 {
3853 set_syntax_error
3854 (_("this relocation modifier is not allowed on this instruction"));
3855 return FALSE;
3856 }
3857
3858 inst.reloc.type = entry->adrp_type;
3859 }
3860 else
3861 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3862
3863 inst.reloc.pc_rel = 1;
3864
3865 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3866 return FALSE;
3867
3868 *str = p;
3869 return TRUE;
3870 }
3871
3872 /* Miscellaneous. */
3873
3874 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3875 of SIZE tokens in which index I gives the token for field value I,
3876 or is null if field value I is invalid. REG_TYPE says which register
3877 names should be treated as registers rather than as symbolic immediates.
3878
3879 Return true on success, moving *STR past the operand and storing the
3880 field value in *VAL. */
3881
3882 static int
3883 parse_enum_string (char **str, int64_t *val, const char *const *array,
3884 size_t size, aarch64_reg_type reg_type)
3885 {
3886 expressionS exp;
3887 char *p, *q;
3888 size_t i;
3889
3890 /* Match C-like tokens. */
3891 p = q = *str;
3892 while (ISALNUM (*q))
3893 q++;
3894
3895 for (i = 0; i < size; ++i)
3896 if (array[i]
3897 && strncasecmp (array[i], p, q - p) == 0
3898 && array[i][q - p] == 0)
3899 {
3900 *val = i;
3901 *str = q;
3902 return TRUE;
3903 }
3904
3905 if (!parse_immediate_expression (&p, &exp, reg_type))
3906 return FALSE;
3907
3908 if (exp.X_op == O_constant
3909 && (uint64_t) exp.X_add_number < size)
3910 {
3911 *val = exp.X_add_number;
3912 *str = p;
3913 return TRUE;
3914 }
3915
3916 /* Use the default error for this operand. */
3917 return FALSE;
3918 }
3919
3920 /* Parse an option for a preload instruction. Returns the encoding for the
3921 option, or PARSE_FAIL. */
3922
3923 static int
3924 parse_pldop (char **str)
3925 {
3926 char *p, *q;
3927 const struct aarch64_name_value_pair *o;
3928
3929 p = q = *str;
3930 while (ISALNUM (*q))
3931 q++;
3932
3933 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3934 if (!o)
3935 return PARSE_FAIL;
3936
3937 *str = q;
3938 return o->value;
3939 }
3940
3941 /* Parse an option for a barrier instruction. Returns the encoding for the
3942 option, or PARSE_FAIL. */
3943
3944 static int
3945 parse_barrier (char **str)
3946 {
3947 char *p, *q;
3948 const asm_barrier_opt *o;
3949
3950 p = q = *str;
3951 while (ISALPHA (*q))
3952 q++;
3953
3954 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3955 if (!o)
3956 return PARSE_FAIL;
3957
3958 *str = q;
3959 return o->value;
3960 }
3961
3962 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3963 return 0 if successful. Otherwise return PARSE_FAIL. */
3964
3965 static int
3966 parse_barrier_psb (char **str,
3967 const struct aarch64_name_value_pair ** hint_opt)
3968 {
3969 char *p, *q;
3970 const struct aarch64_name_value_pair *o;
3971
3972 p = q = *str;
3973 while (ISALPHA (*q))
3974 q++;
3975
3976 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3977 if (!o)
3978 {
3979 set_fatal_syntax_error
3980 ( _("unknown or missing option to PSB"));
3981 return PARSE_FAIL;
3982 }
3983
3984 if (o->value != 0x11)
3985 {
3986 /* PSB only accepts option name 'CSYNC'. */
3987 set_syntax_error
3988 (_("the specified option is not accepted for PSB"));
3989 return PARSE_FAIL;
3990 }
3991
3992 *str = q;
3993 *hint_opt = o;
3994 return 0;
3995 }
3996
3997 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
3998 return 0 if successful. Otherwise return PARSE_FAIL. */
3999
4000 static int
4001 parse_bti_operand (char **str,
4002 const struct aarch64_name_value_pair ** hint_opt)
4003 {
4004 char *p, *q;
4005 const struct aarch64_name_value_pair *o;
4006
4007 p = q = *str;
4008 while (ISALPHA (*q))
4009 q++;
4010
4011 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4012 if (!o)
4013 {
4014 set_fatal_syntax_error
4015 ( _("unknown option to BTI"));
4016 return PARSE_FAIL;
4017 }
4018
4019 switch (o->value)
4020 {
4021 /* Valid BTI operands. */
4022 case HINT_OPD_C:
4023 case HINT_OPD_J:
4024 case HINT_OPD_JC:
4025 break;
4026
4027 default:
4028 set_syntax_error
4029 (_("unknown option to BTI"));
4030 return PARSE_FAIL;
4031 }
4032
4033 *str = q;
4034 *hint_opt = o;
4035 return 0;
4036 }
4037
4038 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4039 Returns the encoding for the option, or PARSE_FAIL.
4040
4041 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4042 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4043
4044 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4045 field, otherwise as a system register.
4046 */
4047
4048 static int
4049 parse_sys_reg (char **str, struct hash_control *sys_regs,
4050 int imple_defined_p, int pstatefield_p,
4051 uint32_t* flags)
4052 {
4053 char *p, *q;
4054 char buf[32];
4055 const aarch64_sys_reg *o;
4056 int value;
4057
4058 p = buf;
4059 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4060 if (p < buf + 31)
4061 *p++ = TOLOWER (*q);
4062 *p = '\0';
4063 /* Assert that BUF be large enough. */
4064 gas_assert (p - buf == q - *str);
4065
4066 o = hash_find (sys_regs, buf);
4067 if (!o)
4068 {
4069 if (!imple_defined_p)
4070 return PARSE_FAIL;
4071 else
4072 {
4073 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
4074 unsigned int op0, op1, cn, cm, op2;
4075
4076 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
4077 != 5)
4078 return PARSE_FAIL;
4079 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
4080 return PARSE_FAIL;
4081 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
4082 if (flags)
4083 *flags = 0;
4084 }
4085 }
4086 else
4087 {
4088 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
4089 as_bad (_("selected processor does not support PSTATE field "
4090 "name '%s'"), buf);
4091 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
4092 as_bad (_("selected processor does not support system register "
4093 "name '%s'"), buf);
4094 if (aarch64_sys_reg_deprecated_p (o))
4095 as_warn (_("system register name '%s' is deprecated and may be "
4096 "removed in a future release"), buf);
4097 value = o->value;
4098 if (flags)
4099 *flags = o->flags;
4100 }
4101
4102 *str = q;
4103 return value;
4104 }
4105
4106 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4107 for the option, or NULL. */
4108
4109 static const aarch64_sys_ins_reg *
4110 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
4111 {
4112 char *p, *q;
4113 char buf[32];
4114 const aarch64_sys_ins_reg *o;
4115
4116 p = buf;
4117 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4118 if (p < buf + 31)
4119 *p++ = TOLOWER (*q);
4120 *p = '\0';
4121
4122 o = hash_find (sys_ins_regs, buf);
4123 if (!o)
4124 return NULL;
4125
4126 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
4127 as_bad (_("selected processor does not support system register "
4128 "name '%s'"), buf);
4129
4130 *str = q;
4131 return o;
4132 }
4133 \f
/* Consume character CHR from the input, or jump to the enclosing
   parser's "failure" label if it is absent.  Like the other po_*
   macros below, this expects STR and the failure label in scope.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into VAL; on failure set the
   default operand error and bail out.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE, storing its
   number and qualifier into the current operand INFO.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX;
   the bounds are stringized into the diagnostic.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic operand using the token table ARRAY, storing the
   matched index in VAL.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR (typically a parse helper call) and bail out when it
   yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4185 \f
/* Encode the 12-bit immediate field of an add/sub immediate
   instruction; the value occupies bits [21:10].  */
static inline uint32_t
encode_addsub_imm (uint32_t value)
{
  const unsigned int imm12_lsb = 10;
  return value << imm12_lsb;
}
4192
/* Encode the shift-amount field of an add/sub immediate instruction;
   the count occupies bits starting at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t count)
{
  const unsigned int shift_lsb = 22;
  return count << shift_lsb;
}
4199
4200
/* Encode the 21-bit immediate of an ADR instruction: the low two bits
   land in immlo (bits [30:29]) and bits [20:2] land in immhi
   (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t value)
{
  uint32_t immlo = value & 0x3;			/* imm[1:0].  */
  uint32_t immhi = value & (0x7ffff << 2);	/* imm[20:2].  */
  return (immlo << 29) | (immhi << 3);
}
4208
/* Encode the 16-bit immediate field of a move-wide instruction; the
   value occupies bits starting at bit 5.  */
static inline uint32_t
encode_movw_imm (uint32_t value)
{
  const unsigned int imm16_lsb = 5;
  return value << imm16_lsb;
}
4215
/* Encode the 26-bit offset of an unconditional branch; the offset is
   masked into bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t offset)
{
  const uint32_t imm26_mask = (1u << 26) - 1;
  return offset & imm26_mask;
}
4222
/* Encode the 19-bit offset of a conditional branch or compare-and-
   branch; the offset is masked and placed at bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t offset)
{
  const uint32_t imm19_mask = (1u << 19) - 1;
  return (offset & imm19_mask) << 5;
}
4229
/* Encode the 19-bit offset of a load-literal instruction; the offset
   is masked and placed at bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t offset)
{
  const uint32_t imm19_mask = (1u << 19) - 1;
  return (offset & imm19_mask) << 5;
}
4236
/* Encode the 14-bit offset of a test-and-branch instruction; the
   offset is masked and placed at bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t offset)
{
  const uint32_t imm14_mask = (1u << 14) - 1;
  return (offset & imm14_mask) << 5;
}
4243
/* Encode the 16-bit immediate field of SVC/HVC/SMC; the value occupies
   bits starting at bit 5.  */
static inline uint32_t
encode_svc_imm (uint32_t value)
{
  const unsigned int imm16_lsb = 5;
  return value << imm16_lsb;
}
4250
/* Toggle an opcode between its add(s) and sub(s) form by flipping
   bit 30.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
4257
/* Rewrite a MOVZ/MOVN-family opcode as MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;
  return opcode | opc_bit;
}
4263
/* Rewrite a MOVZ/MOVN-family opcode as MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;
  return opcode & ~opc_bit;
}
4269
4270 /* Overall per-instruction processing. */
4271
4272 /* We need to be able to fix up arbitrary expressions in some statements.
4273 This is so that we can handle symbols that are an arbitrary distance from
4274 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4275 which returns part of an address in a form which will be valid for
4276 a data instruction. We do this by pushing the expression into a symbol
4277 in the expr_section, and creating a fix for that. */
4278
4279 static fixS *
4280 fix_new_aarch64 (fragS * frag,
4281 int where,
4282 short int size, expressionS * exp, int pc_rel, int reloc)
4283 {
4284 fixS *new_fix;
4285
4286 switch (exp->X_op)
4287 {
4288 case O_constant:
4289 case O_symbol:
4290 case O_add:
4291 case O_subtract:
4292 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4293 break;
4294
4295 default:
4296 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4297 pc_rel, reloc);
4298 break;
4299 }
4300 return new_fix;
4301 }
4302 \f
4303 /* Diagnostics on operands errors. */
4304
4305 /* By default, output verbose error message.
4306 Disable the verbose error message by -mno-verbose-error. */
4307 static int verbose_error_p = 1;
4308
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The entries must be
   kept in the same order as the aarch64_operand_error_kind enumerators
   they name; update this table whenever that enum changes.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4324
4325 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4326
4327 When multiple errors of different kinds are found in the same assembly
4328 line, only the error of the highest severity will be picked up for
4329 issuing the diagnostics. */
4330
static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the error kinds being declared in
     strictly increasing order of severity; assert that ordering so a
     change to the enum cannot silently break the comparison.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4345
4346 /* Helper routine to get the mnemonic name from the assembly instruction
4347 line; should only be called for the diagnosis purpose, as there is
4348 string copy operation involved, which may affect the runtime
4349 performance if used in elsewhere. */
4350
/* Return the mnemonic at the start of STR, truncated to fit the static
   buffer.  The returned pointer refers to static storage that is
   overwritten by the next call.  */
static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.
     (The old comment claimed 15 bytes; the copy has always been bounded
     by the 32-byte buffer.)  */
  strncpy (mnemonic, str, sizeof (mnemonic) - 1);
  mnemonic[sizeof (mnemonic) - 1] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name (*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == sizeof (mnemonic) - 1)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4374
4375 static void
4376 reset_aarch64_instruction (aarch64_instruction *instruction)
4377 {
4378 memset (instruction, '\0', sizeof (aarch64_instruction));
4379 instruction->reloc.type = BFD_RELOC_UNUSED;
4380 }
4381
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;	/* Opcode template this error belongs to.  */
  aarch64_operand_error detail;	/* Kind, operand index and message.  */
  struct operand_error_record *next;	/* Next record in the report list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of error records with O(1) append via TAIL.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines to avoid
   re-allocating records for every instruction.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4413
4414 /* Initialize the data structure that stores the operand mismatch
4415 information on assembling one line of the assembly code. */
4416 static void
4417 init_operand_error_report (void)
4418 {
4419 if (operand_error_report.head != NULL)
4420 {
4421 gas_assert (operand_error_report.tail != NULL);
4422 operand_error_report.tail->next = free_opnd_error_record_nodes;
4423 free_opnd_error_record_nodes = operand_error_report.head;
4424 operand_error_report.head = NULL;
4425 operand_error_report.tail = NULL;
4426 return;
4427 }
4428 gas_assert (operand_error_report.tail == NULL);
4429 }
4430
4431 /* Return TRUE if some operand error has been recorded during the
4432 parsing of the current assembly line using the opcode *OPCODE;
4433 otherwise return FALSE. */
4434 static inline bfd_boolean
4435 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4436 {
4437 operand_error_record *record = operand_error_report.head;
4438 return record && record->opcode == opcode;
4439 }
4440
4441 /* Add the error record *NEW_RECORD to operand_error_report. The record's
4442 OPCODE field is initialized with OPCODE.
4443 N.B. only one record for each opcode, i.e. the maximum of one error is
4444 recorded for each instruction template. */
4445
static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* Note: when a record for OPCODE already exists it is the list head
     (records are inserted at the head), so RECORD aliases that entry in
     the else branch below.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Either a freshly created record or one whose existing error was
     superseded; copy in the new details.  */
  record->detail = new_record->detail;
}
4492
4493 static inline void
4494 record_operand_error_info (const aarch64_opcode *opcode,
4495 aarch64_operand_error *error_info)
4496 {
4497 operand_error_record record;
4498 record.opcode = opcode;
4499 record.detail = *error_info;
4500 add_operand_error_record (&record);
4501 }
4502
4503 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4504 error message *ERROR, for operand IDX (count from 0). */
4505
4506 static void
4507 record_operand_error (const aarch64_opcode *opcode, int idx,
4508 enum aarch64_operand_error_kind kind,
4509 const char* error)
4510 {
4511 aarch64_operand_error info;
4512 memset(&info, 0, sizeof (info));
4513 info.index = idx;
4514 info.kind = kind;
4515 info.error = error;
4516 info.non_fatal = FALSE;
4517 record_operand_error_info (opcode, &info);
4518 }
4519
4520 static void
4521 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4522 enum aarch64_operand_error_kind kind,
4523 const char* error, const int *extra_data)
4524 {
4525 aarch64_operand_error info;
4526 info.index = idx;
4527 info.kind = kind;
4528 info.error = error;
4529 info.data[0] = extra_data[0];
4530 info.data[1] = extra_data[1];
4531 info.data[2] = extra_data[2];
4532 info.non_fatal = FALSE;
4533 record_operand_error_info (opcode, &info);
4534 }
4535
4536 static void
4537 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4538 const char* error, int lower_bound,
4539 int upper_bound)
4540 {
4541 int data[3] = {lower_bound, upper_bound, 0};
4542 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4543 error, data);
4544 }
4545
4546 /* Remove the operand error record for *OPCODE. */
4547 static void ATTRIBUTE_UNUSED
4548 remove_operand_error_record (const aarch64_opcode *opcode)
4549 {
4550 if (opcode_has_operand_error_p (opcode))
4551 {
4552 operand_error_record* record = operand_error_report.head;
4553 gas_assert (record != NULL && operand_error_report.tail != NULL);
4554 operand_error_report.head = record->next;
4555 record->next = free_opnd_error_record_nodes;
4556 free_opnd_error_record_nodes = record;
4557 if (operand_error_report.head == NULL)
4558 {
4559 gas_assert (operand_error_report.tail == record);
4560 operand_error_report.tail = NULL;
4561 }
4562 }
4563 }
4564
4565 /* Given the instruction in *INSTR, return the index of the best matched
4566 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4567
4568 Return -1 if there is no qualifier sequence; return the first match
4569 if there is multiple matches found. */
4570
4571 static int
4572 find_best_match (const aarch64_inst *instr,
4573 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4574 {
4575 int i, num_opnds, max_num_matched, idx;
4576
4577 num_opnds = aarch64_num_of_operands (instr->opcode);
4578 if (num_opnds == 0)
4579 {
4580 DEBUG_TRACE ("no operand");
4581 return -1;
4582 }
4583
4584 max_num_matched = 0;
4585 idx = 0;
4586
4587 /* For each pattern. */
4588 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4589 {
4590 int j, num_matched;
4591 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4592
4593 /* Most opcodes has much fewer patterns in the list. */
4594 if (empty_qualifier_sequence_p (qualifiers))
4595 {
4596 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4597 break;
4598 }
4599
4600 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4601 if (*qualifiers == instr->operands[j].qualifier)
4602 ++num_matched;
4603
4604 if (num_matched > max_num_matched)
4605 {
4606 max_num_matched = num_matched;
4607 idx = i;
4608 }
4609 }
4610
4611 DEBUG_TRACE ("return with %d", idx);
4612 return idx;
4613 }
4614
4615 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4616 corresponding operands in *INSTR. */
4617
4618 static inline void
4619 assign_qualifier_sequence (aarch64_inst *instr,
4620 const aarch64_opnd_qualifier_t *qualifiers)
4621 {
4622 int i = 0;
4623 int num_opnds = aarch64_num_of_operands (instr->opcode);
4624 gas_assert (num_opnds);
4625 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4626 instr->operands[i].qualifier = *qualifiers;
4627 }
4628
4629 /* Print operands for the diagnosis purpose. */
4630
4631 static void
4632 print_operands (char *buf, const aarch64_opcode *opcode,
4633 const aarch64_opnd_info *opnds)
4634 {
4635 int i;
4636
4637 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4638 {
4639 char str[128];
4640
4641 /* We regard the opcode operand info more, however we also look into
4642 the inst->operands to support the disassembling of the optional
4643 operand.
4644 The two operand code should be the same in all cases, apart from
4645 when the operand can be optional. */
4646 if (opcode->operands[i] == AARCH64_OPND_NIL
4647 || opnds[i].type == AARCH64_OPND_NIL)
4648 break;
4649
4650 /* Generate the operand string in STR. */
4651 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4652 NULL);
4653
4654 /* Delimiter. */
4655 if (str[0] != '\0')
4656 strcat (buf, i == 0 ? " " : ", ");
4657
4658 /* Append the operand string. */
4659 strcat (buf, str);
4660 }
4661 }
4662
/* Send to stderr a string as information, prefixed with the current
   file name and line number in the style of other GAS diagnostics.  */

static void
output_info (const char *format, ...)
{
  va_list ap;
  unsigned int lineno;
  const char *fname;

  fname = as_where (&lineno);
  if (fname)
    {
      if (lineno != 0)
	fprintf (stderr, "%s:%u: ", fname, lineno);
      else
	fprintf (stderr, "%s: ", fname);
    }
  fprintf (stderr, _("Info: "));
  va_start (ap, format);
  vfprintf (stderr, format, ap);
  va_end (ap);
  putc ('\n', stderr);
}
4686
/* Output one operand error record.

   RECORD is the error to report and STR is the user's original assembly
   line, quoted in every message.  Non-fatal errors are issued through
   as_warn, everything else through as_bad.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX may be -1 when the error is not tied to a specific operand.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Demote non-fatal diagnostics to warnings.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate. Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report. Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parsing must succeed: the line parsed once
	     already; only the qualifier matching failed.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail again here (same mismatch).  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the inclusive lower/upper bounds; when they
	 are equal only one value is legal.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected number of registers.  */
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] holds the required alignment.  */
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4863
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bfd_boolean non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      /* When filtering, fatal records must not influence the choice of
	 severity either.  */
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
4961 \f
/* Write the 32-bit instruction INSN to BUF - always little-endian,
   regardless of host byte order.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Emit the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (i * 8)) & 0xff;
}
4972
/* Read a 32-bit instruction from BUF, which holds it little-endian,
   and return it as a host-order value.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *where = (unsigned char *) buf;

  /* Cast each byte to uint32_t before shifting: integer promotion would
     otherwise make "where[3] << 24" a signed-int shift, which is undefined
     behaviour when bit 7 of the top byte is set (C11 6.5.7).  */
  return ((uint32_t) where[0]
	  | ((uint32_t) where[1] << 8)
	  | ((uint32_t) where[2] << 16)
	  | ((uint32_t) where[3] << 24));
}
4981
/* Emit the instruction currently held in the global "inst" into the
   output frag, together with its pending relocation (if any) and a
   DWARF line-table entry.  NEW_INST, when non-NULL, is attached to the
   fixup so md_apply_fix can re-encode the instruction later.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve INSN_SIZE (4) bytes in the current frag.  */
  to = frag_more (INSN_SIZE);

  /* Mark the frag as containing instructions (used by mapping-symbol
     and alignment machinery).  */
  frag_now->tc_frag_data.recorded = 1;

  /* Instructions are always stored little-endian.  */
  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand and flags so the backend
	     can resolve them without a BFD howto.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Record the instruction for DWARF line number information.  */
  dwarf2_emit_insn (INSN_SIZE);
}
5015
/* Link together opcodes of the same name.  Each mnemonic maps to a
   singly-linked chain of candidate opcode entries.  */

struct templates
{
  aarch64_opcode *opcode;	/* One opcode entry for this mnemonic.  */
  struct templates *next;	/* Next entry with the same name, or NULL.  */
};

typedef struct templates templates;
5025
5026 static templates *
5027 lookup_mnemonic (const char *start, int len)
5028 {
5029 templates *templ = NULL;
5030
5031 templ = hash_find_n (aarch64_ops_hsh, start, len);
5032 return templ;
5033 }
5034
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.

   On success *STR is advanced past the mnemonic (and any ".cond"
   suffix) and inst.cond is set; on failure 0/NULL is returned and
   *STR points at the offending character.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Reject an empty mnemonic or one that begins with '.'.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      /* The text between the '.' and END names the condition.  */
      cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown condition name; leave *STR at the '.' so the caller
	     reports the error at the right spot.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* 13 chars + ".c" fits in the 16-byte CONDNAME buffer (lookup is
	 length-delimited, so no NUL is needed).  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5098
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.

   Returns AARCH64_OPND_QLF_NIL (after recording a syntax error) when the
   vector arrangement is absent or inconsistent.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; offsets from these are
     computed below for the wider arrangements.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
  };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate suffixes /z and /m map directly.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Vector element register.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 4-, 8- and 16-byte total arrangements are representable.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width
	 by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      /* Sanity-check that the result stays within the V_* qualifiers.  */
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5169
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.

   The default value is encoded in the opcode entry and retrieved with
   get_optional_operand_default_value; which field of *OPERAND receives
   it depends on the operand type.  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element (register lane) operands.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: default is an immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* SVE pattern with scale: an omitted "MUL #n" means "MUL #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* Exception immediate: drop any pending relocation instead.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Barrier / hint operands: default indexes the option tables.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5267
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   Validates the reloc against the instruction (MOVK restrictions,
   32-bit register restrictions) and derives the implicit LSL shift
   amount (0/16/32/48) from the relocation's group number, storing it
   in operand 1's shifter.  */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not use a signed/PREL/G-series reloc that could rewrite
     more than the 16 bits it keeps.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  /* Map the relocation group (G0..G3) to the implicit shift amount.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* G2/G3 address bits beyond 31, impossible with a W register.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5369
/* A primitive log calculator.  Return log2 of SIZE for the supported
   power-of-two sizes 1, 2, 4, 8 and 16; assert on anything else.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* ls[SIZE - 1] is log2 (SIZE); (unsigned char) -1 marks invalid sizes.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Also reject SIZE == 0: the previous "size > 16" check alone let zero
     fall through to an out-of-bounds ls[-1] access.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5385
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The real reloc is selected by two indices: which pseudo reloc family
   was requested (plain / TLSLD DTPREL / TLSLE TPREL, each with an _NC
   variant) and the log2 of the transfer size implied by operand 0's
   qualifier.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: pseudo reloc family (in enum order); columns: log2 of the
     access size (8/16/32/64/128 bits).  TLS variants have no 128-bit
     form, hence BFD_RELOC_AARCH64_NONE.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* An unqualified address operand inherits its size from operand 0.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5464
5465 /* Check whether a register list REGINFO is valid. The registers must be
5466 numbered in increasing order (modulo 32), in increments of one or two.
5467
5468 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5469 increments of two.
5470
5471 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5472
5473 static bfd_boolean
5474 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5475 {
5476 uint32_t i, nb_regs, prev_regno, incr;
5477
5478 nb_regs = 1 + (reginfo & 0x3);
5479 reginfo >>= 2;
5480 prev_regno = reginfo & 0x1f;
5481 incr = accept_alternate ? 2 : 1;
5482
5483 for (i = 1; i < nb_regs; ++i)
5484 {
5485 uint32_t curr_regno;
5486 reginfo >>= 5;
5487 curr_regno = reginfo & 0x1f;
5488 if (curr_regno != ((prev_regno + incr) & 0x1f))
5489 return FALSE;
5490 prev_regno = curr_regno;
5491 }
5492
5493 return TRUE;
5494 }
5495
5496 /* Generic instruction operand parser. This does no encoding and no
5497 semantic validation; it merely squirrels values away in the inst
5498 structure. Returns TRUE or FALSE depending on whether the
5499 specified grammar matched. */
5500
5501 static bfd_boolean
5502 parse_operands (char *str, const aarch64_opcode *opcode)
5503 {
5504 int i;
5505 char *backtrack_pos = 0;
5506 const enum aarch64_opnd *operands = opcode->operands;
5507 aarch64_reg_type imm_reg_type;
5508
5509 clear_error ();
5510 skip_whitespace (str);
5511
5512 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5513 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5514 else
5515 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5516
5517 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5518 {
5519 int64_t val;
5520 const reg_entry *reg;
5521 int comma_skipped_p = 0;
5522 aarch64_reg_type rtype;
5523 struct vector_type_el vectype;
5524 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5525 aarch64_opnd_info *info = &inst.base.operands[i];
5526 aarch64_reg_type reg_type;
5527
5528 DEBUG_TRACE ("parse operand %d", i);
5529
5530 /* Assign the operand code. */
5531 info->type = operands[i];
5532
5533 if (optional_operand_p (opcode, i))
5534 {
5535 /* Remember where we are in case we need to backtrack. */
5536 gas_assert (!backtrack_pos);
5537 backtrack_pos = str;
5538 }
5539
5540 /* Expect comma between operands; the backtrack mechanism will take
5541 care of cases of omitted optional operand. */
5542 if (i > 0 && ! skip_past_char (&str, ','))
5543 {
5544 set_syntax_error (_("comma expected between operands"));
5545 goto failure;
5546 }
5547 else
5548 comma_skipped_p = 1;
5549
5550 switch (operands[i])
5551 {
5552 case AARCH64_OPND_Rd:
5553 case AARCH64_OPND_Rn:
5554 case AARCH64_OPND_Rm:
5555 case AARCH64_OPND_Rt:
5556 case AARCH64_OPND_Rt2:
5557 case AARCH64_OPND_Rs:
5558 case AARCH64_OPND_Ra:
5559 case AARCH64_OPND_Rt_SYS:
5560 case AARCH64_OPND_PAIRREG:
5561 case AARCH64_OPND_SVE_Rm:
5562 po_int_reg_or_fail (REG_TYPE_R_Z);
5563 break;
5564
5565 case AARCH64_OPND_Rd_SP:
5566 case AARCH64_OPND_Rn_SP:
5567 case AARCH64_OPND_Rt_SP:
5568 case AARCH64_OPND_SVE_Rn_SP:
5569 case AARCH64_OPND_Rm_SP:
5570 po_int_reg_or_fail (REG_TYPE_R_SP);
5571 break;
5572
5573 case AARCH64_OPND_Rm_EXT:
5574 case AARCH64_OPND_Rm_SFT:
5575 po_misc_or_fail (parse_shifter_operand
5576 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5577 ? SHIFTED_ARITH_IMM
5578 : SHIFTED_LOGIC_IMM)));
5579 if (!info->shifter.operator_present)
5580 {
5581 /* Default to LSL if not present. Libopcodes prefers shifter
5582 kind to be explicit. */
5583 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5584 info->shifter.kind = AARCH64_MOD_LSL;
5585 /* For Rm_EXT, libopcodes will carry out further check on whether
5586 or not stack pointer is used in the instruction (Recall that
5587 "the extend operator is not optional unless at least one of
5588 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5589 }
5590 break;
5591
5592 case AARCH64_OPND_Fd:
5593 case AARCH64_OPND_Fn:
5594 case AARCH64_OPND_Fm:
5595 case AARCH64_OPND_Fa:
5596 case AARCH64_OPND_Ft:
5597 case AARCH64_OPND_Ft2:
5598 case AARCH64_OPND_Sd:
5599 case AARCH64_OPND_Sn:
5600 case AARCH64_OPND_Sm:
5601 case AARCH64_OPND_SVE_VZn:
5602 case AARCH64_OPND_SVE_Vd:
5603 case AARCH64_OPND_SVE_Vm:
5604 case AARCH64_OPND_SVE_Vn:
5605 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5606 if (val == PARSE_FAIL)
5607 {
5608 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5609 goto failure;
5610 }
5611 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5612
5613 info->reg.regno = val;
5614 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5615 break;
5616
5617 case AARCH64_OPND_SVE_Pd:
5618 case AARCH64_OPND_SVE_Pg3:
5619 case AARCH64_OPND_SVE_Pg4_5:
5620 case AARCH64_OPND_SVE_Pg4_10:
5621 case AARCH64_OPND_SVE_Pg4_16:
5622 case AARCH64_OPND_SVE_Pm:
5623 case AARCH64_OPND_SVE_Pn:
5624 case AARCH64_OPND_SVE_Pt:
5625 reg_type = REG_TYPE_PN;
5626 goto vector_reg;
5627
5628 case AARCH64_OPND_SVE_Za_5:
5629 case AARCH64_OPND_SVE_Za_16:
5630 case AARCH64_OPND_SVE_Zd:
5631 case AARCH64_OPND_SVE_Zm_5:
5632 case AARCH64_OPND_SVE_Zm_16:
5633 case AARCH64_OPND_SVE_Zn:
5634 case AARCH64_OPND_SVE_Zt:
5635 reg_type = REG_TYPE_ZN;
5636 goto vector_reg;
5637
5638 case AARCH64_OPND_Va:
5639 case AARCH64_OPND_Vd:
5640 case AARCH64_OPND_Vn:
5641 case AARCH64_OPND_Vm:
5642 reg_type = REG_TYPE_VN;
5643 vector_reg:
5644 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5645 if (val == PARSE_FAIL)
5646 {
5647 first_error (_(get_reg_expected_msg (reg_type)));
5648 goto failure;
5649 }
5650 if (vectype.defined & NTA_HASINDEX)
5651 goto failure;
5652
5653 info->reg.regno = val;
5654 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5655 && vectype.type == NT_invtype)
5656 /* Unqualified Pn and Zn registers are allowed in certain
5657 contexts. Rely on F_STRICT qualifier checking to catch
5658 invalid uses. */
5659 info->qualifier = AARCH64_OPND_QLF_NIL;
5660 else
5661 {
5662 info->qualifier = vectype_to_qualifier (&vectype);
5663 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5664 goto failure;
5665 }
5666 break;
5667
5668 case AARCH64_OPND_VdD1:
5669 case AARCH64_OPND_VnD1:
5670 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5671 if (val == PARSE_FAIL)
5672 {
5673 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5674 goto failure;
5675 }
5676 if (vectype.type != NT_d || vectype.index != 1)
5677 {
5678 set_fatal_syntax_error
5679 (_("the top half of a 128-bit FP/SIMD register is expected"));
5680 goto failure;
5681 }
5682 info->reg.regno = val;
5683 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5684 here; it is correct for the purpose of encoding/decoding since
5685 only the register number is explicitly encoded in the related
5686 instructions, although this appears a bit hacky. */
5687 info->qualifier = AARCH64_OPND_QLF_S_D;
5688 break;
5689
5690 case AARCH64_OPND_SVE_Zm3_INDEX:
5691 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5692 case AARCH64_OPND_SVE_Zm3_11_INDEX:
5693 case AARCH64_OPND_SVE_Zm4_11_INDEX:
5694 case AARCH64_OPND_SVE_Zm4_INDEX:
5695 case AARCH64_OPND_SVE_Zn_INDEX:
5696 reg_type = REG_TYPE_ZN;
5697 goto vector_reg_index;
5698
5699 case AARCH64_OPND_Ed:
5700 case AARCH64_OPND_En:
5701 case AARCH64_OPND_Em:
5702 case AARCH64_OPND_Em16:
5703 case AARCH64_OPND_SM3_IMM2:
5704 reg_type = REG_TYPE_VN;
5705 vector_reg_index:
5706 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5707 if (val == PARSE_FAIL)
5708 {
5709 first_error (_(get_reg_expected_msg (reg_type)));
5710 goto failure;
5711 }
5712 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5713 goto failure;
5714
5715 info->reglane.regno = val;
5716 info->reglane.index = vectype.index;
5717 info->qualifier = vectype_to_qualifier (&vectype);
5718 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5719 goto failure;
5720 break;
5721
5722 case AARCH64_OPND_SVE_ZnxN:
5723 case AARCH64_OPND_SVE_ZtxN:
5724 reg_type = REG_TYPE_ZN;
5725 goto vector_reg_list;
5726
5727 case AARCH64_OPND_LVn:
5728 case AARCH64_OPND_LVt:
5729 case AARCH64_OPND_LVt_AL:
5730 case AARCH64_OPND_LEt:
5731 reg_type = REG_TYPE_VN;
5732 vector_reg_list:
5733 if (reg_type == REG_TYPE_ZN
5734 && get_opcode_dependent_value (opcode) == 1
5735 && *str != '{')
5736 {
5737 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5738 if (val == PARSE_FAIL)
5739 {
5740 first_error (_(get_reg_expected_msg (reg_type)));
5741 goto failure;
5742 }
5743 info->reglist.first_regno = val;
5744 info->reglist.num_regs = 1;
5745 }
5746 else
5747 {
5748 val = parse_vector_reg_list (&str, reg_type, &vectype);
5749 if (val == PARSE_FAIL)
5750 goto failure;
5751
5752 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5753 {
5754 set_fatal_syntax_error (_("invalid register list"));
5755 goto failure;
5756 }
5757
5758 if (vectype.width != 0 && *str != ',')
5759 {
5760 set_fatal_syntax_error
5761 (_("expected element type rather than vector type"));
5762 goto failure;
5763 }
5764
5765 info->reglist.first_regno = (val >> 2) & 0x1f;
5766 info->reglist.num_regs = (val & 0x3) + 1;
5767 }
5768 if (operands[i] == AARCH64_OPND_LEt)
5769 {
5770 if (!(vectype.defined & NTA_HASINDEX))
5771 goto failure;
5772 info->reglist.has_index = 1;
5773 info->reglist.index = vectype.index;
5774 }
5775 else
5776 {
5777 if (vectype.defined & NTA_HASINDEX)
5778 goto failure;
5779 if (!(vectype.defined & NTA_HASTYPE))
5780 {
5781 if (reg_type == REG_TYPE_ZN)
5782 set_fatal_syntax_error (_("missing type suffix"));
5783 goto failure;
5784 }
5785 }
5786 info->qualifier = vectype_to_qualifier (&vectype);
5787 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5788 goto failure;
5789 break;
5790
5791 case AARCH64_OPND_CRn:
5792 case AARCH64_OPND_CRm:
5793 {
5794 char prefix = *(str++);
5795 if (prefix != 'c' && prefix != 'C')
5796 goto failure;
5797
5798 po_imm_nc_or_fail ();
5799 if (val > 15)
5800 {
5801 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5802 goto failure;
5803 }
5804 info->qualifier = AARCH64_OPND_QLF_CR;
5805 info->imm.value = val;
5806 break;
5807 }
5808
5809 case AARCH64_OPND_SHLL_IMM:
5810 case AARCH64_OPND_IMM_VLSR:
5811 po_imm_or_fail (1, 64);
5812 info->imm.value = val;
5813 break;
5814
5815 case AARCH64_OPND_CCMP_IMM:
5816 case AARCH64_OPND_SIMM5:
5817 case AARCH64_OPND_FBITS:
5818 case AARCH64_OPND_TME_UIMM16:
5819 case AARCH64_OPND_UIMM4:
5820 case AARCH64_OPND_UIMM4_ADDG:
5821 case AARCH64_OPND_UIMM10:
5822 case AARCH64_OPND_UIMM3_OP1:
5823 case AARCH64_OPND_UIMM3_OP2:
5824 case AARCH64_OPND_IMM_VLSL:
5825 case AARCH64_OPND_IMM:
5826 case AARCH64_OPND_IMM_2:
5827 case AARCH64_OPND_WIDTH:
5828 case AARCH64_OPND_SVE_INV_LIMM:
5829 case AARCH64_OPND_SVE_LIMM:
5830 case AARCH64_OPND_SVE_LIMM_MOV:
5831 case AARCH64_OPND_SVE_SHLIMM_PRED:
5832 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5833 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
5834 case AARCH64_OPND_SVE_SHRIMM_PRED:
5835 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5836 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
5837 case AARCH64_OPND_SVE_SIMM5:
5838 case AARCH64_OPND_SVE_SIMM5B:
5839 case AARCH64_OPND_SVE_SIMM6:
5840 case AARCH64_OPND_SVE_SIMM8:
5841 case AARCH64_OPND_SVE_UIMM3:
5842 case AARCH64_OPND_SVE_UIMM7:
5843 case AARCH64_OPND_SVE_UIMM8:
5844 case AARCH64_OPND_SVE_UIMM8_53:
5845 case AARCH64_OPND_IMM_ROT1:
5846 case AARCH64_OPND_IMM_ROT2:
5847 case AARCH64_OPND_IMM_ROT3:
5848 case AARCH64_OPND_SVE_IMM_ROT1:
5849 case AARCH64_OPND_SVE_IMM_ROT2:
5850 case AARCH64_OPND_SVE_IMM_ROT3:
5851 po_imm_nc_or_fail ();
5852 info->imm.value = val;
5853 break;
5854
5855 case AARCH64_OPND_SVE_AIMM:
5856 case AARCH64_OPND_SVE_ASIMM:
5857 po_imm_nc_or_fail ();
5858 info->imm.value = val;
5859 skip_whitespace (str);
5860 if (skip_past_comma (&str))
5861 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5862 else
5863 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5864 break;
5865
5866 case AARCH64_OPND_SVE_PATTERN:
5867 po_enum_or_fail (aarch64_sve_pattern_array);
5868 info->imm.value = val;
5869 break;
5870
5871 case AARCH64_OPND_SVE_PATTERN_SCALED:
5872 po_enum_or_fail (aarch64_sve_pattern_array);
5873 info->imm.value = val;
5874 if (skip_past_comma (&str)
5875 && !parse_shift (&str, info, SHIFTED_MUL))
5876 goto failure;
5877 if (!info->shifter.operator_present)
5878 {
5879 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5880 info->shifter.kind = AARCH64_MOD_MUL;
5881 info->shifter.amount = 1;
5882 }
5883 break;
5884
5885 case AARCH64_OPND_SVE_PRFOP:
5886 po_enum_or_fail (aarch64_sve_prfop_array);
5887 info->imm.value = val;
5888 break;
5889
5890 case AARCH64_OPND_UIMM7:
5891 po_imm_or_fail (0, 127);
5892 info->imm.value = val;
5893 break;
5894
5895 case AARCH64_OPND_IDX:
5896 case AARCH64_OPND_MASK:
5897 case AARCH64_OPND_BIT_NUM:
5898 case AARCH64_OPND_IMMR:
5899 case AARCH64_OPND_IMMS:
5900 po_imm_or_fail (0, 63);
5901 info->imm.value = val;
5902 break;
5903
5904 case AARCH64_OPND_IMM0:
5905 po_imm_nc_or_fail ();
5906 if (val != 0)
5907 {
5908 set_fatal_syntax_error (_("immediate zero expected"));
5909 goto failure;
5910 }
5911 info->imm.value = 0;
5912 break;
5913
5914 case AARCH64_OPND_FPIMM0:
5915 {
5916 int qfloat;
5917 bfd_boolean res1 = FALSE, res2 = FALSE;
5918 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5919 it is probably not worth the effort to support it. */
5920 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5921 imm_reg_type))
5922 && (error_p ()
5923 || !(res2 = parse_constant_immediate (&str, &val,
5924 imm_reg_type))))
5925 goto failure;
5926 if ((res1 && qfloat == 0) || (res2 && val == 0))
5927 {
5928 info->imm.value = 0;
5929 info->imm.is_fp = 1;
5930 break;
5931 }
5932 set_fatal_syntax_error (_("immediate zero expected"));
5933 goto failure;
5934 }
5935
5936 case AARCH64_OPND_IMM_MOV:
5937 {
5938 char *saved = str;
5939 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5940 reg_name_p (str, REG_TYPE_VN))
5941 goto failure;
5942 str = saved;
5943 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5944 GE_OPT_PREFIX, 1));
5945 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5946 later. fix_mov_imm_insn will try to determine a machine
5947 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5948 message if the immediate cannot be moved by a single
5949 instruction. */
5950 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5951 inst.base.operands[i].skip = 1;
5952 }
5953 break;
5954
5955 case AARCH64_OPND_SIMD_IMM:
5956 case AARCH64_OPND_SIMD_IMM_SFT:
5957 if (! parse_big_immediate (&str, &val, imm_reg_type))
5958 goto failure;
5959 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5960 /* addr_off_p */ 0,
5961 /* need_libopcodes_p */ 1,
5962 /* skip_p */ 1);
5963 /* Parse shift.
5964 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5965 shift, we don't check it here; we leave the checking to
5966 the libopcodes (operand_general_constraint_met_p). By
5967 doing this, we achieve better diagnostics. */
5968 if (skip_past_comma (&str)
5969 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5970 goto failure;
5971 if (!info->shifter.operator_present
5972 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5973 {
5974 /* Default to LSL if not present. Libopcodes prefers shifter
5975 kind to be explicit. */
5976 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5977 info->shifter.kind = AARCH64_MOD_LSL;
5978 }
5979 break;
5980
5981 case AARCH64_OPND_FPIMM:
5982 case AARCH64_OPND_SIMD_FPIMM:
5983 case AARCH64_OPND_SVE_FPIMM8:
5984 {
5985 int qfloat;
5986 bfd_boolean dp_p;
5987
5988 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5989 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5990 || !aarch64_imm_float_p (qfloat))
5991 {
5992 if (!error_p ())
5993 set_fatal_syntax_error (_("invalid floating-point"
5994 " constant"));
5995 goto failure;
5996 }
5997 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5998 inst.base.operands[i].imm.is_fp = 1;
5999 }
6000 break;
6001
6002 case AARCH64_OPND_SVE_I1_HALF_ONE:
6003 case AARCH64_OPND_SVE_I1_HALF_TWO:
6004 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6005 {
6006 int qfloat;
6007 bfd_boolean dp_p;
6008
6009 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6010 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6011 {
6012 if (!error_p ())
6013 set_fatal_syntax_error (_("invalid floating-point"
6014 " constant"));
6015 goto failure;
6016 }
6017 inst.base.operands[i].imm.value = qfloat;
6018 inst.base.operands[i].imm.is_fp = 1;
6019 }
6020 break;
6021
6022 case AARCH64_OPND_LIMM:
6023 po_misc_or_fail (parse_shifter_operand (&str, info,
6024 SHIFTED_LOGIC_IMM));
6025 if (info->shifter.operator_present)
6026 {
6027 set_fatal_syntax_error
6028 (_("shift not allowed for bitmask immediate"));
6029 goto failure;
6030 }
6031 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6032 /* addr_off_p */ 0,
6033 /* need_libopcodes_p */ 1,
6034 /* skip_p */ 1);
6035 break;
6036
6037 case AARCH64_OPND_AIMM:
6038 if (opcode->op == OP_ADD)
6039 /* ADD may have relocation types. */
6040 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6041 SHIFTED_ARITH_IMM));
6042 else
6043 po_misc_or_fail (parse_shifter_operand (&str, info,
6044 SHIFTED_ARITH_IMM));
6045 switch (inst.reloc.type)
6046 {
6047 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6048 info->shifter.amount = 12;
6049 break;
6050 case BFD_RELOC_UNUSED:
6051 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6052 if (info->shifter.kind != AARCH64_MOD_NONE)
6053 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6054 inst.reloc.pc_rel = 0;
6055 break;
6056 default:
6057 break;
6058 }
6059 info->imm.value = 0;
6060 if (!info->shifter.operator_present)
6061 {
6062 /* Default to LSL if not present. Libopcodes prefers shifter
6063 kind to be explicit. */
6064 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6065 info->shifter.kind = AARCH64_MOD_LSL;
6066 }
6067 break;
6068
6069 case AARCH64_OPND_HALF:
6070 {
6071 /* #<imm16> or relocation. */
6072 int internal_fixup_p;
6073 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6074 if (internal_fixup_p)
6075 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6076 skip_whitespace (str);
6077 if (skip_past_comma (&str))
6078 {
6079 /* {, LSL #<shift>} */
6080 if (! aarch64_gas_internal_fixup_p ())
6081 {
6082 set_fatal_syntax_error (_("can't mix relocation modifier "
6083 "with explicit shift"));
6084 goto failure;
6085 }
6086 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6087 }
6088 else
6089 inst.base.operands[i].shifter.amount = 0;
6090 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6091 inst.base.operands[i].imm.value = 0;
6092 if (! process_movw_reloc_info ())
6093 goto failure;
6094 }
6095 break;
6096
6097 case AARCH64_OPND_EXCEPTION:
6098 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6099 imm_reg_type));
6100 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6101 /* addr_off_p */ 0,
6102 /* need_libopcodes_p */ 0,
6103 /* skip_p */ 1);
6104 break;
6105
6106 case AARCH64_OPND_NZCV:
6107 {
6108 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
6109 if (nzcv != NULL)
6110 {
6111 str += 4;
6112 info->imm.value = nzcv->value;
6113 break;
6114 }
6115 po_imm_or_fail (0, 15);
6116 info->imm.value = val;
6117 }
6118 break;
6119
6120 case AARCH64_OPND_COND:
6121 case AARCH64_OPND_COND1:
6122 {
6123 char *start = str;
6124 do
6125 str++;
6126 while (ISALPHA (*str));
6127 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
6128 if (info->cond == NULL)
6129 {
6130 set_syntax_error (_("invalid condition"));
6131 goto failure;
6132 }
6133 else if (operands[i] == AARCH64_OPND_COND1
6134 && (info->cond->value & 0xe) == 0xe)
6135 {
6136 /* Do not allow AL or NV. */
6137 set_default_error ();
6138 goto failure;
6139 }
6140 }
6141 break;
6142
6143 case AARCH64_OPND_ADDR_ADRP:
6144 po_misc_or_fail (parse_adrp (&str));
6145 /* Clear the value as operand needs to be relocated. */
6146 info->imm.value = 0;
6147 break;
6148
6149 case AARCH64_OPND_ADDR_PCREL14:
6150 case AARCH64_OPND_ADDR_PCREL19:
6151 case AARCH64_OPND_ADDR_PCREL21:
6152 case AARCH64_OPND_ADDR_PCREL26:
6153 po_misc_or_fail (parse_address (&str, info));
6154 if (!info->addr.pcrel)
6155 {
6156 set_syntax_error (_("invalid pc-relative address"));
6157 goto failure;
6158 }
6159 if (inst.gen_lit_pool
6160 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6161 {
6162 /* Only permit "=value" in the literal load instructions.
6163 The literal will be generated by programmer_friendly_fixup. */
6164 set_syntax_error (_("invalid use of \"=immediate\""));
6165 goto failure;
6166 }
6167 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6168 {
6169 set_syntax_error (_("unrecognized relocation suffix"));
6170 goto failure;
6171 }
6172 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6173 {
6174 info->imm.value = inst.reloc.exp.X_add_number;
6175 inst.reloc.type = BFD_RELOC_UNUSED;
6176 }
6177 else
6178 {
6179 info->imm.value = 0;
6180 if (inst.reloc.type == BFD_RELOC_UNUSED)
6181 switch (opcode->iclass)
6182 {
6183 case compbranch:
6184 case condbranch:
6185 /* e.g. CBZ or B.COND */
6186 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6187 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6188 break;
6189 case testbranch:
6190 /* e.g. TBZ */
6191 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6192 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6193 break;
6194 case branch_imm:
6195 /* e.g. B or BL */
6196 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6197 inst.reloc.type =
6198 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6199 : BFD_RELOC_AARCH64_JUMP26;
6200 break;
6201 case loadlit:
6202 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6203 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6204 break;
6205 case pcreladdr:
6206 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6207 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6208 break;
6209 default:
6210 gas_assert (0);
6211 abort ();
6212 }
6213 inst.reloc.pc_rel = 1;
6214 }
6215 break;
6216
6217 case AARCH64_OPND_ADDR_SIMPLE:
6218 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6219 {
6220 /* [<Xn|SP>{, #<simm>}] */
6221 char *start = str;
6222 /* First use the normal address-parsing routines, to get
6223 the usual syntax errors. */
6224 po_misc_or_fail (parse_address (&str, info));
6225 if (info->addr.pcrel || info->addr.offset.is_reg
6226 || !info->addr.preind || info->addr.postind
6227 || info->addr.writeback)
6228 {
6229 set_syntax_error (_("invalid addressing mode"));
6230 goto failure;
6231 }
6232
6233 /* Then retry, matching the specific syntax of these addresses. */
6234 str = start;
6235 po_char_or_fail ('[');
6236 po_reg_or_fail (REG_TYPE_R64_SP);
6237 /* Accept optional ", #0". */
6238 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6239 && skip_past_char (&str, ','))
6240 {
6241 skip_past_char (&str, '#');
6242 if (! skip_past_char (&str, '0'))
6243 {
6244 set_fatal_syntax_error
6245 (_("the optional immediate offset can only be 0"));
6246 goto failure;
6247 }
6248 }
6249 po_char_or_fail (']');
6250 break;
6251 }
6252
6253 case AARCH64_OPND_ADDR_REGOFF:
6254 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6255 po_misc_or_fail (parse_address (&str, info));
6256 regoff_addr:
6257 if (info->addr.pcrel || !info->addr.offset.is_reg
6258 || !info->addr.preind || info->addr.postind
6259 || info->addr.writeback)
6260 {
6261 set_syntax_error (_("invalid addressing mode"));
6262 goto failure;
6263 }
6264 if (!info->shifter.operator_present)
6265 {
6266 /* Default to LSL if not present. Libopcodes prefers shifter
6267 kind to be explicit. */
6268 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6269 info->shifter.kind = AARCH64_MOD_LSL;
6270 }
6271 /* Qualifier to be deduced by libopcodes. */
6272 break;
6273
6274 case AARCH64_OPND_ADDR_SIMM7:
6275 po_misc_or_fail (parse_address (&str, info));
6276 if (info->addr.pcrel || info->addr.offset.is_reg
6277 || (!info->addr.preind && !info->addr.postind))
6278 {
6279 set_syntax_error (_("invalid addressing mode"));
6280 goto failure;
6281 }
6282 if (inst.reloc.type != BFD_RELOC_UNUSED)
6283 {
6284 set_syntax_error (_("relocation not allowed"));
6285 goto failure;
6286 }
6287 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6288 /* addr_off_p */ 1,
6289 /* need_libopcodes_p */ 1,
6290 /* skip_p */ 0);
6291 break;
6292
6293 case AARCH64_OPND_ADDR_SIMM9:
6294 case AARCH64_OPND_ADDR_SIMM9_2:
6295 case AARCH64_OPND_ADDR_SIMM11:
6296 case AARCH64_OPND_ADDR_SIMM13:
6297 po_misc_or_fail (parse_address (&str, info));
6298 if (info->addr.pcrel || info->addr.offset.is_reg
6299 || (!info->addr.preind && !info->addr.postind)
6300 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6301 && info->addr.writeback))
6302 {
6303 set_syntax_error (_("invalid addressing mode"));
6304 goto failure;
6305 }
6306 if (inst.reloc.type != BFD_RELOC_UNUSED)
6307 {
6308 set_syntax_error (_("relocation not allowed"));
6309 goto failure;
6310 }
6311 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6312 /* addr_off_p */ 1,
6313 /* need_libopcodes_p */ 1,
6314 /* skip_p */ 0);
6315 break;
6316
6317 case AARCH64_OPND_ADDR_SIMM10:
6318 case AARCH64_OPND_ADDR_OFFSET:
6319 po_misc_or_fail (parse_address (&str, info));
6320 if (info->addr.pcrel || info->addr.offset.is_reg
6321 || !info->addr.preind || info->addr.postind)
6322 {
6323 set_syntax_error (_("invalid addressing mode"));
6324 goto failure;
6325 }
6326 if (inst.reloc.type != BFD_RELOC_UNUSED)
6327 {
6328 set_syntax_error (_("relocation not allowed"));
6329 goto failure;
6330 }
6331 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6332 /* addr_off_p */ 1,
6333 /* need_libopcodes_p */ 1,
6334 /* skip_p */ 0);
6335 break;
6336
6337 case AARCH64_OPND_ADDR_UIMM12:
6338 po_misc_or_fail (parse_address (&str, info));
6339 if (info->addr.pcrel || info->addr.offset.is_reg
6340 || !info->addr.preind || info->addr.writeback)
6341 {
6342 set_syntax_error (_("invalid addressing mode"));
6343 goto failure;
6344 }
6345 if (inst.reloc.type == BFD_RELOC_UNUSED)
6346 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6347 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6348 || (inst.reloc.type
6349 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6350 || (inst.reloc.type
6351 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6352 || (inst.reloc.type
6353 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6354 || (inst.reloc.type
6355 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6356 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6357 /* Leave qualifier to be determined by libopcodes. */
6358 break;
6359
6360 case AARCH64_OPND_SIMD_ADDR_POST:
6361 /* [<Xn|SP>], <Xm|#<amount>> */
6362 po_misc_or_fail (parse_address (&str, info));
6363 if (!info->addr.postind || !info->addr.writeback)
6364 {
6365 set_syntax_error (_("invalid addressing mode"));
6366 goto failure;
6367 }
6368 if (!info->addr.offset.is_reg)
6369 {
6370 if (inst.reloc.exp.X_op == O_constant)
6371 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6372 else
6373 {
6374 set_fatal_syntax_error
6375 (_("writeback value must be an immediate constant"));
6376 goto failure;
6377 }
6378 }
6379 /* No qualifier. */
6380 break;
6381
6382 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6383 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6384 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6385 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6386 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6387 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6388 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6389 case AARCH64_OPND_SVE_ADDR_RI_U6:
6390 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6391 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6392 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6393 /* [X<n>{, #imm, MUL VL}]
6394 [X<n>{, #imm}]
6395 but recognizing SVE registers. */
6396 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6397 &offset_qualifier));
6398 if (base_qualifier != AARCH64_OPND_QLF_X)
6399 {
6400 set_syntax_error (_("invalid addressing mode"));
6401 goto failure;
6402 }
6403 sve_regimm:
6404 if (info->addr.pcrel || info->addr.offset.is_reg
6405 || !info->addr.preind || info->addr.writeback)
6406 {
6407 set_syntax_error (_("invalid addressing mode"));
6408 goto failure;
6409 }
6410 if (inst.reloc.type != BFD_RELOC_UNUSED
6411 || inst.reloc.exp.X_op != O_constant)
6412 {
6413 /* Make sure this has priority over
6414 "invalid addressing mode". */
6415 set_fatal_syntax_error (_("constant offset required"));
6416 goto failure;
6417 }
6418 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6419 break;
6420
6421 case AARCH64_OPND_SVE_ADDR_R:
6422 /* [<Xn|SP>{, <R><m>}]
6423 but recognizing SVE registers. */
6424 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6425 &offset_qualifier));
6426 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6427 {
6428 offset_qualifier = AARCH64_OPND_QLF_X;
6429 info->addr.offset.is_reg = 1;
6430 info->addr.offset.regno = 31;
6431 }
6432 else if (base_qualifier != AARCH64_OPND_QLF_X
6433 || offset_qualifier != AARCH64_OPND_QLF_X)
6434 {
6435 set_syntax_error (_("invalid addressing mode"));
6436 goto failure;
6437 }
6438 goto regoff_addr;
6439
6440 case AARCH64_OPND_SVE_ADDR_RR:
6441 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6442 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6443 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6444 case AARCH64_OPND_SVE_ADDR_RX:
6445 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6446 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6447 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6448 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6449 but recognizing SVE registers. */
6450 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6451 &offset_qualifier));
6452 if (base_qualifier != AARCH64_OPND_QLF_X
6453 || offset_qualifier != AARCH64_OPND_QLF_X)
6454 {
6455 set_syntax_error (_("invalid addressing mode"));
6456 goto failure;
6457 }
6458 goto regoff_addr;
6459
6460 case AARCH64_OPND_SVE_ADDR_RZ:
6461 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6462 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6463 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6464 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6465 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6466 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6467 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6468 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6469 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6470 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6471 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6472 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6473 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6474 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6475 &offset_qualifier));
6476 if (base_qualifier != AARCH64_OPND_QLF_X
6477 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6478 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6479 {
6480 set_syntax_error (_("invalid addressing mode"));
6481 goto failure;
6482 }
6483 info->qualifier = offset_qualifier;
6484 goto regoff_addr;
6485
6486 case AARCH64_OPND_SVE_ADDR_ZX:
6487 /* [Zn.<T>{, <Xm>}]. */
6488 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6489 &offset_qualifier));
6490 /* Things to check:
6491 base_qualifier either S_S or S_D
6492 offset_qualifier must be X
6493 */
6494 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6495 && base_qualifier != AARCH64_OPND_QLF_S_D)
6496 || offset_qualifier != AARCH64_OPND_QLF_X)
6497 {
6498 set_syntax_error (_("invalid addressing mode"));
6499 goto failure;
6500 }
6501 info->qualifier = base_qualifier;
6502 if (!info->addr.offset.is_reg || info->addr.pcrel
6503 || !info->addr.preind || info->addr.writeback
6504 || info->shifter.operator_present != 0)
6505 {
6506 set_syntax_error (_("invalid addressing mode"));
6507 goto failure;
6508 }
6509 info->shifter.kind = AARCH64_MOD_LSL;
6510 break;
6511
6512
6513 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6514 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6515 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6516 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6517 /* [Z<n>.<T>{, #imm}] */
6518 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6519 &offset_qualifier));
6520 if (base_qualifier != AARCH64_OPND_QLF_S_S
6521 && base_qualifier != AARCH64_OPND_QLF_S_D)
6522 {
6523 set_syntax_error (_("invalid addressing mode"));
6524 goto failure;
6525 }
6526 info->qualifier = base_qualifier;
6527 goto sve_regimm;
6528
6529 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6530 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6531 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6532 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6533 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6534
6535 We don't reject:
6536
6537 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6538
6539 here since we get better error messages by leaving it to
6540 the qualifier checking routines. */
6541 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6542 &offset_qualifier));
6543 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6544 && base_qualifier != AARCH64_OPND_QLF_S_D)
6545 || offset_qualifier != base_qualifier)
6546 {
6547 set_syntax_error (_("invalid addressing mode"));
6548 goto failure;
6549 }
6550 info->qualifier = base_qualifier;
6551 goto regoff_addr;
6552
6553 case AARCH64_OPND_SYSREG:
6554 {
6555 uint32_t sysreg_flags;
6556 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6557 &sysreg_flags)) == PARSE_FAIL)
6558 {
6559 set_syntax_error (_("unknown or missing system register name"));
6560 goto failure;
6561 }
6562 inst.base.operands[i].sysreg.value = val;
6563 inst.base.operands[i].sysreg.flags = sysreg_flags;
6564 break;
6565 }
6566
6567 case AARCH64_OPND_PSTATEFIELD:
6568 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6569 == PARSE_FAIL)
6570 {
6571 set_syntax_error (_("unknown or missing PSTATE field name"));
6572 goto failure;
6573 }
6574 inst.base.operands[i].pstatefield = val;
6575 break;
6576
6577 case AARCH64_OPND_SYSREG_IC:
6578 inst.base.operands[i].sysins_op =
6579 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6580 goto sys_reg_ins;
6581
6582 case AARCH64_OPND_SYSREG_DC:
6583 inst.base.operands[i].sysins_op =
6584 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6585 goto sys_reg_ins;
6586
6587 case AARCH64_OPND_SYSREG_AT:
6588 inst.base.operands[i].sysins_op =
6589 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6590 goto sys_reg_ins;
6591
6592 case AARCH64_OPND_SYSREG_SR:
6593 inst.base.operands[i].sysins_op =
6594 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6595 goto sys_reg_ins;
6596
6597 case AARCH64_OPND_SYSREG_TLBI:
6598 inst.base.operands[i].sysins_op =
6599 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6600 sys_reg_ins:
6601 if (inst.base.operands[i].sysins_op == NULL)
6602 {
6603 set_fatal_syntax_error ( _("unknown or missing operation name"));
6604 goto failure;
6605 }
6606 break;
6607
6608 case AARCH64_OPND_BARRIER:
6609 case AARCH64_OPND_BARRIER_ISB:
6610 val = parse_barrier (&str);
6611 if (val != PARSE_FAIL
6612 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6613 {
6614 /* ISB only accepts options name 'sy'. */
6615 set_syntax_error
6616 (_("the specified option is not accepted in ISB"));
6617 /* Turn off backtrack as this optional operand is present. */
6618 backtrack_pos = 0;
6619 goto failure;
6620 }
6621 /* This is an extension to accept a 0..15 immediate. */
6622 if (val == PARSE_FAIL)
6623 po_imm_or_fail (0, 15);
6624 info->barrier = aarch64_barrier_options + val;
6625 break;
6626
6627 case AARCH64_OPND_PRFOP:
6628 val = parse_pldop (&str);
6629 /* This is an extension to accept a 0..31 immediate. */
6630 if (val == PARSE_FAIL)
6631 po_imm_or_fail (0, 31);
6632 inst.base.operands[i].prfop = aarch64_prfops + val;
6633 break;
6634
6635 case AARCH64_OPND_BARRIER_PSB:
6636 val = parse_barrier_psb (&str, &(info->hint_option));
6637 if (val == PARSE_FAIL)
6638 goto failure;
6639 break;
6640
6641 case AARCH64_OPND_BTI_TARGET:
6642 val = parse_bti_operand (&str, &(info->hint_option));
6643 if (val == PARSE_FAIL)
6644 goto failure;
6645 break;
6646
6647 default:
6648 as_fatal (_("unhandled operand code %d"), operands[i]);
6649 }
6650
6651 /* If we get here, this operand was successfully parsed. */
6652 inst.base.operands[i].present = 1;
6653 continue;
6654
6655 failure:
6656 /* The parse routine should already have set the error, but in case
6657 not, set a default one here. */
6658 if (! error_p ())
6659 set_default_error ();
6660
6661 if (! backtrack_pos)
6662 goto parse_operands_return;
6663
6664 {
6665 /* We reach here because this operand is marked as optional, and
6666 either no operand was supplied or the operand was supplied but it
6667 was syntactically incorrect. In the latter case we report an
6668 error. In the former case we perform a few more checks before
6669 dropping through to the code to insert the default operand. */
6670
6671 char *tmp = backtrack_pos;
6672 char endchar = END_OF_INSN;
6673
6674 if (i != (aarch64_num_of_operands (opcode) - 1))
6675 endchar = ',';
6676 skip_past_char (&tmp, ',');
6677
6678 if (*tmp != endchar)
6679 /* The user has supplied an operand in the wrong format. */
6680 goto parse_operands_return;
6681
6682 /* Make sure there is not a comma before the optional operand.
6683 For example the fifth operand of 'sys' is optional:
6684
6685 sys #0,c0,c0,#0, <--- wrong
6686 sys #0,c0,c0,#0 <--- correct. */
6687 if (comma_skipped_p && i && endchar == END_OF_INSN)
6688 {
6689 set_fatal_syntax_error
6690 (_("unexpected comma before the omitted optional operand"));
6691 goto parse_operands_return;
6692 }
6693 }
6694
6695 /* Reaching here means we are dealing with an optional operand that is
6696 omitted from the assembly line. */
6697 gas_assert (optional_operand_p (opcode, i));
6698 info->present = 0;
6699 process_omitted_operand (operands[i], opcode, i, info);
6700
6701 /* Try again, skipping the optional operand at backtrack_pos. */
6702 str = backtrack_pos;
6703 backtrack_pos = 0;
6704
6705 /* Clear any error record after the omitted optional operand has been
6706 successfully handled. */
6707 clear_error ();
6708 }
6709
6710 /* Check if we have parsed all the operands. */
6711 if (*str != '\0' && ! error_p ())
6712 {
6713 /* Set I to the index of the last present operand; this is
6714 for the purpose of diagnostics. */
6715 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6716 ;
6717 set_fatal_syntax_error
6718 (_("unexpected characters following instruction"));
6719 }
6720
6721 parse_operands_return:
6722
6723 if (error_p ())
6724 {
6725 DEBUG_TRACE ("parsing FAIL: %s - %s",
6726 operand_mismatch_kind_names[get_error_kind ()],
6727 get_error_message ());
6728 /* Record the operand error properly; this is useful when there
6729 are multiple instruction templates for a mnemonic name, so that
6730 later on, we can select the error that most closely describes
6731 the problem. */
6732 record_operand_error (opcode, i, get_error_kind (),
6733 get_error_message ());
6734 return FALSE;
6735 }
6736 else
6737 {
6738 DEBUG_TRACE ("parsing SUCCESS");
6739 return TRUE;
6740 }
6741 }
6742
/* Apply programmer-friendly fix-ups to the instruction in *INSTR while
   keeping libopcodes happy, i.e. libopcodes only accepts the preferred
   architectural syntax.  The fix-ups rewrite accepted-but-unpreferred
   operand forms (e.g. an X register where the architecture expects W)
   into the canonical form before encoding.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register limits the testable bit number to 0..31.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* Canonicalize to the X form; the encoding is width-agnostic.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW loads a 32-bit literal even though Xt is 64-bit.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	/* Rm_EXT is the second or third operand in every addsub_ext form.  */
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6850
/* Check for loads and stores that will cause unpredictable behavior.
   INSTR is the fully-parsed instruction; STR is its source text, used
   only for diagnostics.  Emits warnings (never errors) since such
   encodings are architecturally CONSTRAINED UNPREDICTABLE, not invalid.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* For pair forms the address operand is the third operand; loading or
	 storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 NOTE(review): bit 22 appears to be the L (load) bit of the pair
	 encodings here -- confirm against the opcode table.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      /* It is unpredictable if the destination and status registers are the
	 same.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[1].reg.regno
	      || opnds[0].reg.regno == opnds[2].reg.regno))
	as_warn (_("unpredictable: identical transfer and status registers"
		   " --`%s'"),
		 str);

      break;

    default:
      break;
    }
}
6915
/* If an instruction sequence opened by a previous instruction is still
   pending (now_instr_sequence.instr non-NULL), warn that it was never
   completed and reset the sequence state.  Called e.g. when a section
   ends (see aarch64_frob_section), where the sequence can no longer be
   closed legitimately.  */
static void
force_automatic_sequence_close (void)
{
  if (now_instr_sequence.instr)
    {
      as_warn (_("previous `%s' sequence has not been closed"),
	       now_instr_sequence.instr->opcode->name);
      /* Reset to the empty sequence so later code does not re-warn.  */
      init_insn_sequence (NULL, &now_instr_sequence);
    }
}
6926
6927 /* A wrapper function to interface with libopcodes on encoding and
6928 record the error message if there is any.
6929
6930 Return TRUE on success; otherwise return FALSE. */
6931
6932 static bfd_boolean
6933 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6934 aarch64_insn *code)
6935 {
6936 aarch64_operand_error error_info;
6937 memset (&error_info, '\0', sizeof (error_info));
6938 error_info.kind = AARCH64_OPDE_NIL;
6939 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
6940 && !error_info.non_fatal)
6941 return TRUE;
6942
6943 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6944 record_operand_error_info (opcode, &error_info);
6945 return error_info.non_fatal;
6946 }
6947
#ifdef DEBUG_AARCH64
/* Debug helper: print the operand list of OPCODE, preferring each
   operand's name and falling back to its description when the name
   is empty.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  int i;

  for (i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *text = aarch64_get_operand_name (opcode->operands[i]);

      if (text[0] == '\0')
	text = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, text);
    }
}
#endif /* DEBUG_AARCH64 */
6963
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.

   Each mnemonic may map to several opcode templates; the templates are
   tried in order and the first one that parses, fixes up and encodes
   successfully is emitted.  If none succeeds, the recorded operand
   errors are reported.  */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed from the mnemonic suffix across the
     reset of the global instruction record.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, canonicalize and encode; first template to succeed wins.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, TRUE);
	  return;
	}

      /* This template failed; reset state before trying the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, FALSE);
}
7103
/* Various frobbings of labels and their addresses.  */

/* Hook run at the start of each input line: forget any label seen on a
   previous line, so md_assemble only re-anchors labels defined on the
   current line.  */
void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
7111
/* Hook run when label SYM is defined: remember it so md_assemble can
   re-anchor it to the next instruction's frag/address, and emit DWARF
   line information for it.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
7119
/* Section-change hook; SEC itself is unused.  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close: an unfinished instruction
     sequence cannot span a section change.  */
  force_automatic_sequence_close ();
}
7126
/* Recognize a "data:" marker immediately after the current input
   character.  On a match, the marker character itself is overwritten
   with '/', the pointer is advanced past "data", and the line is
   truncated there -- producing a symbol name ending in "/data" (later
   stripped by aarch64_canonicalize_symbol_name).  Return 1 if the
   marker was consumed, 0 otherwise.  */
int
aarch64_data_in_code (void)
{
  if (!strncmp (input_line_pointer + 1, "data:", 5))
    {
      *input_line_pointer = '/';
      input_line_pointer += 5;
      *input_line_pointer = 0;
      return 1;
    }

  return 0;
}
7140
/* Canonicalize symbol NAME in place: strip a trailing "/data" marker
   (appended via aarch64_data_in_code) so the symbol reverts to its
   user-visible name.  Returns NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
7151 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF creates a user-visible name; REGDEF_ALIAS differs only in the
   final flag (FALSE), marking the entry as an alias spelling.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Registers 0-15 only; used alone for the SVE predicate set below.  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* Registers 0-30; register 31 is context-dependent for integer regs
   (sp/wsp vs xzr/wzr) and so is added separately where needed.  */
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  x31/w31 are deliberately omitted; the sp/zr
     spellings for number 31 are defined explicitly below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  Only p0-p15 exist.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7221
/* Single-letter helpers used to spell out all 16 NZCV flag masks below:
   an uppercase letter means the flag bit is set, lowercase means clear.
   B() packs them as N:Z:C:V into bits 3..0.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7259 \f
7260 /* MD interface: bits in the object file. */
7261
7262 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7263 for use in the a.out file, and stores them in the array pointed to by buf.
7264 This knows about the endian-ness of the target machine and does
7265 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7266 2 (short) and 4 (long) Floating numbers are put out as a series of
7267 LITTLENUMS (shorts, here at least). */
7268
7269 void
7270 md_number_to_chars (char *buf, valueT val, int n)
7271 {
7272 if (target_big_endian)
7273 number_to_chars_bigendian (buf, val, n);
7274 else
7275 number_to_chars_littleendian (buf, val, n);
7276 }
7277
/* MD interface: Sections.  */

/* Estimate the size of a frag before relaxing.  Assume everything fits in
   4 bytes -- every AArch64 instruction is a single 32-bit word.  */

int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  fragp->fr_var = 4;
  return 4;
}
7289
/* Round up a section size to the appropriate boundary.  No extra
   alignment is imposed here; SIZE is returned unchanged.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  return size;
}
7297
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed between the end of the fixed part and the
     next frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* FIX is the 0-3 zero bytes needed to reach 4-byte alignment.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      /* Mark the zero padding as data for the mapping symbols.  */
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* Store one NOP as the repeating (fr_var) pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
7355
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
/* Non-ELF targets: nothing to do; mapping-state bookkeeping (see the
   OBJ_ELF variant below) does not apply.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}
7368
7369 #else /* OBJ_ELF is defined. */
/* ELF variant: record the frag for mapping-symbol purposes and set an
   initial mapping state based on the frag type.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
7401 \f
/* Initialize the DWARF-2 unwind information for this procedure:
   at function entry the CFA is SP + 0.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
7409 #endif /* OBJ_ELF */
7410
/* Convert REGNAME to a DWARF-2 register number.  Core integer registers
   (including SP) map directly to their register number; FP/SIMD scalar
   registers map to number + 64 (the AArch64 DWARF V-register block).
   Return -1 for unknown or unmappable names.  */

int
tc_aarch64_regname_to_dw2regnum (char *regname)
{
  const reg_entry *reg = parse_reg (&regname);
  if (reg == NULL)
    return -1;

  switch (reg->type)
    {
    case REG_TYPE_SP_32:
    case REG_TYPE_SP_64:
    case REG_TYPE_R_32:
    case REG_TYPE_R_64:
      return reg->number;

    case REG_TYPE_FP_B:
    case REG_TYPE_FP_H:
    case REG_TYPE_FP_S:
    case REG_TYPE_FP_D:
    case REG_TYPE_FP_Q:
      return reg->number + 64;

    default:
      break;
    }
  return -1;
}
7440
/* Implement DWARF2_ADDR_SIZE: 4 bytes for ILP32 ELF targets, otherwise
   the target's native address width.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7452
/* MD interface: Symbol and relocation handling.  */

/* Return the address within the segment that a PC-relative fixup is
   relative to.  For AArch64 PC-relative fixups applied to instructions
   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     A cross-segment symbol or a forced relocation means the linker will
     resolve the value, so the base contribution must be zero here.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
7475
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.
   Returns the (possibly newly created) GOT symbol, or 0 when NAME is
   not a symbol this back end defaults.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character pre-check before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
7501
7502 /* Return non-zero if the indicated VALUE has overflowed the maximum
7503 range expressible by a unsigned number with the indicated number of
7504 BITS. */
7505
7506 static bfd_boolean
7507 unsigned_overflow (valueT value, unsigned bits)
7508 {
7509 valueT lim;
7510 if (bits >= sizeof (valueT) * 8)
7511 return FALSE;
7512 lim = (valueT) 1 << bits;
7513 return (value >= lim);
7514 }
7515
7516
7517 /* Return non-zero if the indicated VALUE has overflowed the maximum
7518 range expressible by an signed number with the indicated number of
7519 BITS. */
7520
7521 static bfd_boolean
7522 signed_overflow (offsetT value, unsigned bits)
7523 {
7524 offsetT lim;
7525 if (bits >= sizeof (offsetT) * 8)
7526 return FALSE;
7527 lim = (offsetT) 1 << (bits - 1);
7528 return (value < -lim || value >= lim);
7529 }
7530
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; OP_NIL
     marks forms with no LDUR/STUR equivalent.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure leaves *INSTR with
     the unscaled opcode installed, so callers treating FALSE as fatal
     should not reuse it.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return FALSE;

  return TRUE;
}
7593
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversibility an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  MOVZ/MOVN cannot target SP, and
     the ORR alias cannot target ZR, so each form is tried only when its
     destination is valid for it.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN); per the contract above it
	 is preferred over ORR but tried only after MOVZ.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias (bitmask immediate).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* No single MOVZ/MOVN/ORR can materialize VALUE.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
7654
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* 16-bit immediate of e.g. SVC/HVC/BRK.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd	ADD
	 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd	ADDS
	 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd	SUB
	 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd	SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12 Rn Rd		ADD
	 31000000 sf 011|0001|shift imm12 Rn Rd		ADDS
	 51000000 sf 101|0001|shift imm12 Rn Rd		SUB
	 71000000 sf 111|0001|shift imm12 Rn Rd		SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  A negative addend is folded into the opposite
	     operation with a positive immediate.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only possible
	     when the low 12 bits are all zero and no explicit shift was
	     written in the source.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction with the
	 resolved value so the encoder can validate the bitmask.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to load a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* The scaled-offset form did not fit; an unscaled (LDUR/STUR
	     style) encoding did.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7832
7833 /* Apply a fixup (fixP) to segment data, once it has been determined
7834 by our caller that we have all the info we need to fix it up.
7835
7836 Parameter valP is the pointer to the value of the bits. */
7837
7838 void
7839 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7840 {
7841 offsetT value = *valP;
7842 uint32_t insn;
7843 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7844 int scale;
7845 unsigned flags = fixP->fx_addnumber;
7846
7847 DEBUG_TRACE ("\n\n");
7848 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7849 DEBUG_TRACE ("Enter md_apply_fix");
7850
7851 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7852
7853 /* Note whether this will delete the relocation. */
7854
7855 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7856 fixP->fx_done = 1;
7857
7858 /* Process the relocations. */
7859 switch (fixP->fx_r_type)
7860 {
7861 case BFD_RELOC_NONE:
7862 /* This will need to go in the object file. */
7863 fixP->fx_done = 0;
7864 break;
7865
7866 case BFD_RELOC_8:
7867 case BFD_RELOC_8_PCREL:
7868 if (fixP->fx_done || !seg->use_rela_p)
7869 md_number_to_chars (buf, value, 1);
7870 break;
7871
7872 case BFD_RELOC_16:
7873 case BFD_RELOC_16_PCREL:
7874 if (fixP->fx_done || !seg->use_rela_p)
7875 md_number_to_chars (buf, value, 2);
7876 break;
7877
7878 case BFD_RELOC_32:
7879 case BFD_RELOC_32_PCREL:
7880 if (fixP->fx_done || !seg->use_rela_p)
7881 md_number_to_chars (buf, value, 4);
7882 break;
7883
7884 case BFD_RELOC_64:
7885 case BFD_RELOC_64_PCREL:
7886 if (fixP->fx_done || !seg->use_rela_p)
7887 md_number_to_chars (buf, value, 8);
7888 break;
7889
7890 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7891 /* We claim that these fixups have been processed here, even if
7892 in fact we generate an error because we do not have a reloc
7893 for them, so tc_gen_reloc() will reject them. */
7894 fixP->fx_done = 1;
7895 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7896 {
7897 as_bad_where (fixP->fx_file, fixP->fx_line,
7898 _("undefined symbol %s used as an immediate value"),
7899 S_GET_NAME (fixP->fx_addsy));
7900 goto apply_fix_return;
7901 }
7902 fix_insn (fixP, flags, value);
7903 break;
7904
7905 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7906 if (fixP->fx_done || !seg->use_rela_p)
7907 {
7908 if (value & 3)
7909 as_bad_where (fixP->fx_file, fixP->fx_line,
7910 _("pc-relative load offset not word aligned"));
7911 if (signed_overflow (value, 21))
7912 as_bad_where (fixP->fx_file, fixP->fx_line,
7913 _("pc-relative load offset out of range"));
7914 insn = get_aarch64_insn (buf);
7915 insn |= encode_ld_lit_ofs_19 (value >> 2);
7916 put_aarch64_insn (buf, insn);
7917 }
7918 break;
7919
7920 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7921 if (fixP->fx_done || !seg->use_rela_p)
7922 {
7923 if (signed_overflow (value, 21))
7924 as_bad_where (fixP->fx_file, fixP->fx_line,
7925 _("pc-relative address offset out of range"));
7926 insn = get_aarch64_insn (buf);
7927 insn |= encode_adr_imm (value);
7928 put_aarch64_insn (buf, insn);
7929 }
7930 break;
7931
7932 case BFD_RELOC_AARCH64_BRANCH19:
7933 if (fixP->fx_done || !seg->use_rela_p)
7934 {
7935 if (value & 3)
7936 as_bad_where (fixP->fx_file, fixP->fx_line,
7937 _("conditional branch target not word aligned"));
7938 if (signed_overflow (value, 21))
7939 as_bad_where (fixP->fx_file, fixP->fx_line,
7940 _("conditional branch out of range"));
7941 insn = get_aarch64_insn (buf);
7942 insn |= encode_cond_branch_ofs_19 (value >> 2);
7943 put_aarch64_insn (buf, insn);
7944 }
7945 break;
7946
7947 case BFD_RELOC_AARCH64_TSTBR14:
7948 if (fixP->fx_done || !seg->use_rela_p)
7949 {
7950 if (value & 3)
7951 as_bad_where (fixP->fx_file, fixP->fx_line,
7952 _("conditional branch target not word aligned"));
7953 if (signed_overflow (value, 16))
7954 as_bad_where (fixP->fx_file, fixP->fx_line,
7955 _("conditional branch out of range"));
7956 insn = get_aarch64_insn (buf);
7957 insn |= encode_tst_branch_ofs_14 (value >> 2);
7958 put_aarch64_insn (buf, insn);
7959 }
7960 break;
7961
7962 case BFD_RELOC_AARCH64_CALL26:
7963 case BFD_RELOC_AARCH64_JUMP26:
7964 if (fixP->fx_done || !seg->use_rela_p)
7965 {
7966 if (value & 3)
7967 as_bad_where (fixP->fx_file, fixP->fx_line,
7968 _("branch target not word aligned"));
7969 if (signed_overflow (value, 28))
7970 as_bad_where (fixP->fx_file, fixP->fx_line,
7971 _("branch out of range"));
7972 insn = get_aarch64_insn (buf);
7973 insn |= encode_branch_ofs_26 (value >> 2);
7974 put_aarch64_insn (buf, insn);
7975 }
7976 break;
7977
7978 case BFD_RELOC_AARCH64_MOVW_G0:
7979 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7980 case BFD_RELOC_AARCH64_MOVW_G0_S:
7981 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7982 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
7983 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
7984 scale = 0;
7985 goto movw_common;
7986 case BFD_RELOC_AARCH64_MOVW_G1:
7987 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7988 case BFD_RELOC_AARCH64_MOVW_G1_S:
7989 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7990 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
7991 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
7992 scale = 16;
7993 goto movw_common;
7994 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7995 scale = 0;
7996 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7997 /* Should always be exported to object file, see
7998 aarch64_force_relocation(). */
7999 gas_assert (!fixP->fx_done);
8000 gas_assert (seg->use_rela_p);
8001 goto movw_common;
8002 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8003 scale = 16;
8004 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8005 /* Should always be exported to object file, see
8006 aarch64_force_relocation(). */
8007 gas_assert (!fixP->fx_done);
8008 gas_assert (seg->use_rela_p);
8009 goto movw_common;
8010 case BFD_RELOC_AARCH64_MOVW_G2:
8011 case BFD_RELOC_AARCH64_MOVW_G2_NC:
8012 case BFD_RELOC_AARCH64_MOVW_G2_S:
8013 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8014 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
8015 scale = 32;
8016 goto movw_common;
8017 case BFD_RELOC_AARCH64_MOVW_G3:
8018 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
8019 scale = 48;
8020 movw_common:
8021 if (fixP->fx_done || !seg->use_rela_p)
8022 {
8023 insn = get_aarch64_insn (buf);
8024
8025 if (!fixP->fx_done)
8026 {
8027 /* REL signed addend must fit in 16 bits */
8028 if (signed_overflow (value, 16))
8029 as_bad_where (fixP->fx_file, fixP->fx_line,
8030 _("offset out of range"));
8031 }
8032 else
8033 {
8034 /* Check for overflow and scale. */
8035 switch (fixP->fx_r_type)
8036 {
8037 case BFD_RELOC_AARCH64_MOVW_G0:
8038 case BFD_RELOC_AARCH64_MOVW_G1:
8039 case BFD_RELOC_AARCH64_MOVW_G2:
8040 case BFD_RELOC_AARCH64_MOVW_G3:
8041 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8042 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8043 if (unsigned_overflow (value, scale + 16))
8044 as_bad_where (fixP->fx_file, fixP->fx_line,
8045 _("unsigned value out of range"));
8046 break;
8047 case BFD_RELOC_AARCH64_MOVW_G0_S:
8048 case BFD_RELOC_AARCH64_MOVW_G1_S:
8049 case BFD_RELOC_AARCH64_MOVW_G2_S:
8050 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8051 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8052 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8053 /* NOTE: We can only come here with movz or movn. */
8054 if (signed_overflow (value, scale + 16))
8055 as_bad_where (fixP->fx_file, fixP->fx_line,
8056 _("signed value out of range"));
8057 if (value < 0)
8058 {
8059 /* Force use of MOVN. */
8060 value = ~value;
8061 insn = reencode_movzn_to_movn (insn);
8062 }
8063 else
8064 {
8065 /* Force use of MOVZ. */
8066 insn = reencode_movzn_to_movz (insn);
8067 }
8068 break;
8069 default:
8070 /* Unchecked relocations. */
8071 break;
8072 }
8073 value >>= scale;
8074 }
8075
8076 /* Insert value into MOVN/MOVZ/MOVK instruction. */
8077 insn |= encode_movw_imm (value & 0xffff);
8078
8079 put_aarch64_insn (buf, insn);
8080 }
8081 break;
8082
8083 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8084 fixP->fx_r_type = (ilp32_p
8085 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
8086 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
8087 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8088 /* Should always be exported to object file, see
8089 aarch64_force_relocation(). */
8090 gas_assert (!fixP->fx_done);
8091 gas_assert (seg->use_rela_p);
8092 break;
8093
8094 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8095 fixP->fx_r_type = (ilp32_p
8096 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
8097 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
8098 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8099 /* Should always be exported to object file, see
8100 aarch64_force_relocation(). */
8101 gas_assert (!fixP->fx_done);
8102 gas_assert (seg->use_rela_p);
8103 break;
8104
8105 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8106 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8107 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8108 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8109 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8110 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8111 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8112 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8113 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8114 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8115 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8116 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8117 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8118 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8119 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8120 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8121 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8122 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8123 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8124 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8125 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8126 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8127 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8128 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8129 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8130 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8131 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8132 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8133 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8134 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8135 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8136 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8137 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8138 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8139 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8140 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8141 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8142 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8143 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8144 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8145 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8146 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8147 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8148 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8149 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8150 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8151 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8152 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8153 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8154 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8155 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8156 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8157 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8158 /* Should always be exported to object file, see
8159 aarch64_force_relocation(). */
8160 gas_assert (!fixP->fx_done);
8161 gas_assert (seg->use_rela_p);
8162 break;
8163
8164 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8165 /* Should always be exported to object file, see
8166 aarch64_force_relocation(). */
8167 fixP->fx_r_type = (ilp32_p
8168 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
8169 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
8170 gas_assert (!fixP->fx_done);
8171 gas_assert (seg->use_rela_p);
8172 break;
8173
8174 case BFD_RELOC_AARCH64_ADD_LO12:
8175 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8176 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8177 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8178 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8179 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8180 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8181 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8182 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8183 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8184 case BFD_RELOC_AARCH64_LDST128_LO12:
8185 case BFD_RELOC_AARCH64_LDST16_LO12:
8186 case BFD_RELOC_AARCH64_LDST32_LO12:
8187 case BFD_RELOC_AARCH64_LDST64_LO12:
8188 case BFD_RELOC_AARCH64_LDST8_LO12:
8189 /* Should always be exported to object file, see
8190 aarch64_force_relocation(). */
8191 gas_assert (!fixP->fx_done);
8192 gas_assert (seg->use_rela_p);
8193 break;
8194
8195 case BFD_RELOC_AARCH64_TLSDESC_ADD:
8196 case BFD_RELOC_AARCH64_TLSDESC_CALL:
8197 case BFD_RELOC_AARCH64_TLSDESC_LDR:
8198 break;
8199
8200 case BFD_RELOC_UNUSED:
8201 /* An error will already have been reported. */
8202 break;
8203
8204 default:
8205 as_bad_where (fixP->fx_file, fixP->fx_line,
8206 _("unexpected %s fixup"),
8207 bfd_get_reloc_code_name (fixP->fx_r_type));
8208 break;
8209 }
8210
8211 apply_fix_return:
8212 /* Free the allocated the struct aarch64_inst.
8213 N.B. currently there are very limited number of fix-up types actually use
8214 this field, so the impact on the performance should be minimal . */
8215 if (fixP->tc_fix_data.inst != NULL)
8216 free (fixP->tc_fix_data.inst);
8217
8218 return;
8219 }
8220
8221 /* Translate internal representation of relocation info to BFD target
8222 format. */
8223
8224 arelent *
8225 tc_gen_reloc (asection * section, fixS * fixp)
8226 {
8227 arelent *reloc;
8228 bfd_reloc_code_real_type code;
8229
8230 reloc = XNEW (arelent);
8231
8232 reloc->sym_ptr_ptr = XNEW (asymbol *);
8233 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8234 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8235
8236 if (fixp->fx_pcrel)
8237 {
8238 if (section->use_rela_p)
8239 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8240 else
8241 fixp->fx_offset = reloc->address;
8242 }
8243 reloc->addend = fixp->fx_offset;
8244
8245 code = fixp->fx_r_type;
8246 switch (code)
8247 {
8248 case BFD_RELOC_16:
8249 if (fixp->fx_pcrel)
8250 code = BFD_RELOC_16_PCREL;
8251 break;
8252
8253 case BFD_RELOC_32:
8254 if (fixp->fx_pcrel)
8255 code = BFD_RELOC_32_PCREL;
8256 break;
8257
8258 case BFD_RELOC_64:
8259 if (fixp->fx_pcrel)
8260 code = BFD_RELOC_64_PCREL;
8261 break;
8262
8263 default:
8264 break;
8265 }
8266
8267 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8268 if (reloc->howto == NULL)
8269 {
8270 as_bad_where (fixp->fx_file, fixp->fx_line,
8271 _
8272 ("cannot represent %s relocation in this object file format"),
8273 bfd_get_reloc_code_name (code));
8274 return NULL;
8275 }
8276
8277 return reloc;
8278 }
8279
8280 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8281
8282 void
8283 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8284 {
8285 bfd_reloc_code_real_type type;
8286 int pcrel = 0;
8287
8288 /* Pick a reloc.
8289 FIXME: @@ Should look at CPU word size. */
8290 switch (size)
8291 {
8292 case 1:
8293 type = BFD_RELOC_8;
8294 break;
8295 case 2:
8296 type = BFD_RELOC_16;
8297 break;
8298 case 4:
8299 type = BFD_RELOC_32;
8300 break;
8301 case 8:
8302 type = BFD_RELOC_64;
8303 break;
8304 default:
8305 as_bad (_("cannot do %u-byte relocation"), size);
8306 type = BFD_RELOC_UNUSED;
8307 break;
8308 }
8309
8310 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8311 }
8312
/* Decide whether the fixup FIXP must be kept as a relocation in the object
   file rather than being resolved at assembly time.  Returns nonzero to
   force a relocation; falls back on generic_force_reloc for fixups not
   handled explicitly here.  */
int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  (See the corresponding cases in md_apply_fix, which
	 rewrite fx_r_type to the 32- or 64-bit variant.)  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  return generic_force_reloc (fixp);
}
8408
8409 #ifdef OBJ_ELF
8410
8411 /* Implement md_after_parse_args. This is the earliest time we need to decide
8412 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8413
8414 void
8415 aarch64_after_parse_args (void)
8416 {
8417 if (aarch64_abi != AARCH64_ABI_NONE)
8418 return;
8419
8420 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8421 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8422 aarch64_abi = AARCH64_ABI_ILP32;
8423 else
8424 aarch64_abi = AARCH64_ABI_LP64;
8425 }
8426
8427 const char *
8428 elf64_aarch64_target_format (void)
8429 {
8430 #ifdef TE_CLOUDABI
8431 /* FIXME: What to do for ilp32_p ? */
8432 if (target_big_endian)
8433 return "elf64-bigaarch64-cloudabi";
8434 else
8435 return "elf64-littleaarch64-cloudabi";
8436 #else
8437 if (target_big_endian)
8438 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8439 else
8440 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8441 #endif
8442 }
8443
/* Hook run over each symbol during final symbol processing; simply
   defers to the generic ELF symbol frobbing.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8449 #endif
8450
8451 /* MD interface: Finalization. */
8452
8453 /* A good place to do this, although this was probably not intended
8454 for this kind of use. We need to dump the literal pool before
8455 references are made to a null symbol pointer. */
8456
8457 void
8458 aarch64_cleanup (void)
8459 {
8460 literal_pool *pool;
8461
8462 for (pool = list_of_pools; pool; pool = pool->next)
8463 {
8464 /* Put it at the end of the relevant section. */
8465 subseg_set (pool->section, pool->sub_section);
8466 s_ltorg (0);
8467 }
8468 }
8469
8470 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections; ABFD and DUMMY are
   unused.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections without frag chains.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with NEXT; scan forward over
	 empty frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8534 #endif
8535
/* Adjust the symbol table.  On ELF targets, prune redundant mapping
   symbols and then apply the generic ELF adjustments; a no-op
   otherwise.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8548
/* Insert KEY/VALUE into TABLE, reporting (but not aborting on) any
   failure from the hash library.  */
static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *err = hash_insert (table, key, value);

  if (err != NULL)
    printf ("Internal Error: Can't hash %s\n", key);
}
8558
8559 static void
8560 fill_instruction_hash_table (void)
8561 {
8562 aarch64_opcode *opcode = aarch64_opcode_table;
8563
8564 while (opcode->name != NULL)
8565 {
8566 templates *templ, *new_templ;
8567 templ = hash_find (aarch64_ops_hsh, opcode->name);
8568
8569 new_templ = XNEW (templates);
8570 new_templ->opcode = opcode;
8571 new_templ->next = NULL;
8572
8573 if (!templ)
8574 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8575 else
8576 {
8577 new_templ->next = templ->next;
8578 templ->next = new_templ;
8579 }
8580 ++opcode;
8581 }
8582 }
8583
8584 static inline void
8585 convert_to_upper (char *dst, const char *src, size_t num)
8586 {
8587 unsigned int i;
8588 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8589 *dst = TOUPPER (*src);
8590 *dst = '\0';
8591 }
8592
8593 /* Assume STR point to a lower-case string, allocate, convert and return
8594 the corresponding upper-case string. */
8595 static inline const char*
8596 get_upper_str (const char *str)
8597 {
8598 char *ret;
8599 size_t len = strlen (str);
8600 ret = XNEWVEC (char, len + 1);
8601 convert_to_upper (ret, str, len);
8602 return ret;
8603 }
8604
8605 /* MD interface: Initialization. */
8606
8607 void
8608 md_begin (void)
8609 {
8610 unsigned mach;
8611 unsigned int i;
8612
8613 if ((aarch64_ops_hsh = hash_new ()) == NULL
8614 || (aarch64_cond_hsh = hash_new ()) == NULL
8615 || (aarch64_shift_hsh = hash_new ()) == NULL
8616 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8617 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8618 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8619 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8620 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8621 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8622 || (aarch64_sys_regs_sr_hsh = hash_new ()) == NULL
8623 || (aarch64_reg_hsh = hash_new ()) == NULL
8624 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8625 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8626 || (aarch64_pldop_hsh = hash_new ()) == NULL
8627 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8628 as_fatal (_("virtual memory exhausted"));
8629
8630 fill_instruction_hash_table ();
8631
8632 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8633 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8634 (void *) (aarch64_sys_regs + i));
8635
8636 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8637 checked_hash_insert (aarch64_pstatefield_hsh,
8638 aarch64_pstatefields[i].name,
8639 (void *) (aarch64_pstatefields + i));
8640
8641 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8642 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8643 aarch64_sys_regs_ic[i].name,
8644 (void *) (aarch64_sys_regs_ic + i));
8645
8646 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8647 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8648 aarch64_sys_regs_dc[i].name,
8649 (void *) (aarch64_sys_regs_dc + i));
8650
8651 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8652 checked_hash_insert (aarch64_sys_regs_at_hsh,
8653 aarch64_sys_regs_at[i].name,
8654 (void *) (aarch64_sys_regs_at + i));
8655
8656 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8657 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8658 aarch64_sys_regs_tlbi[i].name,
8659 (void *) (aarch64_sys_regs_tlbi + i));
8660
8661 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
8662 checked_hash_insert (aarch64_sys_regs_sr_hsh,
8663 aarch64_sys_regs_sr[i].name,
8664 (void *) (aarch64_sys_regs_sr + i));
8665
8666 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8667 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8668 (void *) (reg_names + i));
8669
8670 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8671 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8672 (void *) (nzcv_names + i));
8673
8674 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8675 {
8676 const char *name = aarch64_operand_modifiers[i].name;
8677 checked_hash_insert (aarch64_shift_hsh, name,
8678 (void *) (aarch64_operand_modifiers + i));
8679 /* Also hash the name in the upper case. */
8680 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8681 (void *) (aarch64_operand_modifiers + i));
8682 }
8683
8684 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8685 {
8686 unsigned int j;
8687 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8688 the same condition code. */
8689 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8690 {
8691 const char *name = aarch64_conds[i].names[j];
8692 if (name == NULL)
8693 break;
8694 checked_hash_insert (aarch64_cond_hsh, name,
8695 (void *) (aarch64_conds + i));
8696 /* Also hash the name in the upper case. */
8697 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8698 (void *) (aarch64_conds + i));
8699 }
8700 }
8701
8702 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8703 {
8704 const char *name = aarch64_barrier_options[i].name;
8705 /* Skip xx00 - the unallocated values of option. */
8706 if ((i & 0x3) == 0)
8707 continue;
8708 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8709 (void *) (aarch64_barrier_options + i));
8710 /* Also hash the name in the upper case. */
8711 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8712 (void *) (aarch64_barrier_options + i));
8713 }
8714
8715 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8716 {
8717 const char* name = aarch64_prfops[i].name;
8718 /* Skip the unallocated hint encodings. */
8719 if (name == NULL)
8720 continue;
8721 checked_hash_insert (aarch64_pldop_hsh, name,
8722 (void *) (aarch64_prfops + i));
8723 /* Also hash the name in the upper case. */
8724 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8725 (void *) (aarch64_prfops + i));
8726 }
8727
8728 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8729 {
8730 const char* name = aarch64_hint_options[i].name;
8731
8732 checked_hash_insert (aarch64_hint_opt_hsh, name,
8733 (void *) (aarch64_hint_options + i));
8734 /* Also hash the name in the upper case. */
8735 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8736 (void *) (aarch64_hint_options + i));
8737 }
8738
8739 /* Set the cpu variant based on the command-line options. */
8740 if (!mcpu_cpu_opt)
8741 mcpu_cpu_opt = march_cpu_opt;
8742
8743 if (!mcpu_cpu_opt)
8744 mcpu_cpu_opt = &cpu_default;
8745
8746 cpu_variant = *mcpu_cpu_opt;
8747
8748 /* Record the CPU type. */
8749 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8750
8751 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8752 }
8753
/* Command line processing.  */

/* Short options: only "-m<...>" is recognised; the value is decoded by
   md_parse_option via the aarch64_opts/aarch64_long_opts tables.  */
const char *md_shortopts = "m:";

/* -EB/-EL are only defined when the target can actually be assembled in
   that endianness.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8780
/* Description of a simple "-m<flag>" option that just sets an int.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8803
/* One entry per recognised -mcpu= name.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  The "all" entry must stay first: the .cpu
   directive handler deliberately skips it.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A65AE"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
			    "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				 "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
			      "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
				   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
				   "Neoverse N1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
			       "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
			       "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				"Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
			      "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
8904
/* One entry per recognised -march= name.  */
struct aarch64_arch_option_table
{
  const char *name;			/* Architecture name, e.g. "armv8.2-a".  */
  const aarch64_feature_set value;	/* Features implied by that architecture.  */
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  The "all" entry must stay first: the .arch
   directive handler deliberately skips it.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {NULL, AARCH64_ARCH_NONE}
};
8924
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;			/* Extension name, e.g. "sve2".  */
  const aarch64_feature_set value;	/* Features the extension enables.  */
  const aarch64_feature_set require;	/* Feature dependencies.  */
};

/* The REQUIRE sets drive the transitive enable/disable closures computed
   by aarch64_feature_enable_set / aarch64_feature_disable_set below.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO
					 | AARCH64_FEATURE_AES
					 | AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2
					 | AARCH64_FEATURE_SHA3, 0),
			AARCH64_ARCH_NONE},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
9008
/* Description of a "-m<name>=<value>" option whose value is decoded by a
   dedicated parser function.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
9016
9017 /* Transitive closure of features depending on set. */
9018 static aarch64_feature_set
9019 aarch64_feature_disable_set (aarch64_feature_set set)
9020 {
9021 const struct aarch64_option_cpu_value_table *opt;
9022 aarch64_feature_set prev = 0;
9023
9024 while (prev != set) {
9025 prev = set;
9026 for (opt = aarch64_features; opt->name != NULL; opt++)
9027 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9028 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9029 }
9030 return set;
9031 }
9032
9033 /* Transitive closure of dependencies of set. */
9034 static aarch64_feature_set
9035 aarch64_feature_enable_set (aarch64_feature_set set)
9036 {
9037 const struct aarch64_option_cpu_value_table *opt;
9038 aarch64_feature_set prev = 0;
9039
9040 while (prev != set) {
9041 prev = set;
9042 for (opt = aarch64_features; opt->name != NULL; opt++)
9043 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9044 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9045 }
9046 return set;
9047 }
9048
9049 static int
9050 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9051 bfd_boolean ext_only)
9052 {
9053 /* We insist on extensions being added before being removed. We achieve
9054 this by using the ADDING_VALUE variable to indicate whether we are
9055 adding an extension (1) or removing it (0) and only allowing it to
9056 change in the order -1 -> 1 -> 0. */
9057 int adding_value = -1;
9058 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9059
9060 /* Copy the feature set, so that we can modify it. */
9061 *ext_set = **opt_p;
9062 *opt_p = ext_set;
9063
9064 while (str != NULL && *str != 0)
9065 {
9066 const struct aarch64_option_cpu_value_table *opt;
9067 const char *ext = NULL;
9068 int optlen;
9069
9070 if (!ext_only)
9071 {
9072 if (*str != '+')
9073 {
9074 as_bad (_("invalid architectural extension"));
9075 return 0;
9076 }
9077
9078 ext = strchr (++str, '+');
9079 }
9080
9081 if (ext != NULL)
9082 optlen = ext - str;
9083 else
9084 optlen = strlen (str);
9085
9086 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
9087 {
9088 if (adding_value != 0)
9089 adding_value = 0;
9090 optlen -= 2;
9091 str += 2;
9092 }
9093 else if (optlen > 0)
9094 {
9095 if (adding_value == -1)
9096 adding_value = 1;
9097 else if (adding_value != 1)
9098 {
9099 as_bad (_("must specify extensions to add before specifying "
9100 "those to remove"));
9101 return FALSE;
9102 }
9103 }
9104
9105 if (optlen == 0)
9106 {
9107 as_bad (_("missing architectural extension"));
9108 return 0;
9109 }
9110
9111 gas_assert (adding_value != -1);
9112
9113 for (opt = aarch64_features; opt->name != NULL; opt++)
9114 if (strncmp (opt->name, str, optlen) == 0)
9115 {
9116 aarch64_feature_set set;
9117
9118 /* Add or remove the extension. */
9119 if (adding_value)
9120 {
9121 set = aarch64_feature_enable_set (opt->value);
9122 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9123 }
9124 else
9125 {
9126 set = aarch64_feature_disable_set (opt->value);
9127 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9128 }
9129 break;
9130 }
9131
9132 if (opt->name == NULL)
9133 {
9134 as_bad (_("unknown architectural extension `%s'"), str);
9135 return 0;
9136 }
9137
9138 str = ext;
9139 };
9140
9141 return 1;
9142 }
9143
9144 static int
9145 aarch64_parse_cpu (const char *str)
9146 {
9147 const struct aarch64_cpu_option_table *opt;
9148 const char *ext = strchr (str, '+');
9149 size_t optlen;
9150
9151 if (ext != NULL)
9152 optlen = ext - str;
9153 else
9154 optlen = strlen (str);
9155
9156 if (optlen == 0)
9157 {
9158 as_bad (_("missing cpu name `%s'"), str);
9159 return 0;
9160 }
9161
9162 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9163 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9164 {
9165 mcpu_cpu_opt = &opt->value;
9166 if (ext != NULL)
9167 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
9168
9169 return 1;
9170 }
9171
9172 as_bad (_("unknown cpu `%s'"), str);
9173 return 0;
9174 }
9175
9176 static int
9177 aarch64_parse_arch (const char *str)
9178 {
9179 const struct aarch64_arch_option_table *opt;
9180 const char *ext = strchr (str, '+');
9181 size_t optlen;
9182
9183 if (ext != NULL)
9184 optlen = ext - str;
9185 else
9186 optlen = strlen (str);
9187
9188 if (optlen == 0)
9189 {
9190 as_bad (_("missing architecture name `%s'"), str);
9191 return 0;
9192 }
9193
9194 for (opt = aarch64_archs; opt->name != NULL; opt++)
9195 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9196 {
9197 march_cpu_opt = &opt->value;
9198 if (ext != NULL)
9199 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
9200
9201 return 1;
9202 }
9203
9204 as_bad (_("unknown architecture `%s'\n"), str);
9205 return 0;
9206 }
9207
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* No NULL sentinel: this table is iterated with ARRAY_SIZE.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
9219
9220 static int
9221 aarch64_parse_abi (const char *str)
9222 {
9223 unsigned int i;
9224
9225 if (str[0] == '\0')
9226 {
9227 as_bad (_("missing abi name `%s'"), str);
9228 return 0;
9229 }
9230
9231 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9232 if (strcmp (str, aarch64_abis[i].name) == 0)
9233 {
9234 aarch64_abi = aarch64_abis[i].value;
9235 return 1;
9236 }
9237
9238 as_bad (_("unknown abi `%s'\n"), str);
9239 return 0;
9240 }
9241
/* The value-taking -m options; matched by prefix in md_parse_option and
   decoded by the per-option parser functions above.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
9253
/* gas hook: handle a target-specific command-line option.  C is the
   option character and ARG its argument (or NULL).  Return 1 if the
   option was consumed, 0 if it is unrecognised here.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple flag options (-mbig-endian etc.).  The table
	 entry's first character is the short-option letter C; the rest
	 must match ARG exactly.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  ARG lacks the leading option
		 letter (already consumed as C), hence the "- 1" when
		 skipping past the option prefix.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
9321
9322 void
9323 md_show_usage (FILE * fp)
9324 {
9325 struct aarch64_option_table *opt;
9326 struct aarch64_long_option_table *lopt;
9327
9328 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9329
9330 for (opt = aarch64_opts; opt->option != NULL; opt++)
9331 if (opt->help != NULL)
9332 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9333
9334 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9335 if (lopt->help != NULL)
9336 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
9337
9338 #ifdef OPTION_EB
9339 fprintf (fp, _("\
9340 -EB assemble code for a big-endian cpu\n"));
9341 #endif
9342
9343 #ifdef OPTION_EL
9344 fprintf (fp, _("\
9345 -EL assemble code for a little-endian cpu\n"));
9346 #endif
9347 }
9348
/* Parse a .cpu directive.  Accepts "<cpu-name>[+ext...]"; on success
   updates mcpu_cpu_opt and cpu_variant for subsequent instructions.  */

static void
s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_cpu_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  /* Terminate the operand in place so it can be compared as a string;
     SAVED_CHAR is restored before leaving.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry.  */
  for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
	&& strncmp (name, opt->name, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	/* NOTE(review): on extension-parse failure we return here without
	   restoring SAVED_CHAR, leaving a NUL spliced into the input line
	   — confirm this is intended.  */
	if (ext != NULL)
	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
	    return;

	cpu_variant = *mcpu_cpu_opt;

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
9393
9394
/* Parse a .arch directive.  Accepts "<arch-name>[+ext...]"; on success
   updates mcpu_cpu_opt and cpu_variant for subsequent instructions.  */

static void
s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_arch_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  /* Terminate the operand in place so it can be compared as a string;
     SAVED_CHAR is restored before leaving.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry.  */
  for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
	&& strncmp (name, opt->name, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	/* NOTE(review): on extension-parse failure we return here without
	   restoring SAVED_CHAR, leaving a NUL spliced into the input line
	   — confirm this is intended.  */
	if (ext != NULL)
	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
	    return;

	cpu_variant = *mcpu_cpu_opt;

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
9440
9441 /* Parse a .arch_extension directive. */
9442
9443 static void
9444 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9445 {
9446 char saved_char;
9447 char *ext = input_line_pointer;;
9448
9449 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9450 input_line_pointer++;
9451 saved_char = *input_line_pointer;
9452 *input_line_pointer = 0;
9453
9454 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9455 return;
9456
9457 cpu_variant = *mcpu_cpu_opt;
9458
9459 *input_line_pointer = saved_char;
9460 demand_empty_rest_of_line ();
9461 }
9462
/* Copy symbol information.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  /* Propagate the AArch64-specific symbol flags from SRC to DEST.  */
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
9470
9471 #ifdef OBJ_ELF
9472 /* Same as elf_copy_symbol_attributes, but without copying st_other.
9473 This is needed so AArch64 specific st_other values can be independently
9474 specified for an IFUNC resolver (that is called by the dynamic linker)
9475 and the symbol it resolves (aliased to the resolver). In particular,
9476 if a function symbol has special st_other value set via directives,
9477 then attaching an IFUNC resolver to that symbol should not override
9478 the st_other setting. Requiring the directive on the IFUNC resolver
9479 symbol would be unexpected and problematic in C code, where the two
9480 symbols appear as two independent function declarations. */
9481
9482 void
9483 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
9484 {
9485 struct elf_obj_sy *srcelf = symbol_get_obj (src);
9486 struct elf_obj_sy *destelf = symbol_get_obj (dest);
9487 if (srcelf->size)
9488 {
9489 if (destelf->size == NULL)
9490 destelf->size = XNEW (expressionS);
9491 *destelf->size = *srcelf->size;
9492 }
9493 else
9494 {
9495 if (destelf->size != NULL)
9496 free (destelf->size);
9497 destelf->size = NULL;
9498 }
9499 S_SET_SIZE (dest, S_GET_SIZE (src));
9500 }
9501 #endif
This page took 0.226844 seconds and 3 git commands to generate.