Implement a float16 directive for assembling 16 bit IEEE 754 floating point numbers...
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2019 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
/* Element types that can appear in a vector arrangement or scalar
   suffix, plus the SVE predication markers.  NT_invtype flags "no/bad
   type".  */
enum vector_el_type
{
  NT_invtype = -1,
  NT_b,				/* byte (8-bit) element */
  NT_h,				/* half-word (16-bit) element */
  NT_s,				/* single-word (32-bit) element */
  NT_d,				/* double-word (64-bit) element */
  NT_q,				/* quad-word (128-bit) element */
  NT_zero,			/* SVE "/z" zeroing predication */
  NT_merge			/* SVE "/m" merging predication */
};
101
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
106
/* Parsed shape information for a vector register or vector element:
   the element type, which of type/index/variable-width were present
   (see the NTA_* bits above), the number of elements, and the element
   index if one was given.  */
struct vector_type_el
{
  enum vector_el_type type;	/* element type, e.g. NT_s for ".4s" */
  unsigned char defined;	/* mask of NTA_HASTYPE/NTA_HASINDEX/NTA_HASVARWIDTH */
  unsigned width;		/* number of elements, e.g. 4 for ".4s"; 0 if none */
  int64_t index;		/* element index when NTA_HASINDEX is set */
};
114
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
116
/* Relocation information attached to the instruction being assembled,
   including the expression still to be resolved and how the eventual
   GAS fixup should be generated.  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code */
  expressionS exp;			/* expression the reloc applies to */
  int pc_rel;				/* non-zero if PC-relative */
  enum aarch64_opnd opnd;		/* operand the reloc belongs to */
  uint32_t flags;			/* e.g. FIXUP_F_HAS_EXPLICIT_SHIFT */
  unsigned need_libopcodes_p : 1;	/* reloc needs libopcodes to encode */
};
126
/* Everything the assembler accumulates for one instruction while it is
   being parsed and encoded.  A single global instance (`inst') is
   reused for each assembly line.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
  {
    enum aarch64_operand_error_kind kind;
    const char *error;
  } parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};
144
145 typedef struct aarch64_instruction aarch64_instruction;
146
147 static aarch64_instruction inst;
148
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
183 static inline bfd_boolean
184 error_p (void)
185 {
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188
/* Return the message text of the recorded diagnostic, or NULL when the
   diagnostic carries no explicit message.  */
static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}
194
/* Return the kind of the recorded diagnostic (AARCH64_OPDE_NIL when
   none is pending).  */
static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}
200
201 static inline void
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
203 {
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
206 }
207
/* Record a recoverable diagnostic: parsing of the current template may
   still continue after it.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
213
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message.  (A NULL message tells the reporting code to fall
   back to that generic per-operand description.)  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
221
/* Record a syntax diagnostic, unconditionally overwriting any earlier
   one.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
/* Record a fatal syntax diagnostic: no further templates should be
   tried for this line.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
240 \f
241 /* Return value for certain parsers when the parsing fails; those parsers
242 return the information of the parsed result, e.g. register number, on
243 success. */
244 #define PARSE_FAIL -1
245
246 /* This is an invalid condition code that means no conditional field is
247 present. */
248 #define COND_ALWAYS 0x10
249
/* Mapping from a barrier-option mnemonic (e.g. "sy") to its encoding
   value; used to populate aarch64_barrier_opt_hsh.  */
typedef struct
{
  const char *template;		/* option name as written in assembly */
  unsigned long value;		/* encoding of the option */
} asm_barrier_opt;
255
/* Mapping from an NZCV flags mnemonic to its 4-bit immediate value;
   used to populate aarch64_nzcv_hsh.  */
typedef struct
{
  const char *template;		/* flags name as written in assembly */
  uint32_t value;		/* corresponding nzcv immediate */
} asm_nzcv;
261
/* Mapping from a relocation-operator name to its BFD relocation
   code.  */
struct reloc_entry
{
  char *name;				/* operator name, e.g. "lo12" */
  bfd_reloc_code_real_type reloc;	/* matching BFD reloc code */
};
267
268 /* Macros to define the register types and masks for the purpose
269 of parsing. */
270
271 #undef AARCH64_REG_TYPES
272 #define AARCH64_REG_TYPES \
273 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
274 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
275 BASIC_REG_TYPE(SP_32) /* wsp */ \
276 BASIC_REG_TYPE(SP_64) /* sp */ \
277 BASIC_REG_TYPE(Z_32) /* wzr */ \
278 BASIC_REG_TYPE(Z_64) /* xzr */ \
279 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
280 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
281 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
282 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
283 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
284 BASIC_REG_TYPE(VN) /* v[0-31] */ \
285 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
286 BASIC_REG_TYPE(PN) /* p[0-15] */ \
287 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
288 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
289 /* Typecheck: same, plus SVE registers. */ \
290 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
291 | REG_TYPE(ZN)) \
292 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
293 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Typecheck: same, plus SVE registers. */ \
296 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
298 | REG_TYPE(ZN)) \
299 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
300 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
301 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
302 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
303 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
306 /* Typecheck: any [BHSDQ]P FP. */ \
307 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
308 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
309 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
310 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
311 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
312 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
313 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
314 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
315 be used for SVE instructions, since Zn and Pn are valid symbols \
316 in other contexts. */ \
317 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
318 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
319 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
320 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
321 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
322 | REG_TYPE(ZN) | REG_TYPE(PN)) \
323 /* Any integer register; used for error messages only. */ \
324 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
325 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
326 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
327 /* Pseudo type to mark the end of the enumerator sequence. */ \
328 BASIC_REG_TYPE(MAX)
329
330 #undef BASIC_REG_TYPE
331 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
332 #undef MULTI_REG_TYPE
333 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
334
335 /* Register type enumerators. */
336 typedef enum aarch64_reg_type_
337 {
338 /* A list of REG_TYPE_*. */
339 AARCH64_REG_TYPES
340 } aarch64_reg_type;
341
342 #undef BASIC_REG_TYPE
343 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
344 #undef REG_TYPE
345 #define REG_TYPE(T) (1 << REG_TYPE_##T)
346 #undef MULTI_REG_TYPE
347 #define MULTI_REG_TYPE(T,V) V,
348
/* Structure for a hash table entry for a register.  One entry exists
   per register name in aarch64_reg_hsh.  */
typedef struct
{
  const char *name;		/* register name as written, e.g. "x0" */
  unsigned char number;		/* register number within its bank */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;	/* aarch64_reg_type of this register */
  unsigned char builtin;	/* non-zero for predefined (not user .req) names */
} reg_entry;
357
358 /* Values indexed by aarch64_reg_type to assist the type checking. */
359 static const unsigned reg_type_masks[] =
360 {
361 AARCH64_REG_TYPES
362 };
363
364 #undef BASIC_REG_TYPE
365 #undef REG_TYPE
366 #undef MULTI_REG_TYPE
367 #undef AARCH64_REG_TYPES
368
369 /* Diagnostics used when we don't get a register of the expected type.
370 Note: this has to synchronized with aarch64_reg_type definitions
371 above. */
372 static const char *
373 get_reg_expected_msg (aarch64_reg_type reg_type)
374 {
375 const char *msg;
376
377 switch (reg_type)
378 {
379 case REG_TYPE_R_32:
380 msg = N_("integer 32-bit register expected");
381 break;
382 case REG_TYPE_R_64:
383 msg = N_("integer 64-bit register expected");
384 break;
385 case REG_TYPE_R_N:
386 msg = N_("integer register expected");
387 break;
388 case REG_TYPE_R64_SP:
389 msg = N_("64-bit integer or SP register expected");
390 break;
391 case REG_TYPE_SVE_BASE:
392 msg = N_("base register expected");
393 break;
394 case REG_TYPE_R_Z:
395 msg = N_("integer or zero register expected");
396 break;
397 case REG_TYPE_SVE_OFFSET:
398 msg = N_("offset register expected");
399 break;
400 case REG_TYPE_R_SP:
401 msg = N_("integer or SP register expected");
402 break;
403 case REG_TYPE_R_Z_SP:
404 msg = N_("integer, zero or SP register expected");
405 break;
406 case REG_TYPE_FP_B:
407 msg = N_("8-bit SIMD scalar register expected");
408 break;
409 case REG_TYPE_FP_H:
410 msg = N_("16-bit SIMD scalar or floating-point half precision "
411 "register expected");
412 break;
413 case REG_TYPE_FP_S:
414 msg = N_("32-bit SIMD scalar or floating-point single precision "
415 "register expected");
416 break;
417 case REG_TYPE_FP_D:
418 msg = N_("64-bit SIMD scalar or floating-point double precision "
419 "register expected");
420 break;
421 case REG_TYPE_FP_Q:
422 msg = N_("128-bit SIMD scalar or floating-point quad precision "
423 "register expected");
424 break;
425 case REG_TYPE_R_Z_BHSDQ_V:
426 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
427 msg = N_("register expected");
428 break;
429 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
430 msg = N_("SIMD scalar or floating-point register expected");
431 break;
432 case REG_TYPE_VN: /* any V reg */
433 msg = N_("vector register expected");
434 break;
435 case REG_TYPE_ZN:
436 msg = N_("SVE vector register expected");
437 break;
438 case REG_TYPE_PN:
439 msg = N_("SVE predicate register expected");
440 break;
441 default:
442 as_fatal (_("invalid register type %d"), reg_type);
443 }
444 return msg;
445 }
446
447 /* Some well known registers that we refer to directly elsewhere. */
448 #define REG_SP 31
449 #define REG_ZR 31
450
451 /* Instructions take 4 bytes in the object file. */
452 #define INSN_SIZE 4
453
454 static struct hash_control *aarch64_ops_hsh;
455 static struct hash_control *aarch64_cond_hsh;
456 static struct hash_control *aarch64_shift_hsh;
457 static struct hash_control *aarch64_sys_regs_hsh;
458 static struct hash_control *aarch64_pstatefield_hsh;
459 static struct hash_control *aarch64_sys_regs_ic_hsh;
460 static struct hash_control *aarch64_sys_regs_dc_hsh;
461 static struct hash_control *aarch64_sys_regs_at_hsh;
462 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
463 static struct hash_control *aarch64_sys_regs_sr_hsh;
464 static struct hash_control *aarch64_reg_hsh;
465 static struct hash_control *aarch64_barrier_opt_hsh;
466 static struct hash_control *aarch64_nzcv_hsh;
467 static struct hash_control *aarch64_pldop_hsh;
468 static struct hash_control *aarch64_hint_opt_hsh;
469
470 /* Stuff needed to resolve the label ambiguity
471 As:
472 ...
473 label: <insn>
474 may differ from:
475 ...
476 label:
477 <insn> */
478
479 static symbolS *last_label_seen;
480
481 /* Literal pool structure. Held on a per-section
482 and per-sub-section basis. */
483
484 #define MAX_LITERAL_POOL_SIZE 1024
/* One entry in a literal pool: the expression plus, for O_big values,
   a private copy of the bignum littlenums (the global bignum buffer is
   reused per line, so it must be copied to survive).  */
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;
491
/* A per-section, per-subsection literal pool; pools are kept on the
   singly linked list_of_pools.  */
typedef struct literal_pool
{
  literal_expression literals[MAX_LITERAL_POOL_SIZE];	/* pending literals */
  unsigned int next_free_entry;	/* index of the first unused slot */
  unsigned int id;		/* pool id, used to name the pool's label */
  symbolS *symbol;		/* label placed at the pool */
  segT section;			/* section the pool belongs to */
  subsegT sub_section;		/* subsection the pool belongs to */
  int size;			/* entry size in bytes (4 or 8) */
  struct literal_pool *next;	/* next pool in list_of_pools */
} literal_pool;
503
504 /* Pointer to a linked list of literal pools. */
505 static literal_pool *list_of_pools = NULL;
506 \f
507 /* Pure syntax. */
508
509 /* This array holds the chars that always start a comment. If the
510 pre-processor is disabled, these aren't very useful. */
511 const char comment_chars[] = "";
512
513 /* This array holds the chars that only start a comment at the beginning of
514 a line. If the line seems to have the form '# 123 filename'
515 .line and .file directives will appear in the pre-processed output. */
516 /* Note that input_file.c hand checks for '#' at the beginning of the
517 first line of the input file. This is because the compiler outputs
518 #NO_APP at the beginning of its output. */
519 /* Also note that comments like this one will always work. */
520 const char line_comment_chars[] = "#";
521
522 const char line_separator_chars[] = ";";
523
524 /* Chars that can be used to separate mant
525 from exp in floating point numbers. */
526 const char EXP_CHARS[] = "eE";
527
528 /* Chars that mean this number is a floating point constant. */
529 /* As in 0f12.456 */
530 /* or 0d1.2345e12 */
531
532 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhH";
533
534 /* Prefix character that indicates the start of an immediate value. */
535 #define is_immediate_prefix(C) ((C) == '#')
536
537 /* Separator character handling. */
538
539 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
540
541 static inline bfd_boolean
542 skip_past_char (char **str, char c)
543 {
544 if (**str == c)
545 {
546 (*str)++;
547 return TRUE;
548 }
549 else
550 return FALSE;
551 }
552
553 #define skip_past_comma(str) skip_past_char (str, ',')
554
555 /* Arithmetic expressions (possibly involving symbols). */
556
557 static bfd_boolean in_my_get_expression_p = FALSE;
558
559 /* Third argument to my_get_expression. */
560 #define GE_NO_PREFIX 0
561 #define GE_OPT_PREFIX 1
562
/* Return TRUE if the string pointed by *STR is successfully parsed
   as an valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   PREFIX_MODE is GE_NO_PREFIX or GE_OPT_PREFIX; in the latter case a
   leading '#' is skipped if present.  When REJECT_ABSENT is non-zero an
   O_absent result is also treated as a failure.  On failure *STR is
   advanced to where parsing stopped and a syntax error is recorded.  */

static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () reads from input_line_pointer, so temporarily point
     it at our string and restore it afterwards.  The in_my_get_expression_p
     flag lets md_operand () mark bad expressions with O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#'-prefixed operand must be an expression, so the error is
	 fatal; otherwise record it only if nothing better exists.  */
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
629
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  (Delegates entirely to the generic IEEE
   helper, honouring the target endianness.)  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
640
641 /* We handle all bad expressions here, so that we can report the faulty
642 instruction in the error message. */
643 void
644 md_operand (expressionS * exp)
645 {
646 if (in_my_get_expression_p)
647 exp->X_op = O_illegal;
648 }
649
650 /* Immediate values. */
651
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful.  Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;

  set_syntax_error (error);
}
663
664 /* Similar to first_error, but this function accepts formatted error
665 message. */
666 static void
667 first_error_fmt (const char *format, ...)
668 {
669 va_list args;
670 enum
671 { size = 100 };
672 /* N.B. this single buffer will not cause error messages for different
673 instructions to pollute each other; this is because at the end of
674 processing of each assembly line, error message if any will be
675 collected by as_bad. */
676 static char buffer[size];
677
678 if (! error_p ())
679 {
680 int ret ATTRIBUTE_UNUSED;
681 va_start (args, format);
682 ret = vsnprintf (buffer, size, format, args);
683 know (ret <= size - 1 && ret >= 0);
684 va_end (args);
685 set_syntax_error (buffer);
686 }
687 }
688
689 /* Register parsing. */
690
691 /* Generic register parser which is called by other specialized
692 register parsers.
693 CCP points to what should be the beginning of a register name.
694 If it is indeed a valid register name, advance CCP over it and
695 return the reg_entry structure; otherwise return NULL.
696 It does not issue diagnostics. */
697
698 static reg_entry *
699 parse_reg (char **ccp)
700 {
701 char *start = *ccp;
702 char *p;
703 reg_entry *reg;
704
705 #ifdef REGISTER_PREFIX
706 if (*start != REGISTER_PREFIX)
707 return NULL;
708 start++;
709 #endif
710
711 p = start;
712 if (!ISALPHA (*p) || !is_name_beginner (*p))
713 return NULL;
714
715 do
716 p++;
717 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
718
719 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
720
721 if (!reg)
722 return NULL;
723
724 *ccp = p;
725 return reg;
726 }
727
728 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
729 return FALSE. */
730 static bfd_boolean
731 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
732 {
733 return (reg_type_masks[type] & (1 << reg->type)) != 0;
734 }
735
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit GPR, WSP or WZR -> W qualifier.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit GPR, SP or XZR -> X qualifier.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* An SVE Z register is only acceptable when REG_TYPE allows it,
	 and it must carry an explicit ".s" or ".d" element suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
793
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   Note that this function does not issue any diagnostics.  (Convenience
   wrapper that accepts any 32/64-bit GPR, SP or ZR but no SVE
   registers.)  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
805
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector/predicate registers never take a numeric element count;
     for other registers a missing leading digit also means "no count"
     (scalar suffix like ".s").  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are only valid for SVE registers or as the
	 single-element form "1q".  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* With an explicit count, the total vector size must be 64 or 128
     bits, or one of the short forms 2h / 4b.
     NOTE(review): the message prints WIDTH (the element count) under
     the label "element size" and *ptr as %c after it was already
     validated — wording looks swapped; confirm against upstream before
     changing the string.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
891
892 /* *STR contains an SVE zero/merge predication suffix. Parse it into
893 *PARSED_TYPE and point *STR at the end of the suffix. */
894
895 static bfd_boolean
896 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
897 {
898 char *ptr = *str;
899
900 /* Skip '/'. */
901 gas_assert (*ptr == '/');
902 ptr++;
903 switch (TOLOWER (*ptr))
904 {
905 case 'z':
906 parsed_type->type = NT_zero;
907 break;
908 case 'm':
909 parsed_type->type = NT_merge;
910 break;
911 default:
912 if (*ptr != '\0' && *ptr != ',')
913 first_error_fmt (_("unexpected character `%c' in predication type"),
914 *ptr);
915 else
916 first_error (_("missing predication type"));
917 return FALSE;
918 }
919 parsed_type->width = 0;
920 *str = ptr + 1;
921 return TRUE;
922 }
923
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start with "no shape information".  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the concrete type of the register found.  */
  type = reg->type;

  /* V/Z/P registers may carry a ".<shape>" suffix; P registers may
     alternatively carry a "/z" or "/m" predication suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register if of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* Optional "[<index>]" element index.  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1059
1060 /* Parse register.
1061
1062 Return the register number on success; return PARSE_FAIL otherwise.
1063
1064 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1065 the register (e.g. NEON double or quad reg when either has been requested).
1066
1067 If this is a NEON vector register with additional type information, fill
1068 in the struct pointed to by VECTYPE (if non-NULL).
1069
1070 This parser does not handle register list. */
1071
1072 static int
1073 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1074 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1075 {
1076 struct vector_type_el atype;
1077 char *str = *ccp;
1078 int reg = parse_typed_reg (&str, type, rtype, &atype,
1079 /*in_reg_list= */ FALSE);
1080
1081 if (reg == PARSE_FAIL)
1082 return PARSE_FAIL;
1083
1084 if (vectype)
1085 *vectype = atype;
1086
1087 *ccp = str;
1088
1089 return reg;
1090 }
1091
1092 static inline bfd_boolean
1093 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1094 {
1095 return
1096 e1.type == e2.type
1097 && e1.defined == e2.defined
1098 && e1.width == e2.width && e1.index == e2.index;
1099 }
1100
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  /* A register list is always enclosed in braces.  */
  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  /* Walk the comma- and '-'-separated entries.  IN_RANGE is set (by the
     while-condition below) when the previous separator was '-', in which
     case the previous register number becomes the start of a range.  */
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      /* If any entry carries an element index, the whole list must be
	 followed by one (checked after the closing brace).  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* Ranges must be ascending; VAL_RANGE ends up one past the
	     range start so the expansion loop below covers it.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* All entries must share the shape/index info of the first.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      /* Fold each register of the entry (a single register, or every
	 register of a range) into the 5-bits-per-register encoding.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  /* Parse the trailing "[<index>]" if any entry in the list had one.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low 2 bits encode the register count minus one; the register numbers
     start at bit 2 (see the format description above).  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1261
1262 /* Directives: register aliases. */
1263
1264 static reg_entry *
1265 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1266 {
1267 reg_entry *new;
1268 const char *name;
1269
1270 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1271 {
1272 if (new->builtin)
1273 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1274 str);
1275
1276 /* Only warn about a redefinition if it's not defined as the
1277 same register. */
1278 else if (new->number != number || new->type != type)
1279 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1280
1281 return NULL;
1282 }
1283
1284 name = xstrdup (str);
1285 new = XNEW (reg_entry);
1286
1287 new->name = name;
1288 new->number = number;
1289 new->type = type;
1290 new->builtin = FALSE;
1291
1292 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1293 abort ();
1294
1295 return new;
1296 }
1297
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.

   NEWNAME points to the candidate alias name; P points just past it.  */

static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      /* Still return TRUE: the statement was a .req, just an invalid
	 one, and the diagnostic has been issued here.  */
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the upper-case variant if it differs from the
	 spelling given by the user.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1377
1378 /* Should never be called, as .req goes between the alias and the
1379 register name, not at the beginning of the line. */
1380 static void
1381 s_req (int a ATTRIBUTE_UNUSED)
1382 {
1383 as_bad (_("invalid syntax for .req directive"));
1384 }
1385
1386 /* The .unreq directive deletes an alias which was previously defined
1387 by .req. For example:
1388
1389 my_alias .req r11
1390 .unreq my_alias */
1391
1392 static void
1393 s_unreq (int a ATTRIBUTE_UNUSED)
1394 {
1395 char *name;
1396 char saved_char;
1397
1398 name = input_line_pointer;
1399
1400 while (*input_line_pointer != 0
1401 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1402 ++input_line_pointer;
1403
1404 saved_char = *input_line_pointer;
1405 *input_line_pointer = 0;
1406
1407 if (!*name)
1408 as_bad (_("invalid syntax for .unreq directive"));
1409 else
1410 {
1411 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1412
1413 if (!reg)
1414 as_bad (_("unknown register alias '%s'"), name);
1415 else if (reg->builtin)
1416 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1417 name);
1418 else
1419 {
1420 char *p;
1421 char *nbuf;
1422
1423 hash_delete (aarch64_reg_hsh, name, FALSE);
1424 free ((char *) reg->name);
1425 free (reg);
1426
1427 /* Also locate the all upper case and all lower case versions.
1428 Do not complain if we cannot find one or the other as it
1429 was probably deleted above. */
1430
1431 nbuf = strdup (name);
1432 for (p = nbuf; *p; p++)
1433 *p = TOUPPER (*p);
1434 reg = hash_find (aarch64_reg_hsh, nbuf);
1435 if (reg)
1436 {
1437 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1438 free ((char *) reg->name);
1439 free (reg);
1440 }
1441
1442 for (p = nbuf; *p; p++)
1443 *p = TOLOWER (*p);
1444 reg = hash_find (aarch64_reg_hsh, nbuf);
1445 if (reg)
1446 {
1447 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1448 free ((char *) reg->name);
1449 free (reg);
1450 }
1451
1452 free (nbuf);
1453 }
1454 }
1455
1456 *input_line_pointer = saved_char;
1457 demand_empty_rest_of_line ();
1458 }
1459
1460 /* Directives: Instruction set selection. */
1461
1462 #ifdef OBJ_ELF
/* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
   spec.  (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
   Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
   and $d has type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */

/* Create a new mapping symbol for the transition to STATE, at offset
   VALUE within FRAG.  Records the symbol in the frag's first_map /
   last_map fields so that duplicates at the same offset are replaced.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Only $d (data) and $x (A64 instructions) are emitted here.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Replace, rather than duplicate, a symbol at the same offset.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1523
1524 /* We must sometimes convert a region marked as code to data during
1525 code alignment, if an odd number of bytes have to be padded. The
1526 code mapping symbol is pushed to an aligned address. */
1527
1528 static void
1529 insert_data_mapping_symbol (enum mstate state,
1530 valueT value, fragS * frag, offsetT bytes)
1531 {
1532 /* If there was already a mapping symbol, remove it. */
1533 if (frag->tc_frag_data.last_map != NULL
1534 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1535 frag->fr_address + value)
1536 {
1537 symbolS *symp = frag->tc_frag_data.last_map;
1538
1539 if (value == 0)
1540 {
1541 know (frag->tc_frag_data.first_map == symp);
1542 frag->tc_frag_data.first_map = NULL;
1543 }
1544 frag->tc_frag_data.last_map = NULL;
1545 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1546 }
1547
1548 make_mapping_symbol (MAP_DATA, value, frag);
1549 make_mapping_symbol (state, value + bytes, frag);
1550 }
1551
1552 static void mapping_state_2 (enum mstate state, int max_chars);
1553
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* When the very first emission in a text section is code, a $d
	 symbol is still needed at offset 0 if data was laid down there
	 without one (the MAP_DATA return above).  Only add the symbol
	 if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1595
1596 /* Same as mapping_state, but MAX_CHARS bytes have already been
1597 allocated. Put the mapping symbol that far back. */
1598
1599 static void
1600 mapping_state_2 (enum mstate state, int max_chars)
1601 {
1602 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1603
1604 if (!SEG_NORMAL (now_seg))
1605 return;
1606
1607 if (mapstate == state)
1608 /* The mapping symbol has already been emitted.
1609 There is nothing else to do. */
1610 return;
1611
1612 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1613 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1614 }
1615 #else
1616 #define mapping_state(x) /* nothing */
1617 #define mapping_state_2(x, y) /* nothing */
1618 #endif
1619
1620 /* Directives: sectioning and alignment. */
1621
1622 static void
1623 s_bss (int ignore ATTRIBUTE_UNUSED)
1624 {
1625 /* We don't support putting frags in the BSS segment, we fake it by
1626 marking in_bss, then looking at s_skip for clues. */
1627 subseg_set (bss_section, 0);
1628 demand_empty_rest_of_line ();
1629 mapping_state (MAP_DATA);
1630 }
1631
1632 static void
1633 s_even (int ignore ATTRIBUTE_UNUSED)
1634 {
1635 /* Never make frag if expect extra pass. */
1636 if (!need_pass_2)
1637 frag_align (1, 0, 0);
1638
1639 record_alignment (now_seg, 1);
1640
1641 demand_empty_rest_of_line ();
1642 }
1643
1644 /* Directives: Literal pools. */
1645
1646 static literal_pool *
1647 find_literal_pool (int size)
1648 {
1649 literal_pool *pool;
1650
1651 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1652 {
1653 if (pool->section == now_seg
1654 && pool->sub_section == now_subseg && pool->size == size)
1655 break;
1656 }
1657
1658 return pool;
1659 }
1660
1661 static literal_pool *
1662 find_or_make_literal_pool (int size)
1663 {
1664 /* Next literal pool ID number. */
1665 static unsigned int latest_pool_num = 1;
1666 literal_pool *pool;
1667
1668 pool = find_literal_pool (size);
1669
1670 if (pool == NULL)
1671 {
1672 /* Create a new pool. */
1673 pool = XNEW (literal_pool);
1674 if (!pool)
1675 return NULL;
1676
1677 /* Currently we always put the literal pool in the current text
1678 section. If we were generating "small" model code where we
1679 knew that all code and initialised data was within 1MB then
1680 we could output literals to mergeable, read-only data
1681 sections. */
1682
1683 pool->next_free_entry = 0;
1684 pool->section = now_seg;
1685 pool->sub_section = now_subseg;
1686 pool->size = size;
1687 pool->next = list_of_pools;
1688 pool->symbol = NULL;
1689
1690 /* Add it to the list. */
1691 list_of_pools = pool;
1692 }
1693
1694 /* New pools, and emptied pools, will have a NULL symbol. */
1695 if (pool->symbol == NULL)
1696 {
1697 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1698 (valueT) 0, &zero_address_frag);
1699 pool->id = latest_pool_num++;
1700 }
1701
1702 /* Done. */
1703 return pool;
1704 }
1705
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  On return, *EXP is
   rewritten into a symbol+offset reference to the pool entry.  */

static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  Constants and
     symbolic expressions are deduplicated separately.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP to reference the pool entry: the pool's label symbol
     plus the byte offset of the entry within the pool.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1765
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Give the already-created symbol SYMBOLP its NAME, SEGMENT, value VALU
   and owning FRAG, then append it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its storage.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  /* Give the object format and the target a chance to adjust the new
     symbol.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1816
1817
/* Implement the .ltorg/.pool directive: dump every non-empty literal
   pool (4-, 8- and 16-byte entry sizes) for the current (sub)section
   at the current location, then mark the pools as emptied.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Entry sizes 1<<2 .. 1<<4, i.e. 4, 8 and 16 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 byte makes the generated label impossible to clash
	 with a user symbol.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* The pool's symbol was created unresolved by
	 find_or_make_literal_pool; pin it to the current address now.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  /* The saved bignum copy is no longer needed once emitted.  */
	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1876
1877 #ifdef OBJ_ELF
1878 /* Forward declarations for functions below, in the MD interface
1879 section. */
1880 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1881 static struct reloc_table_entry * find_reloc_table_entry (char **);
1882
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Implement data directives such as .word/.long (NBYTES == 4) and
   .xword/.dword (NBYTES == 8): emit a comma-separated list of
   NBYTES-sized expressions.  A ":reloc_suffix:" on a symbol operand is
   recognised but currently rejected with a diagnostic.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* A bare directive with no operands is allowed.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:reloc:" or ":reloc:" suffix after a
	     symbol operand.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1938
1939 /* Mark symbol that it follows a variant PCS convention. */
1940
1941 static void
1942 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
1943 {
1944 char *name;
1945 char c;
1946 symbolS *sym;
1947 asymbol *bfdsym;
1948 elf_symbol_type *elfsym;
1949
1950 c = get_symbol_name (&name);
1951 if (!*name)
1952 as_bad (_("Missing symbol name in directive"));
1953 sym = symbol_find_or_make (name);
1954 restore_line_pointer (c);
1955 demand_empty_rest_of_line ();
1956 bfdsym = symbol_get_bfdsym (sym);
1957 elfsym = elf_symbol_from (bfd_asymbol_bfd (bfdsym), bfdsym);
1958 gas_assert (elfsym);
1959 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
1960 }
1961 #endif /* OBJ_ELF */
1962
/* Output a 32-bit word, but mark as an instruction.  Implements the
   .inst directive: each comma-separated constant operand is emitted as
   one 4-byte instruction word.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* A bare .inst with no operands is allowed.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* A64 instructions are always little-endian, so on a big-endian
	 target the word must be byte-swapped before emission.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2015
2016 static void
2017 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2018 {
2019 demand_empty_rest_of_line ();
2020 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2021 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2022 }
2023
2024 #ifdef OBJ_ELF
2025 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2026
2027 static void
2028 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2029 {
2030 expressionS exp;
2031
2032 expression (&exp);
2033 frag_grow (4);
2034 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2035 BFD_RELOC_AARCH64_TLSDESC_ADD);
2036
2037 demand_empty_rest_of_line ();
2038 }
2039
2040 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2041
2042 static void
2043 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2044 {
2045 expressionS exp;
2046
2047 /* Since we're just labelling the code, there's no need to define a
2048 mapping symbol. */
2049 expression (&exp);
2050 /* Make sure there is enough room in this frag for the following
2051 blr. This trick only works if the blr follows immediately after
2052 the .tlsdesc directive. */
2053 frag_grow (4);
2054 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2055 BFD_RELOC_AARCH64_TLSDESC_CALL);
2056
2057 demand_empty_rest_of_line ();
2058 }
2059
2060 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2061
2062 static void
2063 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2064 {
2065 expressionS exp;
2066
2067 expression (&exp);
2068 frag_grow (4);
2069 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2070 BFD_RELOC_AARCH64_TLSDESC_LDR);
2071
2072 demand_empty_rest_of_line ();
2073 }
2074 #endif /* OBJ_ELF */
2075
2076 static void s_aarch64_arch (int);
2077 static void s_aarch64_cpu (int);
2078 static void s_aarch64_arch_extension (int);
2079
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  /* Sectioning and alignment.  */
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* Literal pools.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Architecture selection.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  /* Raw instruction words.  */
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  /* TLS descriptor relocation markers.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* Data directives; third field is the size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* 16-bit IEEE 754 half-precision constants ('h' selects the format
     in float_cons).  */
  {"float16", float_cons, 'h'},
  {0, 0, 0}
};
2112 \f
2113
2114 /* Check whether STR points to a register name followed by a comma or the
2115 end of line; REG_TYPE indicates which register types are checked
2116 against. Return TRUE if STR is such a register name; otherwise return
2117 FALSE. The function does not intend to produce any diagnostics, but since
2118 the register parser aarch64_reg_parse, which is called by this function,
2119 does produce diagnostics, we call clear_error to clear any diagnostics
2120 that may be generated by aarch64_reg_parse.
2121 Also, the function returns FALSE directly if there is any user error
2122 present at the function entry. This prevents the existing diagnostics
2123 state from being spoiled.
2124 The function currently serves parse_constant_immediate and
2125 parse_big_immediate only. */
2126 static bfd_boolean
2127 reg_name_p (char *str, aarch64_reg_type reg_type)
2128 {
2129 int reg;
2130
2131 /* Prevent the diagnostics state from being spoiled. */
2132 if (error_p ())
2133 return FALSE;
2134
2135 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2136
2137 /* Clear the parsing error that may be set by the reg parser. */
2138 clear_error ();
2139
2140 if (reg == PARSE_FAIL)
2141 return FALSE;
2142
2143 skip_whitespace (str);
2144 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2145 return TRUE;
2146
2147 return FALSE;
2148 }
2149
2150 /* Parser functions used exclusively in instruction operands. */
2151
2152 /* Parse an immediate expression which may not be constant.
2153
2154 To prevent the expression parser from pushing a register name
2155 into the symbol table as an undefined symbol, firstly a check is
2156 done to find out whether STR is a register of type REG_TYPE followed
2157 by a comma or the end of line. Return FALSE if STR is such a string. */
2158
2159 static bfd_boolean
2160 parse_immediate_expression (char **str, expressionS *exp,
2161 aarch64_reg_type reg_type)
2162 {
2163 if (reg_name_p (*str, reg_type))
2164 {
2165 set_recoverable_error (_("immediate operand required"));
2166 return FALSE;
2167 }
2168
2169 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2170
2171 if (exp->X_op == O_absent)
2172 {
2173 set_fatal_syntax_error (_("missing immediate expression"));
2174 return FALSE;
2175 }
2176
2177 return TRUE;
2178 }
2179
2180 /* Constant immediate-value read function for use in insn parsing.
2181 STR points to the beginning of the immediate (with the optional
2182 leading #); *VAL receives the value. REG_TYPE says which register
2183 names should be treated as registers rather than as symbolic immediates.
2184
2185 Return TRUE on success; otherwise return FALSE. */
2186
2187 static bfd_boolean
2188 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2189 {
2190 expressionS exp;
2191
2192 if (! parse_immediate_expression (str, &exp, reg_type))
2193 return FALSE;
2194
2195 if (exp.X_op != O_constant)
2196 {
2197 set_syntax_error (_("constant expression required"));
2198 return FALSE;
2199 }
2200
2201 *val = exp.X_add_number;
2202 return TRUE;
2203 }
2204
/* Fold the IEEE single-precision bit pattern IMM down to the AArch64
   8-bit floating-point immediate encoding: bits 25:19 of IMM supply the
   low seven bits and the sign bit (bit 31) supplies bit 7.  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0]  */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31]    -> b[7]    */

  return low7 | sign;
}
2211
2212 /* Return TRUE if the single-precision floating-point value encoded in IMM
2213 can be expressed in the AArch64 8-bit signed floating-point format with
2214 3-bit exponent and normalized 4 bits of precision; in other words, the
2215 floating-point value must be expressable as
2216 (+/-) n / 16 * power (2, r)
2217 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2218
2219 static bfd_boolean
2220 aarch64_imm_float_p (uint32_t imm)
2221 {
2222 /* If a single-precision floating-point value has the following bit
2223 pattern, it can be expressed in the AArch64 8-bit floating-point
2224 format:
2225
2226 3 32222222 2221111111111
2227 1 09876543 21098765432109876543210
2228 n Eeeeeexx xxxx0000000000000000000
2229
2230 where n, e and each x are either 0 or 1 independently, with
2231 E == ~ e. */
2232
2233 uint32_t pattern;
2234
2235 /* Prepare the pattern for 'Eeeeee'. */
2236 if (((imm >> 30) & 0x1) == 0)
2237 pattern = 0x3e000000;
2238 else
2239 pattern = 0x40000000;
2240
2241 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2242 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2243 }
2244
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.  */

static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
		if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s: those significand bits have no home
     in a 23-bit float significand.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'.  The three '~' bits must be
     the complement of E, so the exponent fits in the float's smaller
     (8-bit, bias 127) exponent field.  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111: that exponent would map onto the
     float's all-ones (Inf/NaN) exponent.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  /* Repack sign, squeezed exponent and significand into float layout.  */
  *fpword = ((high32 & 0xc0000000)	/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));	/* 3 S bits.  */
  return TRUE;
}
2292
2293 /* Return true if we should treat OPERAND as a double-precision
2294 floating-point operand rather than a single-precision one. */
2295 static bfd_boolean
2296 double_precision_operand_p (const aarch64_opnd_info *operand)
2297 {
2298 /* Check for unsuffixed SVE registers, which are allowed
2299 for LDR and STR but not in instructions that require an
2300 immediate. We get better error messages if we arbitrarily
2301 pick one size, parse the immediate normally, and then
2302 report the match failure in the normal way. */
2303 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2304 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2305 }
2306
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* A double encoding must narrow losslessly to a single.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* More than 32 bits cannot be a single-precision encoding.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* Recoverable: the caller may retry the operand another way.  */
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal form: let the generic FP parser produce the encoding.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).
	 Assemble it from the littlenums, most significant first.  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2382
2383 /* Less-generic immediate-value read function with the possibility of loading
2384 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2385 instructions.
2386
2387 To prevent the expression parser from pushing a register name into the
2388 symbol table as an undefined symbol, a check is firstly done to find
2389 out whether STR is a register of type REG_TYPE followed by a comma or
2390 the end of line. Return FALSE if STR is such a register. */
2391
2392 static bfd_boolean
2393 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2394 {
2395 char *ptr = *str;
2396
2397 if (reg_name_p (ptr, reg_type))
2398 {
2399 set_syntax_error (_("immediate operand required"));
2400 return FALSE;
2401 }
2402
2403 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2404
2405 if (inst.reloc.exp.X_op == O_constant)
2406 *imm = inst.reloc.exp.X_add_number;
2407
2408 *str = ptr;
2409
2410 return TRUE;
2411 }
2412
2413 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2414 if NEED_LIBOPCODES is non-zero, the fixup will need
2415 assistance from the libopcodes. */
2416
2417 static inline void
2418 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2419 const aarch64_opnd_info *operand,
2420 int need_libopcodes_p)
2421 {
2422 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2423 reloc->opnd = operand->type;
2424 if (need_libopcodes_p)
2425 reloc->need_libopcodes_p = 1;
2426 };
2427
/* Return TRUE if the instruction needs to be fixed up later internally by
   the GAS; otherwise return FALSE.  Tests the relocation type recorded
   for the current instruction in inst.reloc.  */

static inline bfd_boolean
aarch64_gas_internal_fixup_p (void)
{
  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
2436
2437 /* Assign the immediate value to the relevant field in *OPERAND if
2438 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2439 needs an internal fixup in a later stage.
2440 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2441 IMM.VALUE that may get assigned with the constant. */
2442 static inline void
2443 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2444 aarch64_opnd_info *operand,
2445 int addr_off_p,
2446 int need_libopcodes_p,
2447 int skip_p)
2448 {
2449 if (reloc->exp.X_op == O_constant)
2450 {
2451 if (addr_off_p)
2452 operand->addr.offset.imm = reloc->exp.X_add_number;
2453 else
2454 operand->imm.value = reloc->exp.X_add_number;
2455 reloc->type = BFD_RELOC_UNUSED;
2456 }
2457 else
2458 {
2459 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2460 /* Tell libopcodes to ignore this operand or not. This is helpful
2461 when one of the operands needs to be fixed up later but we need
2462 libopcodes to check the other operands. */
2463 operand->skip = skip_p;
2464 }
2465 }
2466
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;			/* Modifier text, without the colons.  */
  int pc_rel;				/* Non-zero for PC-relative relocations.  */
  bfd_reloc_code_real_type adr_type;	/* Reloc used with ADR.  */
  bfd_reloc_code_real_type adrp_type;	/* Reloc used with ADRP.  */
  bfd_reloc_code_real_type movw_type;	/* Reloc used with MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;	/* Reloc used with ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;	/* Reloc used with load/store.  */
  bfd_reloc_code_real_type ld_literal_type;  /* Reloc used with LDR (literal).
						A 0 in any field above means the
						modifier is not allowed for that
						instruction class.  */
};
2486
static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3014
3015 /* Given the address of a pointer pointing to the textual name of a
3016 relocation as may appear in assembler source, attempt to find its
3017 details in reloc_table. The pointer will be updated to the character
3018 after the trailing colon. On failure, NULL will be returned;
3019 otherwise return the reloc_table_entry. */
3020
3021 static struct reloc_table_entry *
3022 find_reloc_table_entry (char **str)
3023 {
3024 unsigned int i;
3025 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3026 {
3027 int length = strlen (reloc_table[i].name);
3028
3029 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3030 && (*str)[length] == ':')
3031 {
3032 *str += (length + 1);
3033 return &reloc_table[i];
3034 }
3035 }
3036
3037 return NULL;
3038 }
3039
/* Mode argument to parse_shift and parser_shifter_operand.  Selects which
   shift/extend operators (and which bare forms) the parser accepts.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3054
/* Parse a <shift> operator on an AArch64 data processing instruction.
   On success, record the operator kind and amount in OPERAND->shifter
   and advance *STR past the parsed text.
   Return TRUE on success; otherwise return FALSE.  */
static bfd_boolean
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan past the alphabetic operator name.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return FALSE;
    }

  shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return FALSE;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only meaningful for the SIMD modified-immediate forms.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return FALSE;
    }

  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return FALSE;
    }

  /* Reject operator kinds that MODE does not permit.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return FALSE;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return FALSE;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    /* No amount expression follows in these two cases.  */
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* Only an extend operator may omit its amount, and only when no
	 immediate prefix introduced an (empty) expression.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return FALSE;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return FALSE;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return FALSE;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return TRUE;
}
3227
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return FALSE;

  p = *str;

  /* Accept an immediate expression.  */
  if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
    return FALSE;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return FALSE;

  /* Not accept any shifter for logical immediate values.  */
  /* NOTE(review): if parse_shift fails here it records an error but this
     function still returns TRUE with *str advanced past the comma --
     confirm this fall-through is the intended behaviour.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return FALSE;
    }

  *str = p;
  return TRUE;
}
3268
3269 /* Parse a <shifter_operand> for a data processing instruction:
3270
3271 <Rm>
3272 <Rm>, <shift>
3273 #<immediate>
3274 #<immediate>, LSL #imm
3275
3276 where <shift> is handled by parse_shift above, and the last two
3277 cases are handled by the function above.
3278
3279 Validation of immediate operands is deferred to md_apply_fix.
3280
3281 Return TRUE on success; otherwise return FALSE. */
3282
3283 static bfd_boolean
3284 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3285 enum parse_shift_mode mode)
3286 {
3287 const reg_entry *reg;
3288 aarch64_opnd_qualifier_t qualifier;
3289 enum aarch64_operand_class opd_class
3290 = aarch64_get_operand_class (operand->type);
3291
3292 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3293 if (reg)
3294 {
3295 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3296 {
3297 set_syntax_error (_("unexpected register in the immediate operand"));
3298 return FALSE;
3299 }
3300
3301 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3302 {
3303 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3304 return FALSE;
3305 }
3306
3307 operand->reg.regno = reg->number;
3308 operand->qualifier = qualifier;
3309
3310 /* Accept optional shift operation on register. */
3311 if (! skip_past_comma (str))
3312 return TRUE;
3313
3314 if (! parse_shift (str, operand, mode))
3315 return FALSE;
3316
3317 return TRUE;
3318 }
3319 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3320 {
3321 set_syntax_error
3322 (_("integer register expected in the extended/shifted operand "
3323 "register"));
3324 return FALSE;
3325 }
3326
3327 /* We have a shifted immediate variable. */
3328 return parse_shifter_operand_imm (str, operand, mode);
3329 }
3330
/* Parse a shifter operand that may be prefixed by a relocation modifier
   such as ":lo12:" (optionally introduced by '#'); plain operands are
   punted to parse_shifter_operand.
   Return TRUE on success; return FALSE otherwise.  */

static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip the '#' and/or the ':' introducing the modifier.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      /* Only modifiers with an ADD-class relocation make sense here.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3391
3392 /* Parse all forms of an address expression. Information is written
3393 to *OPERAND and/or inst.reloc.
3394
3395 The A64 instruction set has the following addressing modes:
3396
3397 Offset
3398 [base] // in SIMD ld/st structure
3399 [base{,#0}] // in ld/st exclusive
3400 [base{,#imm}]
3401 [base,Xm{,LSL #imm}]
3402 [base,Xm,SXTX {#imm}]
3403 [base,Wm,(S|U)XTW {#imm}]
3404 Pre-indexed
3405 [base,#imm]!
3406 Post-indexed
3407 [base],#imm
3408 [base],Xm // in SIMD ld/st structure
3409 PC-relative (literal)
3410 label
3411 SVE:
3412 [base,#imm,MUL VL]
3413 [base,Zm.D{,LSL #imm}]
3414 [base,Zm.S,(S|U)XTW {#imm}]
3415 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3416 [Zn.S,#imm]
3417 [Zn.D,#imm]
3418 [Zn.S{, Xm}]
3419 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3420 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3421 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3422
3423 (As a convenience, the notation "=immediate" is permitted in conjunction
3424 with the pc-relative literal load instructions to automatically place an
3425 immediate value or symbolic address in a nearby literal pool and generate
3426 a hidden label which references it.)
3427
3428 Upon a successful parsing, the address structure in *OPERAND will be
3429 filled in the following way:
3430
3431 .base_regno = <base>
3432 .offset.is_reg // 1 if the offset is a register
3433 .offset.imm = <imm>
3434 .offset.regno = <Rm>
3435
3436 For different addressing modes defined in the A64 ISA:
3437
3438 Offset
3439 .pcrel=0; .preind=1; .postind=0; .writeback=0
3440 Pre-indexed
3441 .pcrel=0; .preind=1; .postind=0; .writeback=1
3442 Post-indexed
3443 .pcrel=0; .preind=0; .postind=1; .writeback=1
3444 PC-relative (literal)
3445 .pcrel=1; .preind=1; .postind=0; .writeback=0
3446
3447 The shift/extension information, if any, will be stored in .shifter.
3448 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3449 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3450 corresponding register.
3451
3452 BASE_TYPE says which types of base register should be accepted and
3453 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3454 is the type of shifter that is allowed for immediate offsets,
3455 or SHIFTED_NONE if none.
3456
3457 In all other respects, it is the caller's responsibility to check
3458 for addressing modes not supported by the instruction, and to set
3459 inst.reloc.type. */
3460
static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[', so this is =immediate or label, i.e. one of the
	 PC-relative forms.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  /* The same modifier maps to different relocations for ADR
	     than for the load-literal instructions.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  /* Parse the base register; BASE_TYPE restricts which registers are
     acceptable here.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return FALSE;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	      /* The SVE2 vector-plus-scalar form [Zn.S, Xm] is the one
		 permitted exception to the requirement that base and
		 offset have the same element size.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return FALSE;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return FALSE;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return FALSE;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      /* A post-index register offset must be a plain 64-bit GPR.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return FALSE;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  /* Reject [Rn]!  */
	  set_syntax_error (_("missing offset in the pre-indexed address"));
	  return FALSE;
	}

      operand->addr.preind = 1;
      if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	{
	  operand->addr.offset.is_reg = 1;
	  operand->addr.offset.regno = REG_ZR;
	  *offset_qualifier = AARCH64_OPND_QLF_X;
	}
      else
	{
	  /* Synthesize the implicit #0 offset.  */
	  inst.reloc.exp.X_op = O_constant;
	  inst.reloc.exp.X_add_number = 0;
	}
    }

  *str = p;
  return TRUE;
}
3748
3749 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3750 on success. */
3751 static bfd_boolean
3752 parse_address (char **str, aarch64_opnd_info *operand)
3753 {
3754 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3755 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3756 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3757 }
3758
3759 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3760 The arguments have the same meaning as for parse_address_main.
3761 Return TRUE on success. */
3762 static bfd_boolean
3763 parse_sve_address (char **str, aarch64_opnd_info *operand,
3764 aarch64_opnd_qualifier_t *base_qualifier,
3765 aarch64_opnd_qualifier_t *offset_qualifier)
3766 {
3767 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3768 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3769 SHIFTED_MUL_VL);
3770 }
3771
3772 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3773 Return TRUE on success; otherwise return FALSE. */
3774 static bfd_boolean
3775 parse_half (char **str, int *internal_fixup_p)
3776 {
3777 char *p = *str;
3778
3779 skip_past_char (&p, '#');
3780
3781 gas_assert (internal_fixup_p);
3782 *internal_fixup_p = 0;
3783
3784 if (*p == ':')
3785 {
3786 struct reloc_table_entry *entry;
3787
3788 /* Try to parse a relocation. Anything else is an error. */
3789 ++p;
3790 if (!(entry = find_reloc_table_entry (&p)))
3791 {
3792 set_syntax_error (_("unknown relocation modifier"));
3793 return FALSE;
3794 }
3795
3796 if (entry->movw_type == 0)
3797 {
3798 set_syntax_error
3799 (_("this relocation modifier is not allowed on this instruction"));
3800 return FALSE;
3801 }
3802
3803 inst.reloc.type = entry->movw_type;
3804 }
3805 else
3806 *internal_fixup_p = 1;
3807
3808 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3809 return FALSE;
3810
3811 *str = p;
3812 return TRUE;
3813 }
3814
3815 /* Parse an operand for an ADRP instruction:
3816 ADRP <Xd>, <label>
3817 Return TRUE on success; otherwise return FALSE. */
3818
3819 static bfd_boolean
3820 parse_adrp (char **str)
3821 {
3822 char *p;
3823
3824 p = *str;
3825 if (*p == ':')
3826 {
3827 struct reloc_table_entry *entry;
3828
3829 /* Try to parse a relocation. Anything else is an error. */
3830 ++p;
3831 if (!(entry = find_reloc_table_entry (&p)))
3832 {
3833 set_syntax_error (_("unknown relocation modifier"));
3834 return FALSE;
3835 }
3836
3837 if (entry->adrp_type == 0)
3838 {
3839 set_syntax_error
3840 (_("this relocation modifier is not allowed on this instruction"));
3841 return FALSE;
3842 }
3843
3844 inst.reloc.type = entry->adrp_type;
3845 }
3846 else
3847 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3848
3849 inst.reloc.pc_rel = 1;
3850
3851 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3852 return FALSE;
3853
3854 *str = p;
3855 return TRUE;
3856 }
3857
3858 /* Miscellaneous. */
3859
3860 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3861 of SIZE tokens in which index I gives the token for field value I,
3862 or is null if field value I is invalid. REG_TYPE says which register
3863 names should be treated as registers rather than as symbolic immediates.
3864
3865 Return true on success, moving *STR past the operand and storing the
3866 field value in *VAL. */
3867
3868 static int
3869 parse_enum_string (char **str, int64_t *val, const char *const *array,
3870 size_t size, aarch64_reg_type reg_type)
3871 {
3872 expressionS exp;
3873 char *p, *q;
3874 size_t i;
3875
3876 /* Match C-like tokens. */
3877 p = q = *str;
3878 while (ISALNUM (*q))
3879 q++;
3880
3881 for (i = 0; i < size; ++i)
3882 if (array[i]
3883 && strncasecmp (array[i], p, q - p) == 0
3884 && array[i][q - p] == 0)
3885 {
3886 *val = i;
3887 *str = q;
3888 return TRUE;
3889 }
3890
3891 if (!parse_immediate_expression (&p, &exp, reg_type))
3892 return FALSE;
3893
3894 if (exp.X_op == O_constant
3895 && (uint64_t) exp.X_add_number < size)
3896 {
3897 *val = exp.X_add_number;
3898 *str = p;
3899 return TRUE;
3900 }
3901
3902 /* Use the default error for this operand. */
3903 return FALSE;
3904 }
3905
3906 /* Parse an option for a preload instruction. Returns the encoding for the
3907 option, or PARSE_FAIL. */
3908
3909 static int
3910 parse_pldop (char **str)
3911 {
3912 char *p, *q;
3913 const struct aarch64_name_value_pair *o;
3914
3915 p = q = *str;
3916 while (ISALNUM (*q))
3917 q++;
3918
3919 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3920 if (!o)
3921 return PARSE_FAIL;
3922
3923 *str = q;
3924 return o->value;
3925 }
3926
3927 /* Parse an option for a barrier instruction. Returns the encoding for the
3928 option, or PARSE_FAIL. */
3929
3930 static int
3931 parse_barrier (char **str)
3932 {
3933 char *p, *q;
3934 const asm_barrier_opt *o;
3935
3936 p = q = *str;
3937 while (ISALPHA (*q))
3938 q++;
3939
3940 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3941 if (!o)
3942 return PARSE_FAIL;
3943
3944 *str = q;
3945 return o->value;
3946 }
3947
3948 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3949 return 0 if successful. Otherwise return PARSE_FAIL. */
3950
3951 static int
3952 parse_barrier_psb (char **str,
3953 const struct aarch64_name_value_pair ** hint_opt)
3954 {
3955 char *p, *q;
3956 const struct aarch64_name_value_pair *o;
3957
3958 p = q = *str;
3959 while (ISALPHA (*q))
3960 q++;
3961
3962 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3963 if (!o)
3964 {
3965 set_fatal_syntax_error
3966 ( _("unknown or missing option to PSB"));
3967 return PARSE_FAIL;
3968 }
3969
3970 if (o->value != 0x11)
3971 {
3972 /* PSB only accepts option name 'CSYNC'. */
3973 set_syntax_error
3974 (_("the specified option is not accepted for PSB"));
3975 return PARSE_FAIL;
3976 }
3977
3978 *str = q;
3979 *hint_opt = o;
3980 return 0;
3981 }
3982
3983 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
3984 return 0 if successful. Otherwise return PARSE_FAIL. */
3985
3986 static int
3987 parse_bti_operand (char **str,
3988 const struct aarch64_name_value_pair ** hint_opt)
3989 {
3990 char *p, *q;
3991 const struct aarch64_name_value_pair *o;
3992
3993 p = q = *str;
3994 while (ISALPHA (*q))
3995 q++;
3996
3997 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3998 if (!o)
3999 {
4000 set_fatal_syntax_error
4001 ( _("unknown option to BTI"));
4002 return PARSE_FAIL;
4003 }
4004
4005 switch (o->value)
4006 {
4007 /* Valid BTI operands. */
4008 case HINT_OPD_C:
4009 case HINT_OPD_J:
4010 case HINT_OPD_JC:
4011 break;
4012
4013 default:
4014 set_syntax_error
4015 (_("unknown option to BTI"));
4016 return PARSE_FAIL;
4017 }
4018
4019 *str = q;
4020 *hint_opt = o;
4021 return 0;
4022 }
4023
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-null, the flags of a recognized register (or 0 for an
   implementation-defined one) are stored through it.  */

static int
parse_sys_reg (char **str, struct hash_control *sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, truncating it to
     fit the buffer.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';
  /* Assert that BUF be large enough.  */
  gas_assert (p - buf == q - *str);

  o = hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the five fields into the op0:op1:CRn:CRm:op2 encoding.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* A known name; diagnose uses that the selected processor does not
	 support, or that are deprecated, but still return the value.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4091
4092 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4093 for the option, or NULL. */
4094
4095 static const aarch64_sys_ins_reg *
4096 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
4097 {
4098 char *p, *q;
4099 char buf[32];
4100 const aarch64_sys_ins_reg *o;
4101
4102 p = buf;
4103 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4104 if (p < buf + 31)
4105 *p++ = TOLOWER (*q);
4106 *p = '\0';
4107
4108 o = hash_find (sys_ins_regs, buf);
4109 if (!o)
4110 return NULL;
4111
4112 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
4113 as_bad (_("selected processor does not support system register "
4114 "name '%s'"), buf);
4115
4116 *str = q;
4117 return o;
4118 }
4119 \f
/* Consume the single character CHR from STR, or jump to the local
   `failure' label.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into VAL, or jump to `failure'.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE, recording its
   number and qualifier in INFO, or jump to `failure'.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check, or jump to
   `failure'.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or jump to `failure'.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic constant from ARRAY into VAL, or jump to `failure'.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR and jump to `failure' if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4171 \f
/* Place the 12-bit immediate VALUE of an add/sub (immediate)
   instruction into its field, which starts at bit 10.  */
static inline uint32_t
encode_addsub_imm (uint32_t value)
{
  return value << 10;
}
4178
/* Place the shift-amount COUNT of an add/sub (immediate) instruction
   into its field, which starts at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t count)
{
  return count << 22;
}
4185
4186
/* Split the 21-bit immediate VALUE of an ADR instruction into its two
   fields: the low two bits go to immlo (bits [30:29]) and the
   remaining 19 bits to immhi (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t value)
{
  uint32_t immlo = (value & 0x3) << 29;		   /* [1:0]  -> [30:29] */
  uint32_t immhi = (value & (0x7ffff << 2)) << 3;  /* [20:2] -> [23:5]  */

  return immlo | immhi;
}
4194
/* Place the 16-bit immediate VALUE of a move wide (MOVZ/MOVN/MOVK)
   instruction into its field, which starts at bit 5.  */
static inline uint32_t
encode_movw_imm (uint32_t value)
{
  return value << 5;
}
4201
/* Keep only the low 26 bits of OFFSET, the offset field of an
   unconditional branch, which occupies bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t offset)
{
  return offset & 0x3ffffff;
}
4208
/* Place the low 19 bits of OFFSET, the offset of a conditional branch
   or compare & branch, into its field starting at bit 5.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t offset)
{
  return (offset & 0x7ffff) << 5;
}
4215
/* Place the low 19 bits of OFFSET, the offset of a load-literal
   instruction, into its field starting at bit 5.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t offset)
{
  return (offset & 0x7ffff) << 5;
}
4222
/* Place the low 14 bits of OFFSET, the offset of a test & branch
   instruction, into its field starting at bit 5.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t offset)
{
  return (offset & 0x3fff) << 5;
}
4229
/* Place the 16-bit immediate VALUE of an svc/hvc/smc instruction into
   its field, which starts at bit 5.  */
static inline uint32_t
encode_svc_imm (uint32_t value)
{
  return value << 5;
}
4236
/* Flip bit 30 of OPCODE, which toggles between the add(s) and sub(s)
   encodings.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t insn)
{
  return insn ^ (1u << 30);
}
4243
/* Set bit 30 of a MOVZ/MOVN opcode, yielding the MOVZ form.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t insn)
{
  return insn | (1u << 30);
}
4249
/* Clear bit 30 of a MOVZ/MOVN opcode, yielding the MOVN form.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t insn)
{
  return insn & ~(1u << 30);
}
4255
4256 /* Overall per-instruction processing. */
4257
4258 /* We need to be able to fix up arbitrary expressions in some statements.
4259 This is so that we can handle symbols that are an arbitrary distance from
4260 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4261 which returns part of an address in a form which will be valid for
4262 a data instruction. We do this by pushing the expression into a symbol
4263 in the expr_section, and creating a fix for that. */
4264
4265 static fixS *
4266 fix_new_aarch64 (fragS * frag,
4267 int where,
4268 short int size, expressionS * exp, int pc_rel, int reloc)
4269 {
4270 fixS *new_fix;
4271
4272 switch (exp->X_op)
4273 {
4274 case O_constant:
4275 case O_symbol:
4276 case O_add:
4277 case O_subtract:
4278 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4279 break;
4280
4281 default:
4282 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4283 pc_rel, reloc);
4284 break;
4285 }
4286 return new_fix;
4287 }
4288 \f
/* Diagnostics on operands errors.  */

/* By default, output the verbose error message when an operand error is
   found; this can be disabled with the -mno-verbose-error command-line
   option.  */
static int verbose_error_p = 1;
4294
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The entries are
   indexed by enum aarch64_operand_error_kind, so their order must match
   that enumeration.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4310
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enumerators being declared in
     ascending order of severity; assert that ordering here.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4331
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4360
4361 static void
4362 reset_aarch64_instruction (aarch64_instruction *instruction)
4363 {
4364 memset (instruction, '\0', sizeof (aarch64_instruction));
4365 instruction->reloc.type = BFD_RELOC_UNUSED;
4366 }
4367
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error; at most one record exists per opcode
   template (see add_operand_error_record).  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode template the error is
					   recorded against.  */
  aarch64_operand_error detail;		/* Description of the error.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Head and tail of a singly-linked list of operand_error_record.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled from error reports of earlier lines.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4399
4400 /* Initialize the data structure that stores the operand mismatch
4401 information on assembling one line of the assembly code. */
4402 static void
4403 init_operand_error_report (void)
4404 {
4405 if (operand_error_report.head != NULL)
4406 {
4407 gas_assert (operand_error_report.tail != NULL);
4408 operand_error_report.tail->next = free_opnd_error_record_nodes;
4409 free_opnd_error_record_nodes = operand_error_report.head;
4410 operand_error_report.head = NULL;
4411 operand_error_report.tail = NULL;
4412 return;
4413 }
4414 gas_assert (operand_error_report.tail == NULL);
4415 }
4416
4417 /* Return TRUE if some operand error has been recorded during the
4418 parsing of the current assembly line using the opcode *OPCODE;
4419 otherwise return FALSE. */
4420 static inline bfd_boolean
4421 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4422 {
4423 operand_error_record *record = operand_error_report.head;
4424 return record && record->opcode == opcode;
4425 }
4426
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  Note that records are always inserted at the head,
     so if a record for OPCODE exists it is exactly the current head,
     which RECORD already points at.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Reuse a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Store (or overwrite) the error detail in the opcode's record.  */
  record->detail = new_record->detail;
}
4478
4479 static inline void
4480 record_operand_error_info (const aarch64_opcode *opcode,
4481 aarch64_operand_error *error_info)
4482 {
4483 operand_error_record record;
4484 record.opcode = opcode;
4485 record.detail = *error_info;
4486 add_operand_error_record (&record);
4487 }
4488
4489 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4490 error message *ERROR, for operand IDX (count from 0). */
4491
4492 static void
4493 record_operand_error (const aarch64_opcode *opcode, int idx,
4494 enum aarch64_operand_error_kind kind,
4495 const char* error)
4496 {
4497 aarch64_operand_error info;
4498 memset(&info, 0, sizeof (info));
4499 info.index = idx;
4500 info.kind = kind;
4501 info.error = error;
4502 info.non_fatal = FALSE;
4503 record_operand_error_info (opcode, &info);
4504 }
4505
4506 static void
4507 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4508 enum aarch64_operand_error_kind kind,
4509 const char* error, const int *extra_data)
4510 {
4511 aarch64_operand_error info;
4512 info.index = idx;
4513 info.kind = kind;
4514 info.error = error;
4515 info.data[0] = extra_data[0];
4516 info.data[1] = extra_data[1];
4517 info.data[2] = extra_data[2];
4518 info.non_fatal = FALSE;
4519 record_operand_error_info (opcode, &info);
4520 }
4521
4522 static void
4523 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4524 const char* error, int lower_bound,
4525 int upper_bound)
4526 {
4527 int data[3] = {lower_bound, upper_bound, 0};
4528 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4529 error, data);
4530 }
4531
4532 /* Remove the operand error record for *OPCODE. */
4533 static void ATTRIBUTE_UNUSED
4534 remove_operand_error_record (const aarch64_opcode *opcode)
4535 {
4536 if (opcode_has_operand_error_p (opcode))
4537 {
4538 operand_error_record* record = operand_error_report.head;
4539 gas_assert (record != NULL && operand_error_report.tail != NULL);
4540 operand_error_report.head = record->next;
4541 record->next = free_opnd_error_record_nodes;
4542 free_opnd_error_record_nodes = record;
4543 if (operand_error_report.head == NULL)
4544 {
4545 gas_assert (operand_error_report.tail == record);
4546 operand_error_report.tail = NULL;
4547 }
4548 }
4549 }
4550
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;			/* Fall back on the first sequence.  */

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers in this sequence match the
	 instruction's.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Keep the sequence with the most matches; ties keep the earlier
	 sequence.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
4600
4601 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4602 corresponding operands in *INSTR. */
4603
4604 static inline void
4605 assign_qualifier_sequence (aarch64_inst *instr,
4606 const aarch64_opnd_qualifier_t *qualifiers)
4607 {
4608 int i = 0;
4609 int num_opnds = aarch64_num_of_operands (instr->opcode);
4610 gas_assert (num_opnds);
4611 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4612 instr->operands[i].qualifier = *qualifiers;
4613 }
4614
4615 /* Print operands for the diagnosis purpose. */
4616
4617 static void
4618 print_operands (char *buf, const aarch64_opcode *opcode,
4619 const aarch64_opnd_info *opnds)
4620 {
4621 int i;
4622
4623 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4624 {
4625 char str[128];
4626
4627 /* We regard the opcode operand info more, however we also look into
4628 the inst->operands to support the disassembling of the optional
4629 operand.
4630 The two operand code should be the same in all cases, apart from
4631 when the operand can be optional. */
4632 if (opcode->operands[i] == AARCH64_OPND_NIL
4633 || opnds[i].type == AARCH64_OPND_NIL)
4634 break;
4635
4636 /* Generate the operand string in STR. */
4637 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4638 NULL);
4639
4640 /* Delimiter. */
4641 if (str[0] != '\0')
4642 strcat (buf, i == 0 ? " " : ", ");
4643
4644 /* Append the operand string. */
4645 strcat (buf, str);
4646 }
4647 }
4648
/* Send to stderr a string as information.  Prefixed with the current
   file name (and line number when known), mirroring as_warn/as_bad
   formatting so hints line up with diagnostics.  */

static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file = as_where (&line);
  va_list args;

  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4672
/* Output one operand error record.

   RECORD describes one operand mismatch collected against one opcode
   template; STR is the user's assembly line, echoed in the diagnostic.
   Non-fatal records are reported via as_warn, fatal ones via as_bad.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX < 0 means the error is not tied to a specific operand.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g.  given
	     'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	     'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail here: this is the invalid-variant
	     path, so the assert checks that encode indeed rejected it.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_(" did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_(" %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_(" other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_(" %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the lower/upper bounds recorded by
	 record_operand_out_of_range_error.  Equal bounds mean exactly
	 one legal value.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected number of registers.  */
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] holds the required alignment.  */
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4849
4850 /* Process and output the error message about the operand mismatching.
4851
4852 When this function is called, the operand error information had
4853 been collected for an assembly line and there will be multiple
4854 errors in the case of multiple instruction templates; output the
4855 error message that most closely describes the problem.
4856
4857 The errors to be printed can be filtered on printing all errors
4858 or only non-fatal errors. This distinction has to be made because
4859 the error buffer may already be filled with fatal errors we don't want to
4860 print due to the different instruction templates. */
4861
4862 static void
4863 output_operand_error_report (char *str, bfd_boolean non_fatal_only)
4864 {
4865 int largest_error_pos;
4866 const char *msg = NULL;
4867 enum aarch64_operand_error_kind kind;
4868 operand_error_record *curr;
4869 operand_error_record *head = operand_error_report.head;
4870 operand_error_record *record = NULL;
4871
4872 /* No error to report. */
4873 if (head == NULL)
4874 return;
4875
4876 gas_assert (head != NULL && operand_error_report.tail != NULL);
4877
4878 /* Only one error. */
4879 if (head == operand_error_report.tail)
4880 {
4881 /* If the only error is a non-fatal one and we don't want to print it,
4882 just exit. */
4883 if (!non_fatal_only || head->detail.non_fatal)
4884 {
4885 DEBUG_TRACE ("single opcode entry with error kind: %s",
4886 operand_mismatch_kind_names[head->detail.kind]);
4887 output_operand_error_record (head, str);
4888 }
4889 return;
4890 }
4891
4892 /* Find the error kind of the highest severity. */
4893 DEBUG_TRACE ("multiple opcode entries with error kind");
4894 kind = AARCH64_OPDE_NIL;
4895 for (curr = head; curr != NULL; curr = curr->next)
4896 {
4897 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4898 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4899 if (operand_error_higher_severity_p (curr->detail.kind, kind)
4900 && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
4901 kind = curr->detail.kind;
4902 }
4903
4904 gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
4905
4906 /* Pick up one of errors of KIND to report. */
4907 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4908 for (curr = head; curr != NULL; curr = curr->next)
4909 {
4910 /* If we don't want to print non-fatal errors then don't consider them
4911 at all. */
4912 if (curr->detail.kind != kind
4913 || (non_fatal_only && !curr->detail.non_fatal))
4914 continue;
4915 /* If there are multiple errors, pick up the one with the highest
4916 mismatching operand index. In the case of multiple errors with
4917 the equally highest operand index, pick up the first one or the
4918 first one with non-NULL error message. */
4919 if (curr->detail.index > largest_error_pos
4920 || (curr->detail.index == largest_error_pos && msg == NULL
4921 && curr->detail.error != NULL))
4922 {
4923 largest_error_pos = curr->detail.index;
4924 record = curr;
4925 msg = record->detail.error;
4926 }
4927 }
4928
4929 /* The way errors are collected in the back-end is a bit non-intuitive. But
4930 essentially, because each operand template is tried recursively you may
4931 always have errors collected from the previous tried OPND. These are
4932 usually skipped if there is one successful match. However now with the
4933 non-fatal errors we have to ignore those previously collected hard errors
4934 when we're only interested in printing the non-fatal ones. This condition
4935 prevents us from printing errors that are not appropriate, since we did
4936 match a condition, but it also has warnings that it wants to print. */
4937 if (non_fatal_only && !record)
4938 return;
4939
4940 gas_assert (largest_error_pos != -2 && record != NULL);
4941 DEBUG_TRACE ("Pick up error kind %s to report",
4942 operand_mismatch_kind_names[record->detail.kind]);
4943
4944 /* Output. */
4945 output_operand_error_record (record, str);
4946 }
4947 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *out = (unsigned char *) buf;
  int byte;

  /* Emit the four bytes least-significant first, regardless of the
     host's endianness.  */
  for (byte = 0; byte < 4; byte++)
    out[byte] = (insn >> (byte * 8)) & 0xff;
}
4958
/* Read a 32-bit AArch64 instruction from BUF (stored little-endian)
   and return it as a host-order integer.  */

static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *where = (unsigned char *) buf;
  uint32_t result;

  /* Widen each byte to uint32_t before shifting: the promoted-int
     expression "where[3] << 24" overflows signed int for byte values
     >= 0x80, which is undefined behaviour in C.  */
  result = (where[0]
	    | ((uint32_t) where[1] << 8)
	    | ((uint32_t) where[2] << 16)
	    | ((uint32_t) where[3] << 24));
  return result;
}
4967
/* Emit the 4-byte encoded instruction in inst.base.value into the
   current frag, recording a fixup when a relocation is pending.
   NEW_INST, if non-NULL, is attached to the fixup so later fixup
   processing can access the full instruction description.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  to = frag_more (INSN_SIZE);

  /* Note in the frag's per-target data that we have emitted into it.  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      /* Internal (pseudo) fixups carry the operand and flags so the
	 real relocation can be determined later.  */
      if (aarch64_gas_internal_fixup_p ())
	{
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
5001
/* Link together opcodes of the same name.  Each hash-table entry for a
   mnemonic heads a singly-linked list of all opcode templates sharing
   that mnemonic.  */

struct templates
{
  aarch64_opcode *opcode;	/* One opcode template for the mnemonic.  */
  struct templates *next;	/* Next template with the same name.  */
};

typedef struct templates templates;
5011
5012 static templates *
5013 lookup_mnemonic (const char *start, int len)
5014 {
5015 templates *templ = NULL;
5016
5017 templ = hash_find_n (aarch64_ops_hsh, start, len);
5018 return templ;
5019 }
5020
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.

   On success *STR is advanced past the mnemonic (and any condition
   suffix); on failure 0/NULL is returned and *STR points at the
   offending position.  Also sets inst.cond from any ".<cond>"
   suffix, defaulting to COND_ALWAYS.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Empty mnemonic, or one that starts with '.', is invalid.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      /* Text between the dot and the end must be a known condition name.  */
      cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown suffix: leave *STR at the dot for the caller's
	     diagnostics.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* 13 + 2 = 15 chars fit in condname[16]; no NUL needed since the
	 lookup is length-based.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5084
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.

   Returns AARCH64_OPND_QLF_NIL (after recording an error via
   first_error) when the vector arrangement is invalid.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the width-dependent offset is
     added to these below.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate suffixes /z and /m map straight to qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Vector element register.  */
      /* Relies on AARCH64_OPND_QLF_S_B..S_Q being contiguous and in the
	 same order as NT_b..NT_q.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit vector register arrangements exist.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5155
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.

   The default value is taken from the opcode entry; how it is stored in
   *OPERAND depends on the operand class (register number, register lane,
   immediate, etc.).  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Register-lane operands: the default is the lane register number.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* Scaled SVE pattern: an omitted operand implies "MUL #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate also cancels any pending reloc.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Table-backed operands: the default indexes the options table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5253
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   On success the shift amount implied by the relocation (G0/G1/G2/G3
   selecting bits 0-15/16-31/32-47/48-63) is stored into operand 1's
   shifter.  Relocation types incompatible with MOVK or with 32-bit
   destination registers are rejected with a syntax error.  */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* Reject relocation types that are not valid on MOVK.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  switch (inst.reloc.type)
    {
    /* Group 0 relocations: bits [15:0].  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* Group 1 relocations: bits [31:16].  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* Group 2 relocations: bits [47:32]; 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    /* Group 3 relocations: bits [63:48]; 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5355
/* A primitive log calculator.  Return log2 of SIZE for SIZE in
   {1, 2, 4, 8, 16}; any other value asserts and returns -1.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* ls[n - 1] is log2 (n); 0xff (i.e. (unsigned char) -1) marks
     non-power-of-two sizes.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Also guard SIZE == 0: the previous upper-bound-only check let
     ls[size - 1] read ls[UINT_MAX], an out-of-bounds access.  */
  if (size < 1 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5371
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The real relocation depends on the transfer size, which is derived
   from the qualifier of operand 1 (the address operand); the row of
   the table below is selected by which of the five pseudo reloc types
   was parsed.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: plain LO12, TLSLD DTPREL, TLSLD DTPREL_NC, TLSLE TPREL,
     TLSLE TPREL_NC.  Columns: log2 of the transfer size (8/16/32/64/128
     bits).  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1 has no explicit qualifier, deduce it from operand 0.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* TLS rows have no 128-bit entry (BFD_RELOC_AARCH64_NONE).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5450
5451 /* Check whether a register list REGINFO is valid. The registers must be
5452 numbered in increasing order (modulo 32), in increments of one or two.
5453
5454 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5455 increments of two.
5456
5457 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5458
5459 static bfd_boolean
5460 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5461 {
5462 uint32_t i, nb_regs, prev_regno, incr;
5463
5464 nb_regs = 1 + (reginfo & 0x3);
5465 reginfo >>= 2;
5466 prev_regno = reginfo & 0x1f;
5467 incr = accept_alternate ? 2 : 1;
5468
5469 for (i = 1; i < nb_regs; ++i)
5470 {
5471 uint32_t curr_regno;
5472 reginfo >>= 5;
5473 curr_regno = reginfo & 0x1f;
5474 if (curr_regno != ((prev_regno + incr) & 0x1f))
5475 return FALSE;
5476 prev_regno = curr_regno;
5477 }
5478
5479 return TRUE;
5480 }
5481
5482 /* Generic instruction operand parser. This does no encoding and no
5483 semantic validation; it merely squirrels values away in the inst
5484 structure. Returns TRUE or FALSE depending on whether the
5485 specified grammar matched. */
5486
5487 static bfd_boolean
5488 parse_operands (char *str, const aarch64_opcode *opcode)
5489 {
5490 int i;
5491 char *backtrack_pos = 0;
5492 const enum aarch64_opnd *operands = opcode->operands;
5493 aarch64_reg_type imm_reg_type;
5494
5495 clear_error ();
5496 skip_whitespace (str);
5497
5498 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5499 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5500 else
5501 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5502
5503 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5504 {
5505 int64_t val;
5506 const reg_entry *reg;
5507 int comma_skipped_p = 0;
5508 aarch64_reg_type rtype;
5509 struct vector_type_el vectype;
5510 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5511 aarch64_opnd_info *info = &inst.base.operands[i];
5512 aarch64_reg_type reg_type;
5513
5514 DEBUG_TRACE ("parse operand %d", i);
5515
5516 /* Assign the operand code. */
5517 info->type = operands[i];
5518
5519 if (optional_operand_p (opcode, i))
5520 {
5521 /* Remember where we are in case we need to backtrack. */
5522 gas_assert (!backtrack_pos);
5523 backtrack_pos = str;
5524 }
5525
5526 /* Expect comma between operands; the backtrack mechanism will take
5527 care of cases of omitted optional operand. */
5528 if (i > 0 && ! skip_past_char (&str, ','))
5529 {
5530 set_syntax_error (_("comma expected between operands"));
5531 goto failure;
5532 }
5533 else
5534 comma_skipped_p = 1;
5535
5536 switch (operands[i])
5537 {
5538 case AARCH64_OPND_Rd:
5539 case AARCH64_OPND_Rn:
5540 case AARCH64_OPND_Rm:
5541 case AARCH64_OPND_Rt:
5542 case AARCH64_OPND_Rt2:
5543 case AARCH64_OPND_Rs:
5544 case AARCH64_OPND_Ra:
5545 case AARCH64_OPND_Rt_SYS:
5546 case AARCH64_OPND_PAIRREG:
5547 case AARCH64_OPND_SVE_Rm:
5548 po_int_reg_or_fail (REG_TYPE_R_Z);
5549 break;
5550
5551 case AARCH64_OPND_Rd_SP:
5552 case AARCH64_OPND_Rn_SP:
5553 case AARCH64_OPND_Rt_SP:
5554 case AARCH64_OPND_SVE_Rn_SP:
5555 case AARCH64_OPND_Rm_SP:
5556 po_int_reg_or_fail (REG_TYPE_R_SP);
5557 break;
5558
5559 case AARCH64_OPND_Rm_EXT:
5560 case AARCH64_OPND_Rm_SFT:
5561 po_misc_or_fail (parse_shifter_operand
5562 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5563 ? SHIFTED_ARITH_IMM
5564 : SHIFTED_LOGIC_IMM)));
5565 if (!info->shifter.operator_present)
5566 {
5567 /* Default to LSL if not present. Libopcodes prefers shifter
5568 kind to be explicit. */
5569 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5570 info->shifter.kind = AARCH64_MOD_LSL;
5571 /* For Rm_EXT, libopcodes will carry out further check on whether
5572 or not stack pointer is used in the instruction (Recall that
5573 "the extend operator is not optional unless at least one of
5574 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5575 }
5576 break;
5577
5578 case AARCH64_OPND_Fd:
5579 case AARCH64_OPND_Fn:
5580 case AARCH64_OPND_Fm:
5581 case AARCH64_OPND_Fa:
5582 case AARCH64_OPND_Ft:
5583 case AARCH64_OPND_Ft2:
5584 case AARCH64_OPND_Sd:
5585 case AARCH64_OPND_Sn:
5586 case AARCH64_OPND_Sm:
5587 case AARCH64_OPND_SVE_VZn:
5588 case AARCH64_OPND_SVE_Vd:
5589 case AARCH64_OPND_SVE_Vm:
5590 case AARCH64_OPND_SVE_Vn:
5591 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5592 if (val == PARSE_FAIL)
5593 {
5594 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5595 goto failure;
5596 }
5597 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5598
5599 info->reg.regno = val;
5600 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5601 break;
5602
5603 case AARCH64_OPND_SVE_Pd:
5604 case AARCH64_OPND_SVE_Pg3:
5605 case AARCH64_OPND_SVE_Pg4_5:
5606 case AARCH64_OPND_SVE_Pg4_10:
5607 case AARCH64_OPND_SVE_Pg4_16:
5608 case AARCH64_OPND_SVE_Pm:
5609 case AARCH64_OPND_SVE_Pn:
5610 case AARCH64_OPND_SVE_Pt:
5611 reg_type = REG_TYPE_PN;
5612 goto vector_reg;
5613
5614 case AARCH64_OPND_SVE_Za_5:
5615 case AARCH64_OPND_SVE_Za_16:
5616 case AARCH64_OPND_SVE_Zd:
5617 case AARCH64_OPND_SVE_Zm_5:
5618 case AARCH64_OPND_SVE_Zm_16:
5619 case AARCH64_OPND_SVE_Zn:
5620 case AARCH64_OPND_SVE_Zt:
5621 reg_type = REG_TYPE_ZN;
5622 goto vector_reg;
5623
5624 case AARCH64_OPND_Va:
5625 case AARCH64_OPND_Vd:
5626 case AARCH64_OPND_Vn:
5627 case AARCH64_OPND_Vm:
5628 reg_type = REG_TYPE_VN;
5629 vector_reg:
5630 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5631 if (val == PARSE_FAIL)
5632 {
5633 first_error (_(get_reg_expected_msg (reg_type)));
5634 goto failure;
5635 }
5636 if (vectype.defined & NTA_HASINDEX)
5637 goto failure;
5638
5639 info->reg.regno = val;
5640 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5641 && vectype.type == NT_invtype)
5642 /* Unqualified Pn and Zn registers are allowed in certain
5643 contexts. Rely on F_STRICT qualifier checking to catch
5644 invalid uses. */
5645 info->qualifier = AARCH64_OPND_QLF_NIL;
5646 else
5647 {
5648 info->qualifier = vectype_to_qualifier (&vectype);
5649 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5650 goto failure;
5651 }
5652 break;
5653
5654 case AARCH64_OPND_VdD1:
5655 case AARCH64_OPND_VnD1:
5656 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5657 if (val == PARSE_FAIL)
5658 {
5659 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5660 goto failure;
5661 }
5662 if (vectype.type != NT_d || vectype.index != 1)
5663 {
5664 set_fatal_syntax_error
5665 (_("the top half of a 128-bit FP/SIMD register is expected"));
5666 goto failure;
5667 }
5668 info->reg.regno = val;
5669 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5670 here; it is correct for the purpose of encoding/decoding since
5671 only the register number is explicitly encoded in the related
5672 instructions, although this appears a bit hacky. */
5673 info->qualifier = AARCH64_OPND_QLF_S_D;
5674 break;
5675
5676 case AARCH64_OPND_SVE_Zm3_INDEX:
5677 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5678 case AARCH64_OPND_SVE_Zm3_11_INDEX:
5679 case AARCH64_OPND_SVE_Zm4_11_INDEX:
5680 case AARCH64_OPND_SVE_Zm4_INDEX:
5681 case AARCH64_OPND_SVE_Zn_INDEX:
5682 reg_type = REG_TYPE_ZN;
5683 goto vector_reg_index;
5684
5685 case AARCH64_OPND_Ed:
5686 case AARCH64_OPND_En:
5687 case AARCH64_OPND_Em:
5688 case AARCH64_OPND_Em16:
5689 case AARCH64_OPND_SM3_IMM2:
5690 reg_type = REG_TYPE_VN;
5691 vector_reg_index:
5692 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5693 if (val == PARSE_FAIL)
5694 {
5695 first_error (_(get_reg_expected_msg (reg_type)));
5696 goto failure;
5697 }
5698 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5699 goto failure;
5700
5701 info->reglane.regno = val;
5702 info->reglane.index = vectype.index;
5703 info->qualifier = vectype_to_qualifier (&vectype);
5704 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5705 goto failure;
5706 break;
5707
5708 case AARCH64_OPND_SVE_ZnxN:
5709 case AARCH64_OPND_SVE_ZtxN:
5710 reg_type = REG_TYPE_ZN;
5711 goto vector_reg_list;
5712
5713 case AARCH64_OPND_LVn:
5714 case AARCH64_OPND_LVt:
5715 case AARCH64_OPND_LVt_AL:
5716 case AARCH64_OPND_LEt:
5717 reg_type = REG_TYPE_VN;
5718 vector_reg_list:
5719 if (reg_type == REG_TYPE_ZN
5720 && get_opcode_dependent_value (opcode) == 1
5721 && *str != '{')
5722 {
5723 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5724 if (val == PARSE_FAIL)
5725 {
5726 first_error (_(get_reg_expected_msg (reg_type)));
5727 goto failure;
5728 }
5729 info->reglist.first_regno = val;
5730 info->reglist.num_regs = 1;
5731 }
5732 else
5733 {
5734 val = parse_vector_reg_list (&str, reg_type, &vectype);
5735 if (val == PARSE_FAIL)
5736 goto failure;
5737
5738 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5739 {
5740 set_fatal_syntax_error (_("invalid register list"));
5741 goto failure;
5742 }
5743
5744 if (vectype.width != 0 && *str != ',')
5745 {
5746 set_fatal_syntax_error
5747 (_("expected element type rather than vector type"));
5748 goto failure;
5749 }
5750
5751 info->reglist.first_regno = (val >> 2) & 0x1f;
5752 info->reglist.num_regs = (val & 0x3) + 1;
5753 }
5754 if (operands[i] == AARCH64_OPND_LEt)
5755 {
5756 if (!(vectype.defined & NTA_HASINDEX))
5757 goto failure;
5758 info->reglist.has_index = 1;
5759 info->reglist.index = vectype.index;
5760 }
5761 else
5762 {
5763 if (vectype.defined & NTA_HASINDEX)
5764 goto failure;
5765 if (!(vectype.defined & NTA_HASTYPE))
5766 {
5767 if (reg_type == REG_TYPE_ZN)
5768 set_fatal_syntax_error (_("missing type suffix"));
5769 goto failure;
5770 }
5771 }
5772 info->qualifier = vectype_to_qualifier (&vectype);
5773 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5774 goto failure;
5775 break;
5776
5777 case AARCH64_OPND_CRn:
5778 case AARCH64_OPND_CRm:
5779 {
5780 char prefix = *(str++);
5781 if (prefix != 'c' && prefix != 'C')
5782 goto failure;
5783
5784 po_imm_nc_or_fail ();
5785 if (val > 15)
5786 {
5787 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5788 goto failure;
5789 }
5790 info->qualifier = AARCH64_OPND_QLF_CR;
5791 info->imm.value = val;
5792 break;
5793 }
5794
5795 case AARCH64_OPND_SHLL_IMM:
5796 case AARCH64_OPND_IMM_VLSR:
5797 po_imm_or_fail (1, 64);
5798 info->imm.value = val;
5799 break;
5800
5801 case AARCH64_OPND_CCMP_IMM:
5802 case AARCH64_OPND_SIMM5:
5803 case AARCH64_OPND_FBITS:
5804 case AARCH64_OPND_TME_UIMM16:
5805 case AARCH64_OPND_UIMM4:
5806 case AARCH64_OPND_UIMM4_ADDG:
5807 case AARCH64_OPND_UIMM10:
5808 case AARCH64_OPND_UIMM3_OP1:
5809 case AARCH64_OPND_UIMM3_OP2:
5810 case AARCH64_OPND_IMM_VLSL:
5811 case AARCH64_OPND_IMM:
5812 case AARCH64_OPND_IMM_2:
5813 case AARCH64_OPND_WIDTH:
5814 case AARCH64_OPND_SVE_INV_LIMM:
5815 case AARCH64_OPND_SVE_LIMM:
5816 case AARCH64_OPND_SVE_LIMM_MOV:
5817 case AARCH64_OPND_SVE_SHLIMM_PRED:
5818 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5819 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
5820 case AARCH64_OPND_SVE_SHRIMM_PRED:
5821 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5822 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
5823 case AARCH64_OPND_SVE_SIMM5:
5824 case AARCH64_OPND_SVE_SIMM5B:
5825 case AARCH64_OPND_SVE_SIMM6:
5826 case AARCH64_OPND_SVE_SIMM8:
5827 case AARCH64_OPND_SVE_UIMM3:
5828 case AARCH64_OPND_SVE_UIMM7:
5829 case AARCH64_OPND_SVE_UIMM8:
5830 case AARCH64_OPND_SVE_UIMM8_53:
5831 case AARCH64_OPND_IMM_ROT1:
5832 case AARCH64_OPND_IMM_ROT2:
5833 case AARCH64_OPND_IMM_ROT3:
5834 case AARCH64_OPND_SVE_IMM_ROT1:
5835 case AARCH64_OPND_SVE_IMM_ROT2:
5836 case AARCH64_OPND_SVE_IMM_ROT3:
5837 po_imm_nc_or_fail ();
5838 info->imm.value = val;
5839 break;
5840
5841 case AARCH64_OPND_SVE_AIMM:
5842 case AARCH64_OPND_SVE_ASIMM:
5843 po_imm_nc_or_fail ();
5844 info->imm.value = val;
5845 skip_whitespace (str);
5846 if (skip_past_comma (&str))
5847 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5848 else
5849 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5850 break;
5851
5852 case AARCH64_OPND_SVE_PATTERN:
5853 po_enum_or_fail (aarch64_sve_pattern_array);
5854 info->imm.value = val;
5855 break;
5856
5857 case AARCH64_OPND_SVE_PATTERN_SCALED:
5858 po_enum_or_fail (aarch64_sve_pattern_array);
5859 info->imm.value = val;
5860 if (skip_past_comma (&str)
5861 && !parse_shift (&str, info, SHIFTED_MUL))
5862 goto failure;
5863 if (!info->shifter.operator_present)
5864 {
5865 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5866 info->shifter.kind = AARCH64_MOD_MUL;
5867 info->shifter.amount = 1;
5868 }
5869 break;
5870
5871 case AARCH64_OPND_SVE_PRFOP:
5872 po_enum_or_fail (aarch64_sve_prfop_array);
5873 info->imm.value = val;
5874 break;
5875
5876 case AARCH64_OPND_UIMM7:
5877 po_imm_or_fail (0, 127);
5878 info->imm.value = val;
5879 break;
5880
5881 case AARCH64_OPND_IDX:
5882 case AARCH64_OPND_MASK:
5883 case AARCH64_OPND_BIT_NUM:
5884 case AARCH64_OPND_IMMR:
5885 case AARCH64_OPND_IMMS:
5886 po_imm_or_fail (0, 63);
5887 info->imm.value = val;
5888 break;
5889
5890 case AARCH64_OPND_IMM0:
5891 po_imm_nc_or_fail ();
5892 if (val != 0)
5893 {
5894 set_fatal_syntax_error (_("immediate zero expected"));
5895 goto failure;
5896 }
5897 info->imm.value = 0;
5898 break;
5899
5900 case AARCH64_OPND_FPIMM0:
5901 {
5902 int qfloat;
5903 bfd_boolean res1 = FALSE, res2 = FALSE;
5904 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5905 it is probably not worth the effort to support it. */
5906 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5907 imm_reg_type))
5908 && (error_p ()
5909 || !(res2 = parse_constant_immediate (&str, &val,
5910 imm_reg_type))))
5911 goto failure;
5912 if ((res1 && qfloat == 0) || (res2 && val == 0))
5913 {
5914 info->imm.value = 0;
5915 info->imm.is_fp = 1;
5916 break;
5917 }
5918 set_fatal_syntax_error (_("immediate zero expected"));
5919 goto failure;
5920 }
5921
5922 case AARCH64_OPND_IMM_MOV:
5923 {
5924 char *saved = str;
5925 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5926 reg_name_p (str, REG_TYPE_VN))
5927 goto failure;
5928 str = saved;
5929 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5930 GE_OPT_PREFIX, 1));
5931 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5932 later. fix_mov_imm_insn will try to determine a machine
5933 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5934 message if the immediate cannot be moved by a single
5935 instruction. */
5936 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5937 inst.base.operands[i].skip = 1;
5938 }
5939 break;
5940
5941 case AARCH64_OPND_SIMD_IMM:
5942 case AARCH64_OPND_SIMD_IMM_SFT:
5943 if (! parse_big_immediate (&str, &val, imm_reg_type))
5944 goto failure;
5945 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5946 /* addr_off_p */ 0,
5947 /* need_libopcodes_p */ 1,
5948 /* skip_p */ 1);
5949 /* Parse shift.
5950 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5951 shift, we don't check it here; we leave the checking to
5952 the libopcodes (operand_general_constraint_met_p). By
5953 doing this, we achieve better diagnostics. */
5954 if (skip_past_comma (&str)
5955 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5956 goto failure;
5957 if (!info->shifter.operator_present
5958 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5959 {
5960 /* Default to LSL if not present. Libopcodes prefers shifter
5961 kind to be explicit. */
5962 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5963 info->shifter.kind = AARCH64_MOD_LSL;
5964 }
5965 break;
5966
5967 case AARCH64_OPND_FPIMM:
5968 case AARCH64_OPND_SIMD_FPIMM:
5969 case AARCH64_OPND_SVE_FPIMM8:
5970 {
5971 int qfloat;
5972 bfd_boolean dp_p;
5973
5974 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5975 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5976 || !aarch64_imm_float_p (qfloat))
5977 {
5978 if (!error_p ())
5979 set_fatal_syntax_error (_("invalid floating-point"
5980 " constant"));
5981 goto failure;
5982 }
5983 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5984 inst.base.operands[i].imm.is_fp = 1;
5985 }
5986 break;
5987
5988 case AARCH64_OPND_SVE_I1_HALF_ONE:
5989 case AARCH64_OPND_SVE_I1_HALF_TWO:
5990 case AARCH64_OPND_SVE_I1_ZERO_ONE:
5991 {
5992 int qfloat;
5993 bfd_boolean dp_p;
5994
5995 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5996 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5997 {
5998 if (!error_p ())
5999 set_fatal_syntax_error (_("invalid floating-point"
6000 " constant"));
6001 goto failure;
6002 }
6003 inst.base.operands[i].imm.value = qfloat;
6004 inst.base.operands[i].imm.is_fp = 1;
6005 }
6006 break;
6007
6008 case AARCH64_OPND_LIMM:
6009 po_misc_or_fail (parse_shifter_operand (&str, info,
6010 SHIFTED_LOGIC_IMM));
6011 if (info->shifter.operator_present)
6012 {
6013 set_fatal_syntax_error
6014 (_("shift not allowed for bitmask immediate"));
6015 goto failure;
6016 }
6017 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6018 /* addr_off_p */ 0,
6019 /* need_libopcodes_p */ 1,
6020 /* skip_p */ 1);
6021 break;
6022
6023 case AARCH64_OPND_AIMM:
6024 if (opcode->op == OP_ADD)
6025 /* ADD may have relocation types. */
6026 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6027 SHIFTED_ARITH_IMM));
6028 else
6029 po_misc_or_fail (parse_shifter_operand (&str, info,
6030 SHIFTED_ARITH_IMM));
6031 switch (inst.reloc.type)
6032 {
6033 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6034 info->shifter.amount = 12;
6035 break;
6036 case BFD_RELOC_UNUSED:
6037 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6038 if (info->shifter.kind != AARCH64_MOD_NONE)
6039 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6040 inst.reloc.pc_rel = 0;
6041 break;
6042 default:
6043 break;
6044 }
6045 info->imm.value = 0;
6046 if (!info->shifter.operator_present)
6047 {
6048 /* Default to LSL if not present. Libopcodes prefers shifter
6049 kind to be explicit. */
6050 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6051 info->shifter.kind = AARCH64_MOD_LSL;
6052 }
6053 break;
6054
6055 case AARCH64_OPND_HALF:
6056 {
6057 /* #<imm16> or relocation. */
6058 int internal_fixup_p;
6059 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6060 if (internal_fixup_p)
6061 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6062 skip_whitespace (str);
6063 if (skip_past_comma (&str))
6064 {
6065 /* {, LSL #<shift>} */
6066 if (! aarch64_gas_internal_fixup_p ())
6067 {
6068 set_fatal_syntax_error (_("can't mix relocation modifier "
6069 "with explicit shift"));
6070 goto failure;
6071 }
6072 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6073 }
6074 else
6075 inst.base.operands[i].shifter.amount = 0;
6076 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6077 inst.base.operands[i].imm.value = 0;
6078 if (! process_movw_reloc_info ())
6079 goto failure;
6080 }
6081 break;
6082
6083 case AARCH64_OPND_EXCEPTION:
6084 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6085 imm_reg_type));
6086 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6087 /* addr_off_p */ 0,
6088 /* need_libopcodes_p */ 0,
6089 /* skip_p */ 1);
6090 break;
6091
6092 case AARCH64_OPND_NZCV:
6093 {
6094 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
6095 if (nzcv != NULL)
6096 {
6097 str += 4;
6098 info->imm.value = nzcv->value;
6099 break;
6100 }
6101 po_imm_or_fail (0, 15);
6102 info->imm.value = val;
6103 }
6104 break;
6105
6106 case AARCH64_OPND_COND:
6107 case AARCH64_OPND_COND1:
6108 {
6109 char *start = str;
6110 do
6111 str++;
6112 while (ISALPHA (*str));
6113 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
6114 if (info->cond == NULL)
6115 {
6116 set_syntax_error (_("invalid condition"));
6117 goto failure;
6118 }
6119 else if (operands[i] == AARCH64_OPND_COND1
6120 && (info->cond->value & 0xe) == 0xe)
6121 {
6122 /* Do not allow AL or NV. */
6123 set_default_error ();
6124 goto failure;
6125 }
6126 }
6127 break;
6128
6129 case AARCH64_OPND_ADDR_ADRP:
6130 po_misc_or_fail (parse_adrp (&str));
6131 /* Clear the value as operand needs to be relocated. */
6132 info->imm.value = 0;
6133 break;
6134
6135 case AARCH64_OPND_ADDR_PCREL14:
6136 case AARCH64_OPND_ADDR_PCREL19:
6137 case AARCH64_OPND_ADDR_PCREL21:
6138 case AARCH64_OPND_ADDR_PCREL26:
6139 po_misc_or_fail (parse_address (&str, info));
6140 if (!info->addr.pcrel)
6141 {
6142 set_syntax_error (_("invalid pc-relative address"));
6143 goto failure;
6144 }
6145 if (inst.gen_lit_pool
6146 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6147 {
6148 /* Only permit "=value" in the literal load instructions.
6149 The literal will be generated by programmer_friendly_fixup. */
6150 set_syntax_error (_("invalid use of \"=immediate\""));
6151 goto failure;
6152 }
6153 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6154 {
6155 set_syntax_error (_("unrecognized relocation suffix"));
6156 goto failure;
6157 }
6158 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6159 {
6160 info->imm.value = inst.reloc.exp.X_add_number;
6161 inst.reloc.type = BFD_RELOC_UNUSED;
6162 }
6163 else
6164 {
6165 info->imm.value = 0;
6166 if (inst.reloc.type == BFD_RELOC_UNUSED)
6167 switch (opcode->iclass)
6168 {
6169 case compbranch:
6170 case condbranch:
6171 /* e.g. CBZ or B.COND */
6172 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6173 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6174 break;
6175 case testbranch:
6176 /* e.g. TBZ */
6177 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6178 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6179 break;
6180 case branch_imm:
6181 /* e.g. B or BL */
6182 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6183 inst.reloc.type =
6184 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6185 : BFD_RELOC_AARCH64_JUMP26;
6186 break;
6187 case loadlit:
6188 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6189 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6190 break;
6191 case pcreladdr:
6192 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6193 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6194 break;
6195 default:
6196 gas_assert (0);
6197 abort ();
6198 }
6199 inst.reloc.pc_rel = 1;
6200 }
6201 break;
6202
6203 case AARCH64_OPND_ADDR_SIMPLE:
6204 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6205 {
6206 /* [<Xn|SP>{, #<simm>}] */
6207 char *start = str;
6208 /* First use the normal address-parsing routines, to get
6209 the usual syntax errors. */
6210 po_misc_or_fail (parse_address (&str, info));
6211 if (info->addr.pcrel || info->addr.offset.is_reg
6212 || !info->addr.preind || info->addr.postind
6213 || info->addr.writeback)
6214 {
6215 set_syntax_error (_("invalid addressing mode"));
6216 goto failure;
6217 }
6218
6219 /* Then retry, matching the specific syntax of these addresses. */
6220 str = start;
6221 po_char_or_fail ('[');
6222 po_reg_or_fail (REG_TYPE_R64_SP);
6223 /* Accept optional ", #0". */
6224 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6225 && skip_past_char (&str, ','))
6226 {
6227 skip_past_char (&str, '#');
6228 if (! skip_past_char (&str, '0'))
6229 {
6230 set_fatal_syntax_error
6231 (_("the optional immediate offset can only be 0"));
6232 goto failure;
6233 }
6234 }
6235 po_char_or_fail (']');
6236 break;
6237 }
6238
6239 case AARCH64_OPND_ADDR_REGOFF:
6240 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6241 po_misc_or_fail (parse_address (&str, info));
6242 regoff_addr:
6243 if (info->addr.pcrel || !info->addr.offset.is_reg
6244 || !info->addr.preind || info->addr.postind
6245 || info->addr.writeback)
6246 {
6247 set_syntax_error (_("invalid addressing mode"));
6248 goto failure;
6249 }
6250 if (!info->shifter.operator_present)
6251 {
6252 /* Default to LSL if not present. Libopcodes prefers shifter
6253 kind to be explicit. */
6254 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6255 info->shifter.kind = AARCH64_MOD_LSL;
6256 }
6257 /* Qualifier to be deduced by libopcodes. */
6258 break;
6259
6260 case AARCH64_OPND_ADDR_SIMM7:
6261 po_misc_or_fail (parse_address (&str, info));
6262 if (info->addr.pcrel || info->addr.offset.is_reg
6263 || (!info->addr.preind && !info->addr.postind))
6264 {
6265 set_syntax_error (_("invalid addressing mode"));
6266 goto failure;
6267 }
6268 if (inst.reloc.type != BFD_RELOC_UNUSED)
6269 {
6270 set_syntax_error (_("relocation not allowed"));
6271 goto failure;
6272 }
6273 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6274 /* addr_off_p */ 1,
6275 /* need_libopcodes_p */ 1,
6276 /* skip_p */ 0);
6277 break;
6278
6279 case AARCH64_OPND_ADDR_SIMM9:
6280 case AARCH64_OPND_ADDR_SIMM9_2:
6281 case AARCH64_OPND_ADDR_SIMM11:
6282 case AARCH64_OPND_ADDR_SIMM13:
6283 po_misc_or_fail (parse_address (&str, info));
6284 if (info->addr.pcrel || info->addr.offset.is_reg
6285 || (!info->addr.preind && !info->addr.postind)
6286 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6287 && info->addr.writeback))
6288 {
6289 set_syntax_error (_("invalid addressing mode"));
6290 goto failure;
6291 }
6292 if (inst.reloc.type != BFD_RELOC_UNUSED)
6293 {
6294 set_syntax_error (_("relocation not allowed"));
6295 goto failure;
6296 }
6297 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6298 /* addr_off_p */ 1,
6299 /* need_libopcodes_p */ 1,
6300 /* skip_p */ 0);
6301 break;
6302
6303 case AARCH64_OPND_ADDR_SIMM10:
6304 case AARCH64_OPND_ADDR_OFFSET:
6305 po_misc_or_fail (parse_address (&str, info));
6306 if (info->addr.pcrel || info->addr.offset.is_reg
6307 || !info->addr.preind || info->addr.postind)
6308 {
6309 set_syntax_error (_("invalid addressing mode"));
6310 goto failure;
6311 }
6312 if (inst.reloc.type != BFD_RELOC_UNUSED)
6313 {
6314 set_syntax_error (_("relocation not allowed"));
6315 goto failure;
6316 }
6317 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6318 /* addr_off_p */ 1,
6319 /* need_libopcodes_p */ 1,
6320 /* skip_p */ 0);
6321 break;
6322
6323 case AARCH64_OPND_ADDR_UIMM12:
6324 po_misc_or_fail (parse_address (&str, info));
6325 if (info->addr.pcrel || info->addr.offset.is_reg
6326 || !info->addr.preind || info->addr.writeback)
6327 {
6328 set_syntax_error (_("invalid addressing mode"));
6329 goto failure;
6330 }
6331 if (inst.reloc.type == BFD_RELOC_UNUSED)
6332 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6333 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6334 || (inst.reloc.type
6335 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6336 || (inst.reloc.type
6337 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6338 || (inst.reloc.type
6339 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6340 || (inst.reloc.type
6341 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6342 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6343 /* Leave qualifier to be determined by libopcodes. */
6344 break;
6345
6346 case AARCH64_OPND_SIMD_ADDR_POST:
6347 /* [<Xn|SP>], <Xm|#<amount>> */
6348 po_misc_or_fail (parse_address (&str, info));
6349 if (!info->addr.postind || !info->addr.writeback)
6350 {
6351 set_syntax_error (_("invalid addressing mode"));
6352 goto failure;
6353 }
6354 if (!info->addr.offset.is_reg)
6355 {
6356 if (inst.reloc.exp.X_op == O_constant)
6357 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6358 else
6359 {
6360 set_fatal_syntax_error
6361 (_("writeback value must be an immediate constant"));
6362 goto failure;
6363 }
6364 }
6365 /* No qualifier. */
6366 break;
6367
6368 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6369 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6370 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6371 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6372 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6373 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6374 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6375 case AARCH64_OPND_SVE_ADDR_RI_U6:
6376 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6377 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6378 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6379 /* [X<n>{, #imm, MUL VL}]
6380 [X<n>{, #imm}]
6381 but recognizing SVE registers. */
6382 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6383 &offset_qualifier));
6384 if (base_qualifier != AARCH64_OPND_QLF_X)
6385 {
6386 set_syntax_error (_("invalid addressing mode"));
6387 goto failure;
6388 }
6389 sve_regimm:
6390 if (info->addr.pcrel || info->addr.offset.is_reg
6391 || !info->addr.preind || info->addr.writeback)
6392 {
6393 set_syntax_error (_("invalid addressing mode"));
6394 goto failure;
6395 }
6396 if (inst.reloc.type != BFD_RELOC_UNUSED
6397 || inst.reloc.exp.X_op != O_constant)
6398 {
6399 /* Make sure this has priority over
6400 "invalid addressing mode". */
6401 set_fatal_syntax_error (_("constant offset required"));
6402 goto failure;
6403 }
6404 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6405 break;
6406
6407 case AARCH64_OPND_SVE_ADDR_R:
6408 /* [<Xn|SP>{, <R><m>}]
6409 but recognizing SVE registers. */
6410 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6411 &offset_qualifier));
6412 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6413 {
6414 offset_qualifier = AARCH64_OPND_QLF_X;
6415 info->addr.offset.is_reg = 1;
6416 info->addr.offset.regno = 31;
6417 }
6418 else if (base_qualifier != AARCH64_OPND_QLF_X
6419 || offset_qualifier != AARCH64_OPND_QLF_X)
6420 {
6421 set_syntax_error (_("invalid addressing mode"));
6422 goto failure;
6423 }
6424 goto regoff_addr;
6425
6426 case AARCH64_OPND_SVE_ADDR_RR:
6427 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6428 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6429 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6430 case AARCH64_OPND_SVE_ADDR_RX:
6431 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6432 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6433 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6434 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6435 but recognizing SVE registers. */
6436 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6437 &offset_qualifier));
6438 if (base_qualifier != AARCH64_OPND_QLF_X
6439 || offset_qualifier != AARCH64_OPND_QLF_X)
6440 {
6441 set_syntax_error (_("invalid addressing mode"));
6442 goto failure;
6443 }
6444 goto regoff_addr;
6445
6446 case AARCH64_OPND_SVE_ADDR_RZ:
6447 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6448 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6449 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6450 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6451 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6452 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6453 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6454 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6455 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6456 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6457 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6458 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6459 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6460 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6461 &offset_qualifier));
6462 if (base_qualifier != AARCH64_OPND_QLF_X
6463 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6464 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6465 {
6466 set_syntax_error (_("invalid addressing mode"));
6467 goto failure;
6468 }
6469 info->qualifier = offset_qualifier;
6470 goto regoff_addr;
6471
6472 case AARCH64_OPND_SVE_ADDR_ZX:
6473 /* [Zn.<T>{, <Xm>}]. */
6474 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6475 &offset_qualifier));
6476 /* Things to check:
6477 base_qualifier either S_S or S_D
6478 offset_qualifier must be X
6479 */
6480 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6481 && base_qualifier != AARCH64_OPND_QLF_S_D)
6482 || offset_qualifier != AARCH64_OPND_QLF_X)
6483 {
6484 set_syntax_error (_("invalid addressing mode"));
6485 goto failure;
6486 }
6487 info->qualifier = base_qualifier;
6488 if (!info->addr.offset.is_reg || info->addr.pcrel
6489 || !info->addr.preind || info->addr.writeback
6490 || info->shifter.operator_present != 0)
6491 {
6492 set_syntax_error (_("invalid addressing mode"));
6493 goto failure;
6494 }
6495 info->shifter.kind = AARCH64_MOD_LSL;
6496 break;
6497
6498
6499 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6500 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6501 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6502 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6503 /* [Z<n>.<T>{, #imm}] */
6504 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6505 &offset_qualifier));
6506 if (base_qualifier != AARCH64_OPND_QLF_S_S
6507 && base_qualifier != AARCH64_OPND_QLF_S_D)
6508 {
6509 set_syntax_error (_("invalid addressing mode"));
6510 goto failure;
6511 }
6512 info->qualifier = base_qualifier;
6513 goto sve_regimm;
6514
6515 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6516 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6517 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6518 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6519 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6520
6521 We don't reject:
6522
6523 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6524
6525 here since we get better error messages by leaving it to
6526 the qualifier checking routines. */
6527 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6528 &offset_qualifier));
6529 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6530 && base_qualifier != AARCH64_OPND_QLF_S_D)
6531 || offset_qualifier != base_qualifier)
6532 {
6533 set_syntax_error (_("invalid addressing mode"));
6534 goto failure;
6535 }
6536 info->qualifier = base_qualifier;
6537 goto regoff_addr;
6538
6539 case AARCH64_OPND_SYSREG:
6540 {
6541 uint32_t sysreg_flags;
6542 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6543 &sysreg_flags)) == PARSE_FAIL)
6544 {
6545 set_syntax_error (_("unknown or missing system register name"));
6546 goto failure;
6547 }
6548 inst.base.operands[i].sysreg.value = val;
6549 inst.base.operands[i].sysreg.flags = sysreg_flags;
6550 break;
6551 }
6552
6553 case AARCH64_OPND_PSTATEFIELD:
6554 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6555 == PARSE_FAIL)
6556 {
6557 set_syntax_error (_("unknown or missing PSTATE field name"));
6558 goto failure;
6559 }
6560 inst.base.operands[i].pstatefield = val;
6561 break;
6562
6563 case AARCH64_OPND_SYSREG_IC:
6564 inst.base.operands[i].sysins_op =
6565 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6566 goto sys_reg_ins;
6567
6568 case AARCH64_OPND_SYSREG_DC:
6569 inst.base.operands[i].sysins_op =
6570 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6571 goto sys_reg_ins;
6572
6573 case AARCH64_OPND_SYSREG_AT:
6574 inst.base.operands[i].sysins_op =
6575 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6576 goto sys_reg_ins;
6577
6578 case AARCH64_OPND_SYSREG_SR:
6579 inst.base.operands[i].sysins_op =
6580 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6581 goto sys_reg_ins;
6582
6583 case AARCH64_OPND_SYSREG_TLBI:
6584 inst.base.operands[i].sysins_op =
6585 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6586 sys_reg_ins:
6587 if (inst.base.operands[i].sysins_op == NULL)
6588 {
6589 set_fatal_syntax_error ( _("unknown or missing operation name"));
6590 goto failure;
6591 }
6592 break;
6593
6594 case AARCH64_OPND_BARRIER:
6595 case AARCH64_OPND_BARRIER_ISB:
6596 val = parse_barrier (&str);
6597 if (val != PARSE_FAIL
6598 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6599 {
6600 /* ISB only accepts options name 'sy'. */
6601 set_syntax_error
6602 (_("the specified option is not accepted in ISB"));
6603 /* Turn off backtrack as this optional operand is present. */
6604 backtrack_pos = 0;
6605 goto failure;
6606 }
6607 /* This is an extension to accept a 0..15 immediate. */
6608 if (val == PARSE_FAIL)
6609 po_imm_or_fail (0, 15);
6610 info->barrier = aarch64_barrier_options + val;
6611 break;
6612
6613 case AARCH64_OPND_PRFOP:
6614 val = parse_pldop (&str);
6615 /* This is an extension to accept a 0..31 immediate. */
6616 if (val == PARSE_FAIL)
6617 po_imm_or_fail (0, 31);
6618 inst.base.operands[i].prfop = aarch64_prfops + val;
6619 break;
6620
6621 case AARCH64_OPND_BARRIER_PSB:
6622 val = parse_barrier_psb (&str, &(info->hint_option));
6623 if (val == PARSE_FAIL)
6624 goto failure;
6625 break;
6626
6627 case AARCH64_OPND_BTI_TARGET:
6628 val = parse_bti_operand (&str, &(info->hint_option));
6629 if (val == PARSE_FAIL)
6630 goto failure;
6631 break;
6632
6633 default:
6634 as_fatal (_("unhandled operand code %d"), operands[i]);
6635 }
6636
6637 /* If we get here, this operand was successfully parsed. */
6638 inst.base.operands[i].present = 1;
6639 continue;
6640
6641 failure:
6642 /* The parse routine should already have set the error, but in case
6643 not, set a default one here. */
6644 if (! error_p ())
6645 set_default_error ();
6646
6647 if (! backtrack_pos)
6648 goto parse_operands_return;
6649
6650 {
6651 /* We reach here because this operand is marked as optional, and
6652 either no operand was supplied or the operand was supplied but it
6653 was syntactically incorrect. In the latter case we report an
6654 error. In the former case we perform a few more checks before
6655 dropping through to the code to insert the default operand. */
6656
6657 char *tmp = backtrack_pos;
6658 char endchar = END_OF_INSN;
6659
6660 if (i != (aarch64_num_of_operands (opcode) - 1))
6661 endchar = ',';
6662 skip_past_char (&tmp, ',');
6663
6664 if (*tmp != endchar)
6665 /* The user has supplied an operand in the wrong format. */
6666 goto parse_operands_return;
6667
6668 /* Make sure there is not a comma before the optional operand.
6669 For example the fifth operand of 'sys' is optional:
6670
6671 sys #0,c0,c0,#0, <--- wrong
6672 sys #0,c0,c0,#0 <--- correct. */
6673 if (comma_skipped_p && i && endchar == END_OF_INSN)
6674 {
6675 set_fatal_syntax_error
6676 (_("unexpected comma before the omitted optional operand"));
6677 goto parse_operands_return;
6678 }
6679 }
6680
6681 /* Reaching here means we are dealing with an optional operand that is
6682 omitted from the assembly line. */
6683 gas_assert (optional_operand_p (opcode, i));
6684 info->present = 0;
6685 process_omitted_operand (operands[i], opcode, i, info);
6686
6687 /* Try again, skipping the optional operand at backtrack_pos. */
6688 str = backtrack_pos;
6689 backtrack_pos = 0;
6690
6691 /* Clear any error record after the omitted optional operand has been
6692 successfully handled. */
6693 clear_error ();
6694 }
6695
6696 /* Check if we have parsed all the operands. */
6697 if (*str != '\0' && ! error_p ())
6698 {
6699 /* Set I to the index of the last present operand; this is
6700 for the purpose of diagnostics. */
6701 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6702 ;
6703 set_fatal_syntax_error
6704 (_("unexpected characters following instruction"));
6705 }
6706
6707 parse_operands_return:
6708
6709 if (error_p ())
6710 {
6711 DEBUG_TRACE ("parsing FAIL: %s - %s",
6712 operand_mismatch_kind_names[get_error_kind ()],
6713 get_error_message ());
6714 /* Record the operand error properly; this is useful when there
6715 are multiple instruction templates for a mnemonic name, so that
6716 later on, we can select the error that most closely describes
6717 the problem. */
6718 record_operand_error (opcode, i, get_error_kind (),
6719 get_error_message ());
6720 return FALSE;
6721 }
6722 else
6723 {
6724 DEBUG_TRACE ("parsing SUCCESS");
6725 return TRUE;
6726 }
6727 }
6728
6729 /* It does some fix-up to provide some programmer friendly feature while
6730 keeping the libopcodes happy, i.e. libopcodes only accepts
6731 the preferred architectural syntax.
6732 Return FALSE if there is any failure; otherwise return TRUE. */
6733
static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  /* Dispatch on the instruction class; only a handful of classes need
     any fix-up, everything else falls through unchanged.  */
  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero. The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32. */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register can only name bits 0..31; anything larger is a
	     hard error rather than a silent widening.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* Accept Wn but encode as the architectural Xn form.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value. */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW always loads a 32-bit literal regardless of the
	     destination register width.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  /* Only constants, big numbers and symbols can live in a
	     literal pool.  */
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6836
6837 /* Check for loads and stores that will cause unpredictable behavior. */
6838
static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback. */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G. */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 For pair instructions the address operand is the third operand,
	 so both transfer registers are checked against it. */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP. */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 NOTE(review): bit 22 appears to select the load form of the pair
	 encoding here -- confirm against the opcode table.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      /* It is unpredictable if the destination and status registers are the
	 same. */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[1].reg.regno
	      || opnds[0].reg.regno == opnds[2].reg.regno))
	as_warn (_("unpredictable: identical transfer and status registers"
		   " --`%s'"),
		 str);

      break;

    default:
      break;
    }
}
6901
6902 static void
6903 force_automatic_sequence_close (void)
6904 {
6905 if (now_instr_sequence.instr)
6906 {
6907 as_warn (_("previous `%s' sequence has not been closed"),
6908 now_instr_sequence.instr->opcode->name);
6909 init_insn_sequence (NULL, &now_instr_sequence);
6910 }
6911 }
6912
6913 /* A wrapper function to interface with libopcodes on encoding and
6914 record the error message if there is any.
6915
6916 Return TRUE on success; otherwise return FALSE. */
6917
6918 static bfd_boolean
6919 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6920 aarch64_insn *code)
6921 {
6922 aarch64_operand_error error_info;
6923 memset (&error_info, '\0', sizeof (error_info));
6924 error_info.kind = AARCH64_OPDE_NIL;
6925 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
6926 && !error_info.non_fatal)
6927 return TRUE;
6928
6929 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6930 record_operand_error_info (opcode, &error_info);
6931 return error_info.non_fatal;
6932 }
6933
6934 #ifdef DEBUG_AARCH64
6935 static inline void
6936 dump_opcode_operands (const aarch64_opcode *opcode)
6937 {
6938 int i = 0;
6939 while (opcode->operands[i] != AARCH64_OPND_NIL)
6940 {
6941 aarch64_verbose ("\t\t opnd%d: %s", i,
6942 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
6943 ? aarch64_get_operand_name (opcode->operands[i])
6944 : aarch64_get_operand_desc (opcode->operands[i]));
6945 ++i;
6946 }
6947 }
6948 #endif /* DEBUG_AARCH64 */
6949
6950 /* This is the guts of the machine-dependent assembler. STR points to a
6951 machine dependent instruction. This function is supposed to emit
6952 the frags/bytes it assembles to. */
6953
void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed. */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment. */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* P is advanced past the mnemonic by opcode_lookup.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive. */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed. */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* inst.cond was set by opcode_lookup (e.g. for b.cond); preserve it
     across the reset of the instruction scratch state.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name. */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond. */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional. */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up, then encode; any stage failing moves on to the
	 next opcode template for this mnemonic.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU. */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up. */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any. */
	  output_operand_error_report (str, TRUE);
	  return;
	}

      template = template->next;
      if (template != NULL)
	{
	  /* Reset scratch state before retrying with the next template.  */
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any. */
  output_operand_error_report (str, FALSE);
}
7089
7090 /* Various frobbings of labels and their addresses. */
7091
/* Called at the start of every source line; forget the last label so
   that label alignment in md_assemble only applies to labels on the
   current line.  */

void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
7097
/* Record SYM as the most recently seen label and emit DWARF line
   information for it.  */

void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
7105
/* Called when a section is changed or closed.  */

void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close. */
  force_automatic_sequence_close ();
}
7112
/* Detect a "$d" style data-in-code marker of the form "...data:" at the
   current input position.  Returns 1 (and consumes/neutralises the
   marker in the input buffer) when found, 0 otherwise.  */

int
aarch64_data_in_code (void)
{
  /* input_line_pointer points at the character before "data:".  */
  if (!strncmp (input_line_pointer + 1, "data:", 5))
    {
      /* Overwrite the marker so it is not re-parsed, then terminate the
	 symbol name in place.  */
      *input_line_pointer = '/';
      input_line_pointer += 5;
      *input_line_pointer = 0;
      return 1;
    }

  return 0;
}
7126
/* Canonicalize NAME by stripping a trailing "/data" marker (attached by
   the data-in-code handling) in place.  Returns NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t length = strlen (name);

  if (length > 5 && strcmp (name + length - 5, "/data") == 0)
    name[length - 5] = '\0';

  return name;
}
7137 \f
7138 /* Table of all register names defined by default. The user can
7139 define additional names with .req. Note that all register names
7140 should appear in both upper and lowercase variants. Some registers
7141 also have mixed-case names. */
7142
/* Helper macros for building the register-name table below.
   NOTE(review): the final boolean initializer presumably marks whether
   the entry is a builtin name (TRUE) as opposed to an alias (FALSE) --
   confirm against the reg_entry definition.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Expand to entries for registers p0..p15 of type t.  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* Expand to entries for registers p0..p30 of type t (register 31 is
   special-cased as SP/ZR for the integer file).  */
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
/* Expand to entries for registers p0..p31 of type t.  */
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table. */
static const reg_entry reg_names[] = {
  /* Integer registers. */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  /* AAPCS64 conventional aliases for x16/x17/x29/x30.  */
  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  /* Register number 31 as the zero register.  */
  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers. */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers. */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers. */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers. */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers. */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers. */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers. */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers. */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7207
/* Build the table of all 16 spellings of the NZCV condition-flag mask.
   Uppercase letter = flag bit set, lowercase = clear; B packs the four
   bits into a single value (N is bit 3, V is bit 0).  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7245 \f
7246 /* MD interface: bits in the object file. */
7247
7248 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7249 for use in the a.out file, and stores them in the array pointed to by buf.
7250 This knows about the endian-ness of the target machine and does
7251 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7252 2 (short) and 4 (long) Floating numbers are put out as a series of
7253 LITTLENUMS (shorts, here at least). */
7254
7255 void
7256 md_number_to_chars (char *buf, valueT val, int n)
7257 {
7258 if (target_big_endian)
7259 number_to_chars_bigendian (buf, val, n);
7260 else
7261 number_to_chars_littleendian (buf, val, n);
7262 }
7263
7264 /* MD interface: Sections. */
7265
7266 /* Estimate the size of a frag before relaxing. Assume everything fits in
7267 4 bytes. */
7268
int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Every AArch64 instruction is exactly 4 bytes.  */
  fragp->fr_var = 4;
  return 4;
}
7275
7276 /* Round up a section size to the appropriate boundary. */
7277
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No extra rounding is required for AArch64 sections.  */
  return size;
}
7283
7284 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7285 of an rs_align_code fragment.
7286
7287 Here we fill the frag with the appropriate info for padding the
7288 output stream. The resulting frag will consist of a fixed (fr_fix)
7289 and of a repeating (fr_var) part.
7290
7291 The fixed content is always emitted before the repeating content and
7292 these two parts are used as follows in constructing the output:
7293 - the fixed part will be used to align to a valid instruction word
7294 boundary, in case that we start at a misaligned address; as no
7295 executable instruction can live at the misaligned location, we
7296 simply fill with zeros;
7297 - the variable part will be used to cover the remaining padding and
7298 we fill using the AArch64 NOP instruction.
7299
7300 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7301 enough storage space for up to 3 bytes for padding the back to a valid
7302 instruction alignment and exactly 4 bytes to store the NOP pattern. */
7303
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian. */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding this frag must cover, and where its literal bytes
     start.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* FIX is the number of bytes needed to reach 4-byte instruction
     alignment; they are zero-filled since no instruction can start at a
     misaligned address.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The variable part repeats the NOP pattern for the rest of the
     padding.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
7341
7342 /* Perform target specific initialisation of a frag.
7343 Note - despite the name this initialisation is not done when the frag
7344 is created, but only when its type is assigned. A frag can be created
7345 and used a long time before its type is set, so beware of assuming that
7346 this initialisation is performed first. */
7347
7348 #ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag mapping state, so there is nothing
   to initialise.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}
7354
7355 #else /* OBJ_ELF is defined. */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags. We will delete this
     later if the alignment ends up empty. */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools. */
  if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the $x/$d mapping state appropriate to the frag type.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state. */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
7387 \f
7388 /* Initialize the DWARF-2 unwind information for this procedure. */
7389
void
tc_aarch64_frame_initial_instructions (void)
{
  /* The CFA starts at SP with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
7395 #endif /* OBJ_ELF */
7396
7397 /* Convert REGNAME to a DWARF-2 register number. */
7398
7399 int
7400 tc_aarch64_regname_to_dw2regnum (char *regname)
7401 {
7402 const reg_entry *reg = parse_reg (&regname);
7403 if (reg == NULL)
7404 return -1;
7405
7406 switch (reg->type)
7407 {
7408 case REG_TYPE_SP_32:
7409 case REG_TYPE_SP_64:
7410 case REG_TYPE_R_32:
7411 case REG_TYPE_R_64:
7412 return reg->number;
7413
7414 case REG_TYPE_FP_B:
7415 case REG_TYPE_FP_H:
7416 case REG_TYPE_FP_S:
7417 case REG_TYPE_FP_D:
7418 case REG_TYPE_FP_Q:
7419 return reg->number + 64;
7420
7421 default:
7422 break;
7423 }
7424 return -1;
7425 }
7426
7427 /* Implement DWARF2_ADDR_SIZE. */
7428
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7438
7439 /* MD interface: Symbol and relocation handling. */
7440
7441 /* Return the address within the segment that a PC-relative fixup is
7442 relative to. For AArch64 PC-relative fixups applied to instructions
7443 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7444
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need. Otherwise we want to use the calculated base.
     A relocation is emitted when the target symbol is in another
     segment or when the fixup is forced to stay a relocation.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations. */
  return base + AARCH64_PCREL_OFFSET;
}
7461
7462 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
7463 Otherwise we have no need to default values of symbols. */
7464
symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap prefix check before the full string comparison.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  /* The GOT symbol must be created here, exactly once.  */
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
7487
7488 /* Return non-zero if the indicated VALUE has overflowed the maximum
7489 range expressible by a unsigned number with the indicated number of
7490 BITS. */
7491
7492 static bfd_boolean
7493 unsigned_overflow (valueT value, unsigned bits)
7494 {
7495 valueT lim;
7496 if (bits >= sizeof (valueT) * 8)
7497 return FALSE;
7498 lim = (valueT) 1 << bits;
7499 return (value >= lim);
7500 }
7501
7502
7503 /* Return non-zero if the indicated VALUE has overflowed the maximum
7504 range expressible by an signed number with the indicated number of
7505 BITS. */
7506
7507 static bfd_boolean
7508 signed_overflow (offsetT value, unsigned bits)
7509 {
7510 offsetT lim;
7511 if (bits >= sizeof (offsetT) * 8)
7512 return FALSE;
7513 lim = (offsetT) 1 << (bits - 1);
7514 return (value < -lim || value >= lim);
7515 }
7516
7517 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7518 unsigned immediate offset load/store instruction, try to encode it as
7519 an unscaled, 9-bit, signed immediate offset load/store instruction.
7520 Return TRUE if it is successful; otherwise return FALSE.
7521
7522 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7523 in response to the standard LDR/STR mnemonics when the immediate offset is
7524 unambiguous, i.e. when it is negative or unaligned. */
7525
static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled (LDUR/STUR) twin.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  /* No unscaled counterpart exists for this opcode.  */
  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier. */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure here means the
     operands do not fit the unscaled form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return FALSE;

  return TRUE;
}
7579
7580 /* Called by fix_insn to fix a MOV immediate alias instruction.
7581
7582 Operand for a generic move immediate instruction, which is an alias
7583 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
7584 a 32-bit/64-bit immediate value into general register. An assembler error
7585 shall result if the immediate cannot be created by a single one of these
7586 instructions. If there is a choice, then to ensure reversability an
7587 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
7588
static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR. The check has to be done
     before any aarch64_replace_opcode. */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias first: MOVZ is preferred to MOVN for
	 reversability (see the function comment above). */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN).  The contract is a single
	 MOVZ, MOVN or ORR -- the previous comment here saying "MOVK" was
	 misleading. */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias (bitmask immediate). */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
7640
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* 16-bit immediate of an exception-generating instruction, patched
	 directly into the already-emitted instruction word.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2  2  2 21111 111111
		  1  098|7654|3  2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm   imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm   imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm   imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm   imm6   Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2   221111111111
		  1  098|7654|3 2   109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  A negative immediate is absorbed by flipping
	     the ADD/SUB opcode and negating the value.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only possible
	     when the low 12 bits are zero and the shifted value still
	     fits in the (shifted) 12-bit field.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction rather than
	 patching bits, since the bitmask encoding is non-trivial.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate (MOVZ/MOVN/MOVK imm16 field).  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* A scaled-offset load/store whose offset doesn't fit may
	     still be representable as the unscaled (LDUR/STUR) form.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7818
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* Flags for fix_insn (e.g. FIXUP_F_HAS_EXPLICIT_SHIFT), stashed in
     fx_addnumber when the fixup was created.  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* LDR (literal): 19-bit word-scaled PC-relative offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* ADR: 21-bit signed byte offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* B.cond / CBZ-class: 19-bit word-scaled offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* TBZ/TBNZ: 14-bit word-scaled offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* BL / B: 26-bit word-scaled offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* The MOVW family: SCALE selects which 16-bit group of the value
       goes into the imm16 field; all variants share movw_common.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo reloc: resolved to the 32-bit or 64-bit variant
	 depending on the selected ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Pseudo reloc: resolved according to ilp32_p, as above.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      /* Marker relocations for TLS descriptor sequences; nothing to
	 patch here.  */
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated the struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually use
     this field, so the impact on the performance should be minimal .  */
  if (fixP->tc_fix_data.inst != NULL)
    free (fixP->tc_fix_data.inst);

  return;
}
8206
8207 /* Translate internal representation of relocation info to BFD target
8208 format. */
8209
8210 arelent *
8211 tc_gen_reloc (asection * section, fixS * fixp)
8212 {
8213 arelent *reloc;
8214 bfd_reloc_code_real_type code;
8215
8216 reloc = XNEW (arelent);
8217
8218 reloc->sym_ptr_ptr = XNEW (asymbol *);
8219 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8220 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8221
8222 if (fixp->fx_pcrel)
8223 {
8224 if (section->use_rela_p)
8225 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8226 else
8227 fixp->fx_offset = reloc->address;
8228 }
8229 reloc->addend = fixp->fx_offset;
8230
8231 code = fixp->fx_r_type;
8232 switch (code)
8233 {
8234 case BFD_RELOC_16:
8235 if (fixp->fx_pcrel)
8236 code = BFD_RELOC_16_PCREL;
8237 break;
8238
8239 case BFD_RELOC_32:
8240 if (fixp->fx_pcrel)
8241 code = BFD_RELOC_32_PCREL;
8242 break;
8243
8244 case BFD_RELOC_64:
8245 if (fixp->fx_pcrel)
8246 code = BFD_RELOC_64_PCREL;
8247 break;
8248
8249 default:
8250 break;
8251 }
8252
8253 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8254 if (reloc->howto == NULL)
8255 {
8256 as_bad_where (fixp->fx_file, fixp->fx_line,
8257 _
8258 ("cannot represent %s relocation in this object file format"),
8259 bfd_get_reloc_code_name (code));
8260 return NULL;
8261 }
8262
8263 return reloc;
8264 }
8265
8266 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8267
8268 void
8269 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8270 {
8271 bfd_reloc_code_real_type type;
8272 int pcrel = 0;
8273
8274 /* Pick a reloc.
8275 FIXME: @@ Should look at CPU word size. */
8276 switch (size)
8277 {
8278 case 1:
8279 type = BFD_RELOC_8;
8280 break;
8281 case 2:
8282 type = BFD_RELOC_16;
8283 break;
8284 case 4:
8285 type = BFD_RELOC_32;
8286 break;
8287 case 8:
8288 type = BFD_RELOC_64;
8289 break;
8290 default:
8291 as_bad (_("cannot do %u-byte relocation"), size);
8292 type = BFD_RELOC_UNUSED;
8293 break;
8294 }
8295
8296 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8297 }
8298
/* Decide whether the fixup FIXP must be kept as a relocation in the
   object file (return 1), may be resolved by the assembler (return 0),
   or should be left to the generic policy (generic_force_reloc).  */

int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  return generic_force_reloc (fixp);
}
8394
8395 #ifdef OBJ_ELF
8396
8397 /* Implement md_after_parse_args. This is the earliest time we need to decide
8398 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8399
8400 void
8401 aarch64_after_parse_args (void)
8402 {
8403 if (aarch64_abi != AARCH64_ABI_NONE)
8404 return;
8405
8406 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8407 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8408 aarch64_abi = AARCH64_ABI_ILP32;
8409 else
8410 aarch64_abi = AARCH64_ABI_LP64;
8411 }
8412
8413 const char *
8414 elf64_aarch64_target_format (void)
8415 {
8416 #ifdef TE_CLOUDABI
8417 /* FIXME: What to do for ilp32_p ? */
8418 if (target_big_endian)
8419 return "elf64-bigaarch64-cloudabi";
8420 else
8421 return "elf64-littleaarch64-cloudabi";
8422 #else
8423 if (target_big_endian)
8424 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8425 else
8426 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8427 #endif
8428 }
8429
/* Implement TC's symbol frobbing hook; nothing target-specific is
   needed beyond the generic ELF processing.  */

void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8435 #endif
8436
8437 /* MD interface: Finalization. */
8438
8439 /* A good place to do this, although this was probably not intended
8440 for this kind of use. We need to dump the literal pool before
8441 references are made to a null symbol pointer. */
8442
8443 void
8444 aarch64_cleanup (void)
8445 {
8446 literal_pool *pool;
8447
8448 for (pool = list_of_pools; pool; pool = pool->next)
8449 {
8450 /* Put it at the end of the relevant section. */
8451 subseg_set (pool->section, pool->sub_section);
8452 s_ltorg (0);
8453 }
8454 }
8455
8456 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections from
   aarch64_adjust_symtab.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the next frag; walk the
	 following (possibly empty) frags to decide whether it is
	 redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
	         one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
	         it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8520 #endif
8521
/* Adjust the symbol table.  For ELF, drop redundant mapping symbols
   created around alignment frags, then run the generic ELF pass.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8534
/* Insert KEY/VALUE into TABLE, treating any failure as an internal
   error.  hash_insert only fails on a duplicate key or memory
   exhaustion; either way the assembler's internal tables would be
   left half-built, so abort instead of printing a message to stdout
   and carrying on (which is what this code used to do).  */

static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *hash_err = hash_insert (table, key, value);

  if (hash_err)
    as_fatal (_("internal error: can't hash `%s': %s"), key, hash_err);
}
8544
8545 static void
8546 fill_instruction_hash_table (void)
8547 {
8548 aarch64_opcode *opcode = aarch64_opcode_table;
8549
8550 while (opcode->name != NULL)
8551 {
8552 templates *templ, *new_templ;
8553 templ = hash_find (aarch64_ops_hsh, opcode->name);
8554
8555 new_templ = XNEW (templates);
8556 new_templ->opcode = opcode;
8557 new_templ->next = NULL;
8558
8559 if (!templ)
8560 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8561 else
8562 {
8563 new_templ->next = templ->next;
8564 templ->next = new_templ;
8565 }
8566 ++opcode;
8567 }
8568 }
8569
8570 static inline void
8571 convert_to_upper (char *dst, const char *src, size_t num)
8572 {
8573 unsigned int i;
8574 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8575 *dst = TOUPPER (*src);
8576 *dst = '\0';
8577 }
8578
8579 /* Assume STR point to a lower-case string, allocate, convert and return
8580 the corresponding upper-case string. */
8581 static inline const char*
8582 get_upper_str (const char *str)
8583 {
8584 char *ret;
8585 size_t len = strlen (str);
8586 ret = XNEWVEC (char, len + 1);
8587 convert_to_upper (ret, str, len);
8588 return ret;
8589 }
8590
8591 /* MD interface: Initialization. */
8592
8593 void
8594 md_begin (void)
8595 {
8596 unsigned mach;
8597 unsigned int i;
8598
8599 if ((aarch64_ops_hsh = hash_new ()) == NULL
8600 || (aarch64_cond_hsh = hash_new ()) == NULL
8601 || (aarch64_shift_hsh = hash_new ()) == NULL
8602 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8603 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8604 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8605 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8606 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8607 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8608 || (aarch64_sys_regs_sr_hsh = hash_new ()) == NULL
8609 || (aarch64_reg_hsh = hash_new ()) == NULL
8610 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8611 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8612 || (aarch64_pldop_hsh = hash_new ()) == NULL
8613 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8614 as_fatal (_("virtual memory exhausted"));
8615
8616 fill_instruction_hash_table ();
8617
8618 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8619 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8620 (void *) (aarch64_sys_regs + i));
8621
8622 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8623 checked_hash_insert (aarch64_pstatefield_hsh,
8624 aarch64_pstatefields[i].name,
8625 (void *) (aarch64_pstatefields + i));
8626
8627 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8628 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8629 aarch64_sys_regs_ic[i].name,
8630 (void *) (aarch64_sys_regs_ic + i));
8631
8632 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8633 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8634 aarch64_sys_regs_dc[i].name,
8635 (void *) (aarch64_sys_regs_dc + i));
8636
8637 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8638 checked_hash_insert (aarch64_sys_regs_at_hsh,
8639 aarch64_sys_regs_at[i].name,
8640 (void *) (aarch64_sys_regs_at + i));
8641
8642 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8643 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8644 aarch64_sys_regs_tlbi[i].name,
8645 (void *) (aarch64_sys_regs_tlbi + i));
8646
8647 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
8648 checked_hash_insert (aarch64_sys_regs_sr_hsh,
8649 aarch64_sys_regs_sr[i].name,
8650 (void *) (aarch64_sys_regs_sr + i));
8651
8652 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8653 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8654 (void *) (reg_names + i));
8655
8656 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8657 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8658 (void *) (nzcv_names + i));
8659
8660 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8661 {
8662 const char *name = aarch64_operand_modifiers[i].name;
8663 checked_hash_insert (aarch64_shift_hsh, name,
8664 (void *) (aarch64_operand_modifiers + i));
8665 /* Also hash the name in the upper case. */
8666 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8667 (void *) (aarch64_operand_modifiers + i));
8668 }
8669
8670 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8671 {
8672 unsigned int j;
8673 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8674 the same condition code. */
8675 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8676 {
8677 const char *name = aarch64_conds[i].names[j];
8678 if (name == NULL)
8679 break;
8680 checked_hash_insert (aarch64_cond_hsh, name,
8681 (void *) (aarch64_conds + i));
8682 /* Also hash the name in the upper case. */
8683 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8684 (void *) (aarch64_conds + i));
8685 }
8686 }
8687
8688 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8689 {
8690 const char *name = aarch64_barrier_options[i].name;
8691 /* Skip xx00 - the unallocated values of option. */
8692 if ((i & 0x3) == 0)
8693 continue;
8694 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8695 (void *) (aarch64_barrier_options + i));
8696 /* Also hash the name in the upper case. */
8697 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8698 (void *) (aarch64_barrier_options + i));
8699 }
8700
8701 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8702 {
8703 const char* name = aarch64_prfops[i].name;
8704 /* Skip the unallocated hint encodings. */
8705 if (name == NULL)
8706 continue;
8707 checked_hash_insert (aarch64_pldop_hsh, name,
8708 (void *) (aarch64_prfops + i));
8709 /* Also hash the name in the upper case. */
8710 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8711 (void *) (aarch64_prfops + i));
8712 }
8713
8714 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8715 {
8716 const char* name = aarch64_hint_options[i].name;
8717
8718 checked_hash_insert (aarch64_hint_opt_hsh, name,
8719 (void *) (aarch64_hint_options + i));
8720 /* Also hash the name in the upper case. */
8721 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8722 (void *) (aarch64_hint_options + i));
8723 }
8724
8725 /* Set the cpu variant based on the command-line options. */
8726 if (!mcpu_cpu_opt)
8727 mcpu_cpu_opt = march_cpu_opt;
8728
8729 if (!mcpu_cpu_opt)
8730 mcpu_cpu_opt = &cpu_default;
8731
8732 cpu_variant = *mcpu_cpu_opt;
8733
8734 /* Record the CPU type. */
8735 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8736
8737 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8738 }
8739
/* Command line processing. */

/* Short options understood by this backend: just "-m<arg>".  */
const char *md_shortopts = "m:";

/* Define the -EB/-EL option codes only when they make sense for the
   configured target endianness.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* Long options table handed to getopt; entries are conditional on the
   endianness options defined above.  */
struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8766
/* Table entry for a simple "-m..." flag that stores an int into a
   variable; see aarch64_opts below and md_parse_option.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
8775
/* Simple flag options recognised by md_parse_option.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  /* Terminating entry.  */
  {NULL, NULL, NULL, 0, NULL}
};
8789
/* Entry for a CPU recognised by -mcpu= / .cpu: its name, the feature
   set it implies, and an optional display name.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
8798
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Each entry pairs the -mcpu= name with the
   architecture-plus-extension feature set that CPU implements.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A65AE"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
			    "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				 "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
			      "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
				   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
				   "Neoverse N1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
			       "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
			       "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				"Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
			      "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  /* Terminating entry.  */
  {NULL, AARCH64_ARCH_NONE, NULL}
};
8890
/* Entry for an architecture recognised by -march= / .arch: its name
   and the feature set it implies.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};
8896
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  /* Terminating entry.  */
  {NULL, AARCH64_ARCH_NONE}
};
8909
/* ISA extensions. */
/* Entry for a "+ext" architectural-extension name: the feature bits
   the extension enables and the feature bits it depends on.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies. */
};
8917
/* Architectural extensions accepted after "+" in -mcpu=/-march= and in
   .arch_extension.  The third field lists dependencies: enabling an
   extension pulls in its requirements, and disabling a feature also
   disables everything that requires it (see the closure helpers
   below).  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO
					 | AARCH64_FEATURE_AES
					 | AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2
					 | AARCH64_FEATURE_SHA3, 0),
			AARCH64_ARCH_NONE},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  /* Terminating entry.  */
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
8993
/* Table entry for a long "-m<name>=<value>" option whose value is
   handled by a dedicated sub-option parser.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
9001
9002 /* Transitive closure of features depending on set. */
9003 static aarch64_feature_set
9004 aarch64_feature_disable_set (aarch64_feature_set set)
9005 {
9006 const struct aarch64_option_cpu_value_table *opt;
9007 aarch64_feature_set prev = 0;
9008
9009 while (prev != set) {
9010 prev = set;
9011 for (opt = aarch64_features; opt->name != NULL; opt++)
9012 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9013 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9014 }
9015 return set;
9016 }
9017
9018 /* Transitive closure of dependencies of set. */
9019 static aarch64_feature_set
9020 aarch64_feature_enable_set (aarch64_feature_set set)
9021 {
9022 const struct aarch64_option_cpu_value_table *opt;
9023 aarch64_feature_set prev = 0;
9024
9025 while (prev != set) {
9026 prev = set;
9027 for (opt = aarch64_features; opt->name != NULL; opt++)
9028 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9029 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9030 }
9031 return set;
9032 }
9033
9034 static int
9035 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9036 bfd_boolean ext_only)
9037 {
9038 /* We insist on extensions being added before being removed. We achieve
9039 this by using the ADDING_VALUE variable to indicate whether we are
9040 adding an extension (1) or removing it (0) and only allowing it to
9041 change in the order -1 -> 1 -> 0. */
9042 int adding_value = -1;
9043 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9044
9045 /* Copy the feature set, so that we can modify it. */
9046 *ext_set = **opt_p;
9047 *opt_p = ext_set;
9048
9049 while (str != NULL && *str != 0)
9050 {
9051 const struct aarch64_option_cpu_value_table *opt;
9052 const char *ext = NULL;
9053 int optlen;
9054
9055 if (!ext_only)
9056 {
9057 if (*str != '+')
9058 {
9059 as_bad (_("invalid architectural extension"));
9060 return 0;
9061 }
9062
9063 ext = strchr (++str, '+');
9064 }
9065
9066 if (ext != NULL)
9067 optlen = ext - str;
9068 else
9069 optlen = strlen (str);
9070
9071 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
9072 {
9073 if (adding_value != 0)
9074 adding_value = 0;
9075 optlen -= 2;
9076 str += 2;
9077 }
9078 else if (optlen > 0)
9079 {
9080 if (adding_value == -1)
9081 adding_value = 1;
9082 else if (adding_value != 1)
9083 {
9084 as_bad (_("must specify extensions to add before specifying "
9085 "those to remove"));
9086 return FALSE;
9087 }
9088 }
9089
9090 if (optlen == 0)
9091 {
9092 as_bad (_("missing architectural extension"));
9093 return 0;
9094 }
9095
9096 gas_assert (adding_value != -1);
9097
9098 for (opt = aarch64_features; opt->name != NULL; opt++)
9099 if (strncmp (opt->name, str, optlen) == 0)
9100 {
9101 aarch64_feature_set set;
9102
9103 /* Add or remove the extension. */
9104 if (adding_value)
9105 {
9106 set = aarch64_feature_enable_set (opt->value);
9107 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9108 }
9109 else
9110 {
9111 set = aarch64_feature_disable_set (opt->value);
9112 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9113 }
9114 break;
9115 }
9116
9117 if (opt->name == NULL)
9118 {
9119 as_bad (_("unknown architectural extension `%s'"), str);
9120 return 0;
9121 }
9122
9123 str = ext;
9124 };
9125
9126 return 1;
9127 }
9128
9129 static int
9130 aarch64_parse_cpu (const char *str)
9131 {
9132 const struct aarch64_cpu_option_table *opt;
9133 const char *ext = strchr (str, '+');
9134 size_t optlen;
9135
9136 if (ext != NULL)
9137 optlen = ext - str;
9138 else
9139 optlen = strlen (str);
9140
9141 if (optlen == 0)
9142 {
9143 as_bad (_("missing cpu name `%s'"), str);
9144 return 0;
9145 }
9146
9147 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9148 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9149 {
9150 mcpu_cpu_opt = &opt->value;
9151 if (ext != NULL)
9152 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
9153
9154 return 1;
9155 }
9156
9157 as_bad (_("unknown cpu `%s'"), str);
9158 return 0;
9159 }
9160
9161 static int
9162 aarch64_parse_arch (const char *str)
9163 {
9164 const struct aarch64_arch_option_table *opt;
9165 const char *ext = strchr (str, '+');
9166 size_t optlen;
9167
9168 if (ext != NULL)
9169 optlen = ext - str;
9170 else
9171 optlen = strlen (str);
9172
9173 if (optlen == 0)
9174 {
9175 as_bad (_("missing architecture name `%s'"), str);
9176 return 0;
9177 }
9178
9179 for (opt = aarch64_archs; opt->name != NULL; opt++)
9180 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9181 {
9182 march_cpu_opt = &opt->value;
9183 if (ext != NULL)
9184 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
9185
9186 return 1;
9187 }
9188
9189 as_bad (_("unknown architecture `%s'\n"), str);
9190 return 0;
9191 }
9192
/* ABIs. */
/* Entry mapping a -mabi= name to the corresponding ABI enumerator.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* ABIs accepted by -mabi=; iterated with ARRAY_SIZE, so no terminating
   entry is needed.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32",	AARCH64_ABI_ILP32},
  {"lp64",	AARCH64_ABI_LP64},
};
9204
9205 static int
9206 aarch64_parse_abi (const char *str)
9207 {
9208 unsigned int i;
9209
9210 if (str[0] == '\0')
9211 {
9212 as_bad (_("missing abi name `%s'"), str);
9213 return 0;
9214 }
9215
9216 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9217 if (strcmp (str, aarch64_abis[i].name) == 0)
9218 {
9219 aarch64_abi = aarch64_abis[i].value;
9220 return 1;
9221 }
9222
9223 as_bad (_("unknown abi `%s'\n"), str);
9224 return 0;
9225 }
9226
/* Long "-m<name>=<value>" options and their sub-option parsers; see
   md_parse_option for how the value substring is located.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  /* Terminating entry.  */
  {NULL, NULL, 0, NULL}
};
9238
/* GAS callback: handle one command-line option C with argument ARG.
   Returns 1 if the option was consumed, 0 if it is not ours.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple flag options.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  The "- 1" compensates for
		 the option's leading character, which getopt already
		 stripped from ARG but which is counted in OPTION's
		 length, leaving ARG pointing at the value after '='.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      /* Not one of ours.  */
      return 0;
    }

  return 1;
}
9306
/* GAS callback: print the AArch64-specific option summary to FP for
   "as --help".  */
void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  /* Simple flag options.  */
  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  /* Long "-m<name>=<value>" options.  */
  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
 -EB assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
 -EL assemble code for a little-endian cpu\n"));
#endif
}
9333
/* Parse a .cpu directive.  Selects a new base CPU (optionally with a
   "+ext" suffix) mid-assembly by updating mcpu_cpu_opt/cpu_variant.  */

static void
s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_cpu_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  /* Isolate the operand: temporarily NUL-terminate it in the input
     buffer, remembering the character we overwrote.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* A '+' introduces an optional extension suffix.  */
  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry. */
  for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
	&& strncmp (name, opt->name, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	if (ext != NULL)
	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
	    /* NOTE(review): this error path returns without restoring
	       SAVED_CHAR into the input buffer — confirm whether that
	       is acceptable (aarch64_parse_features has already issued
	       the diagnostic).  */
	    return;

	cpu_variant = *mcpu_cpu_opt;

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
9378
9379
9380 /* Parse a .arch directive. */
9381
9382 static void
9383 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9384 {
9385 const struct aarch64_arch_option_table *opt;
9386 char saved_char;
9387 char *name;
9388 char *ext;
9389 size_t optlen;
9390
9391 name = input_line_pointer;
9392 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9393 input_line_pointer++;
9394 saved_char = *input_line_pointer;
9395 *input_line_pointer = 0;
9396
9397 ext = strchr (name, '+');
9398
9399 if (ext != NULL)
9400 optlen = ext - name;
9401 else
9402 optlen = strlen (name);
9403
9404 /* Skip the first "all" entry. */
9405 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9406 if (strlen (opt->name) == optlen
9407 && strncmp (name, opt->name, optlen) == 0)
9408 {
9409 mcpu_cpu_opt = &opt->value;
9410 if (ext != NULL)
9411 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9412 return;
9413
9414 cpu_variant = *mcpu_cpu_opt;
9415
9416 *input_line_pointer = saved_char;
9417 demand_empty_rest_of_line ();
9418 return;
9419 }
9420
9421 as_bad (_("unknown architecture `%s'\n"), name);
9422 *input_line_pointer = saved_char;
9423 ignore_rest_of_line ();
9424 }
9425
9426 /* Parse a .arch_extension directive. */
9427
9428 static void
9429 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9430 {
9431 char saved_char;
9432 char *ext = input_line_pointer;;
9433
9434 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9435 input_line_pointer++;
9436 saved_char = *input_line_pointer;
9437 *input_line_pointer = 0;
9438
9439 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9440 return;
9441
9442 cpu_variant = *mcpu_cpu_opt;
9443
9444 *input_line_pointer = saved_char;
9445 demand_empty_rest_of_line ();
9446 }
9447
/* Copy symbol information. */

/* Propagate the AArch64-specific symbol flag word from SRC to DEST.  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
9455
9456 #ifdef OBJ_ELF
9457 /* Same as elf_copy_symbol_attributes, but without copying st_other.
9458 This is needed so AArch64 specific st_other values can be independently
9459 specified for an IFUNC resolver (that is called by the dynamic linker)
9460 and the symbol it resolves (aliased to the resolver). In particular,
9461 if a function symbol has special st_other value set via directives,
9462 then attaching an IFUNC resolver to that symbol should not override
9463 the st_other setting. Requiring the directive on the IFUNC resolver
9464 symbol would be unexpected and problematic in C code, where the two
9465 symbols appear as two independent function declarations. */
9466
9467 void
9468 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
9469 {
9470 struct elf_obj_sy *srcelf = symbol_get_obj (src);
9471 struct elf_obj_sy *destelf = symbol_get_obj (dest);
9472 if (srcelf->size)
9473 {
9474 if (destelf->size == NULL)
9475 destelf->size = XNEW (expressionS);
9476 *destelf->size = *srcelf->size;
9477 }
9478 else
9479 {
9480 if (destelf->size != NULL)
9481 free (destelf->size);
9482 destelf->size = NULL;
9483 }
9484 S_SET_SIZE (dest, S_GET_SIZE (src));
9485 }
9486 #endif
This page took 0.215443 seconds and 5 git commands to generate.