1f8d94ea1e295782402d0deda298e18627332dd3
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2019 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
106
107 struct vector_type_el
108 {
109 enum vector_el_type type;
110 unsigned char defined;
111 unsigned width;
112 int64_t index;
113 };
114
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
116
117 struct reloc
118 {
119 bfd_reloc_code_real_type type;
120 expressionS exp;
121 int pc_rel;
122 enum aarch64_opnd opnd;
123 uint32_t flags;
124 unsigned need_libopcodes_p : 1;
125 };
126
127 struct aarch64_instruction
128 {
129 /* libopcodes structure for instruction intermediate representation. */
130 aarch64_inst base;
131 /* Record assembly errors found during the parsing. */
132 struct
133 {
134 enum aarch64_operand_error_kind kind;
135 const char *error;
136 } parsing_error;
137 /* The condition that appears in the assembly line. */
138 int cond;
139 /* Relocation information (including the GAS internal fixup). */
140 struct reloc reloc;
141 /* Need to generate an immediate in the literal pool. */
142 unsigned gen_lit_pool : 1;
143 };
144
145 typedef struct aarch64_instruction aarch64_instruction;
146
147 static aarch64_instruction inst;
148
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
183 static inline bfd_boolean
184 error_p (void)
185 {
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188
189 static inline const char *
190 get_error_message (void)
191 {
192 return inst.parsing_error.error;
193 }
194
195 static inline enum aarch64_operand_error_kind
196 get_error_kind (void)
197 {
198 return inst.parsing_error.kind;
199 }
200
201 static inline void
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
203 {
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
206 }
207
208 static inline void
209 set_recoverable_error (const char *error)
210 {
211 set_error (AARCH64_OPDE_RECOVERABLE, error);
212 }
213
214 /* Use the DESC field of the corresponding aarch64_operand entry to compose
215 the error message. */
216 static inline void
217 set_default_error (void)
218 {
219 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
220 }
221
222 static inline void
223 set_syntax_error (const char *error)
224 {
225 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
226 }
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
235 static inline void
236 set_fatal_syntax_error (const char *error)
237 {
238 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
239 }
240 \f
241 /* Return value for certain parsers when the parsing fails; those parsers
242 return the information of the parsed result, e.g. register number, on
243 success. */
244 #define PARSE_FAIL -1
245
246 /* This is an invalid condition code that means no conditional field is
247 present. */
248 #define COND_ALWAYS 0x10
249
250 typedef struct
251 {
252 const char *template;
253 unsigned long value;
254 } asm_barrier_opt;
255
256 typedef struct
257 {
258 const char *template;
259 uint32_t value;
260 } asm_nzcv;
261
262 struct reloc_entry
263 {
264 char *name;
265 bfd_reloc_code_real_type reloc;
266 };
267
268 /* Macros to define the register types and masks for the purpose
269 of parsing. */
270
271 #undef AARCH64_REG_TYPES
272 #define AARCH64_REG_TYPES \
273 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
274 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
275 BASIC_REG_TYPE(SP_32) /* wsp */ \
276 BASIC_REG_TYPE(SP_64) /* sp */ \
277 BASIC_REG_TYPE(Z_32) /* wzr */ \
278 BASIC_REG_TYPE(Z_64) /* xzr */ \
279 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
280 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
281 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
282 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
283 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
284 BASIC_REG_TYPE(VN) /* v[0-31] */ \
285 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
286 BASIC_REG_TYPE(PN) /* p[0-15] */ \
287 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
288 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
289 /* Typecheck: same, plus SVE registers. */ \
290 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
291 | REG_TYPE(ZN)) \
292 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
293 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Typecheck: same, plus SVE registers. */ \
296 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
298 | REG_TYPE(ZN)) \
299 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
300 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
301 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
302 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
303 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
306 /* Typecheck: any [BHSDQ]P FP. */ \
307 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
308 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
309 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
310 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
311 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
312 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
313 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
314 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
315 be used for SVE instructions, since Zn and Pn are valid symbols \
316 in other contexts. */ \
317 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
318 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
319 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
320 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
321 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
322 | REG_TYPE(ZN) | REG_TYPE(PN)) \
323 /* Any integer register; used for error messages only. */ \
324 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
325 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
326 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
327 /* Pseudo type to mark the end of the enumerator sequence. */ \
328 BASIC_REG_TYPE(MAX)
329
330 #undef BASIC_REG_TYPE
331 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
332 #undef MULTI_REG_TYPE
333 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
334
335 /* Register type enumerators. */
336 typedef enum aarch64_reg_type_
337 {
338 /* A list of REG_TYPE_*. */
339 AARCH64_REG_TYPES
340 } aarch64_reg_type;
341
342 #undef BASIC_REG_TYPE
343 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
344 #undef REG_TYPE
345 #define REG_TYPE(T) (1 << REG_TYPE_##T)
346 #undef MULTI_REG_TYPE
347 #define MULTI_REG_TYPE(T,V) V,
348
349 /* Structure for a hash table entry for a register. */
350 typedef struct
351 {
352 const char *name;
353 unsigned char number;
354 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
355 unsigned char builtin;
356 } reg_entry;
357
358 /* Values indexed by aarch64_reg_type to assist the type checking. */
359 static const unsigned reg_type_masks[] =
360 {
361 AARCH64_REG_TYPES
362 };
363
364 #undef BASIC_REG_TYPE
365 #undef REG_TYPE
366 #undef MULTI_REG_TYPE
367 #undef AARCH64_REG_TYPES
368
369 /* Diagnostics used when we don't get a register of the expected type.
370 Note: this has to synchronized with aarch64_reg_type definitions
371 above. */
372 static const char *
373 get_reg_expected_msg (aarch64_reg_type reg_type)
374 {
375 const char *msg;
376
377 switch (reg_type)
378 {
379 case REG_TYPE_R_32:
380 msg = N_("integer 32-bit register expected");
381 break;
382 case REG_TYPE_R_64:
383 msg = N_("integer 64-bit register expected");
384 break;
385 case REG_TYPE_R_N:
386 msg = N_("integer register expected");
387 break;
388 case REG_TYPE_R64_SP:
389 msg = N_("64-bit integer or SP register expected");
390 break;
391 case REG_TYPE_SVE_BASE:
392 msg = N_("base register expected");
393 break;
394 case REG_TYPE_R_Z:
395 msg = N_("integer or zero register expected");
396 break;
397 case REG_TYPE_SVE_OFFSET:
398 msg = N_("offset register expected");
399 break;
400 case REG_TYPE_R_SP:
401 msg = N_("integer or SP register expected");
402 break;
403 case REG_TYPE_R_Z_SP:
404 msg = N_("integer, zero or SP register expected");
405 break;
406 case REG_TYPE_FP_B:
407 msg = N_("8-bit SIMD scalar register expected");
408 break;
409 case REG_TYPE_FP_H:
410 msg = N_("16-bit SIMD scalar or floating-point half precision "
411 "register expected");
412 break;
413 case REG_TYPE_FP_S:
414 msg = N_("32-bit SIMD scalar or floating-point single precision "
415 "register expected");
416 break;
417 case REG_TYPE_FP_D:
418 msg = N_("64-bit SIMD scalar or floating-point double precision "
419 "register expected");
420 break;
421 case REG_TYPE_FP_Q:
422 msg = N_("128-bit SIMD scalar or floating-point quad precision "
423 "register expected");
424 break;
425 case REG_TYPE_R_Z_BHSDQ_V:
426 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
427 msg = N_("register expected");
428 break;
429 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
430 msg = N_("SIMD scalar or floating-point register expected");
431 break;
432 case REG_TYPE_VN: /* any V reg */
433 msg = N_("vector register expected");
434 break;
435 case REG_TYPE_ZN:
436 msg = N_("SVE vector register expected");
437 break;
438 case REG_TYPE_PN:
439 msg = N_("SVE predicate register expected");
440 break;
441 default:
442 as_fatal (_("invalid register type %d"), reg_type);
443 }
444 return msg;
445 }
446
447 /* Some well known registers that we refer to directly elsewhere. */
448 #define REG_SP 31
449 #define REG_ZR 31
450
451 /* Instructions take 4 bytes in the object file. */
452 #define INSN_SIZE 4
453
454 static struct hash_control *aarch64_ops_hsh;
455 static struct hash_control *aarch64_cond_hsh;
456 static struct hash_control *aarch64_shift_hsh;
457 static struct hash_control *aarch64_sys_regs_hsh;
458 static struct hash_control *aarch64_pstatefield_hsh;
459 static struct hash_control *aarch64_sys_regs_ic_hsh;
460 static struct hash_control *aarch64_sys_regs_dc_hsh;
461 static struct hash_control *aarch64_sys_regs_at_hsh;
462 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
463 static struct hash_control *aarch64_sys_regs_sr_hsh;
464 static struct hash_control *aarch64_reg_hsh;
465 static struct hash_control *aarch64_barrier_opt_hsh;
466 static struct hash_control *aarch64_nzcv_hsh;
467 static struct hash_control *aarch64_pldop_hsh;
468 static struct hash_control *aarch64_hint_opt_hsh;
469
470 /* Stuff needed to resolve the label ambiguity
471 As:
472 ...
473 label: <insn>
474 may differ from:
475 ...
476 label:
477 <insn> */
478
479 static symbolS *last_label_seen;
480
481 /* Literal pool structure. Held on a per-section
482 and per-sub-section basis. */
483
484 #define MAX_LITERAL_POOL_SIZE 1024
485 typedef struct literal_expression
486 {
487 expressionS exp;
488 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
489 LITTLENUM_TYPE * bignum;
490 } literal_expression;
491
492 typedef struct literal_pool
493 {
494 literal_expression literals[MAX_LITERAL_POOL_SIZE];
495 unsigned int next_free_entry;
496 unsigned int id;
497 symbolS *symbol;
498 segT section;
499 subsegT sub_section;
500 int size;
501 struct literal_pool *next;
502 } literal_pool;
503
504 /* Pointer to a linked list of literal pools. */
505 static literal_pool *list_of_pools = NULL;
506 \f
507 /* Pure syntax. */
508
509 /* This array holds the chars that always start a comment. If the
510 pre-processor is disabled, these aren't very useful. */
511 const char comment_chars[] = "";
512
513 /* This array holds the chars that only start a comment at the beginning of
514 a line. If the line seems to have the form '# 123 filename'
515 .line and .file directives will appear in the pre-processed output. */
516 /* Note that input_file.c hand checks for '#' at the beginning of the
517 first line of the input file. This is because the compiler outputs
518 #NO_APP at the beginning of its output. */
519 /* Also note that comments like this one will always work. */
520 const char line_comment_chars[] = "#";
521
522 const char line_separator_chars[] = ";";
523
524 /* Chars that can be used to separate mant
525 from exp in floating point numbers. */
526 const char EXP_CHARS[] = "eE";
527
528 /* Chars that mean this number is a floating point constant. */
529 /* As in 0f12.456 */
530 /* or 0d1.2345e12 */
531
532 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
533
534 /* Prefix character that indicates the start of an immediate value. */
535 #define is_immediate_prefix(C) ((C) == '#')
536
537 /* Separator character handling. */
538
539 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
540
541 static inline bfd_boolean
542 skip_past_char (char **str, char c)
543 {
544 if (**str == c)
545 {
546 (*str)++;
547 return TRUE;
548 }
549 else
550 return FALSE;
551 }
552
553 #define skip_past_comma(str) skip_past_char (str, ',')
554
555 /* Arithmetic expressions (possibly involving symbols). */
556
557 static bfd_boolean in_my_get_expression_p = FALSE;
558
559 /* Third argument to my_get_expression. */
560 #define GE_NO_PREFIX 0
561 #define GE_OPT_PREFIX 1
562
/* Return TRUE if the string pointed by *STR is successfully parsed
   as an valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   PREFIX_MODE is GE_NO_PREFIX or GE_OPT_PREFIX; the latter accepts
   (and consumes) a leading '#' immediate prefix.  When REJECT_ABSENT
   is non-zero, an O_absent result is also treated as a failure.  On
   failure a syntax error is recorded (fatal if a '#' prefix was seen
   and no earlier error is pending).  In all cases *STR is advanced to
   where the expression parser stopped.  */

static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Temporarily redirect the global input pointer at *STR so the
     generic expression parser can be used; restored before return.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Let md_operand know to flag bad sub-expressions as O_illegal.  */
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
629
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  Delegates to the generic IEEE helper,
   honouring the target's endianness.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
640
641 /* We handle all bad expressions here, so that we can report the faulty
642 instruction in the error message. */
643 void
644 md_operand (expressionS * exp)
645 {
646 if (in_my_get_expression_p)
647 exp->X_op = O_illegal;
648 }
649
650 /* Immediate values. */
651
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is
   set will be the most meaningful.  Avoid overwriting it with later
   (cascading) errors by calling this function.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
663
/* Similar to first_error, but this function accepts a printf-style
   formatted error message.  Like first_error, it only records the
   message if no earlier error is pending.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message is expected to fit the buffer; a
	 negative or truncated result would trip this assertion.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
688
689 /* Register parsing. */
690
691 /* Generic register parser which is called by other specialized
692 register parsers.
693 CCP points to what should be the beginning of a register name.
694 If it is indeed a valid register name, advance CCP over it and
695 return the reg_entry structure; otherwise return NULL.
696 It does not issue diagnostics. */
697
698 static reg_entry *
699 parse_reg (char **ccp)
700 {
701 char *start = *ccp;
702 char *p;
703 reg_entry *reg;
704
705 #ifdef REGISTER_PREFIX
706 if (*start != REGISTER_PREFIX)
707 return NULL;
708 start++;
709 #endif
710
711 p = start;
712 if (!ISALPHA (*p) || !is_name_beginner (*p))
713 return NULL;
714
715 do
716 p++;
717 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
718
719 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
720
721 if (!reg)
722 return NULL;
723
724 *ccp = p;
725 return reg;
726 }
727
728 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
729 return FALSE. */
730 static bfd_boolean
731 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
732 {
733 return (reg_type_masks[type] & (1 << reg->type)) != 0;
734 }
735
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit GPR, WSP or WZR all take the W qualifier.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit GPR, SP or XZR all take the X qualifier.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* An SVE Z register is only valid here when REG_TYPE permits it,
	 and must carry an explicit ".s" or ".d" element-size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the two-character suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
793
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   Convenience wrapper around aarch64_addr_reg_parse accepting any
   32/64-bit integer, zero or SP register.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
805
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q

   On entry *STR points at the '.' that introduces the qualifier; on
   success *STR is advanced past it.  For SVE Z/P registers no numeric
   width is permitted (width is recorded as 0).  */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE registers and bare element letters have no element count.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only valid for SVE or as the 1q arrangement.  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* A counted arrangement must describe a 64- or 128-bit vector, or
     one of the short 2h/4b forms.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
891
892 /* *STR contains an SVE zero/merge predication suffix. Parse it into
893 *PARSED_TYPE and point *STR at the end of the suffix. */
894
895 static bfd_boolean
896 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
897 {
898 char *ptr = *str;
899
900 /* Skip '/'. */
901 gas_assert (*ptr == '/');
902 ptr++;
903 switch (TOLOWER (*ptr))
904 {
905 case 'z':
906 parsed_type->type = NT_zero;
907 break;
908 case 'm':
909 parsed_type->type = NT_merge;
910 break;
911 default:
912 if (*ptr != '\0' && *ptr != ',')
913 first_error_fmt (_("unexpected character `%c' in predication type"),
914 *ptr);
915 else
916 first_error (_("missing predication type"));
917 return FALSE;
918 }
919 parsed_type->width = 0;
920 *str = ptr + 1;
921 return TRUE;
922 }
923
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.

   Handles the optional ".<T>" arrangement suffix (V/Z/P registers), the
   "/z" and "/m" predication suffix (P registers), and an optional
   "[<index>]" element index.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start with an empty qualifier record.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the register's actual type for the checks below.  */
  type = reg->type;

  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register if of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index. In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar. Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      /* NOTE: the error is recorded but parsing continues; callers
	 detect it via error_p ().  */
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1059
1060 /* Parse register.
1061
1062 Return the register number on success; return PARSE_FAIL otherwise.
1063
1064 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1065 the register (e.g. NEON double or quad reg when either has been requested).
1066
1067 If this is a NEON vector register with additional type information, fill
1068 in the struct pointed to by VECTYPE (if non-NULL).
1069
1070 This parser does not handle register list. */
1071
1072 static int
1073 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1074 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1075 {
1076 struct vector_type_el atype;
1077 char *str = *ccp;
1078 int reg = parse_typed_reg (&str, type, rtype, &atype,
1079 /*in_reg_list= */ FALSE);
1080
1081 if (reg == PARSE_FAIL)
1082 return PARSE_FAIL;
1083
1084 if (vectype)
1085 *vectype = atype;
1086
1087 *ccp = str;
1088
1089 return reg;
1090 }
1091
1092 static inline bfd_boolean
1093 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1094 {
1095 return
1096 e1.type == e2.type
1097 && e1.defined == e2.defined
1098 && e1.width == e2.width && e1.index == e2.index;
1099 }
1100
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   Ranges such as { V0.8B - V3.8B } are also accepted; a range is
   expanded into its individual registers below.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;	/* previous register starts the range.  */
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* Ranges must be ascending.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* All registers must share the shape of the first one.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      /* Pack each register of the (possibly one-element) range into
	 the 5-bit fields of the encoded result.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* A ',' continues the list; otherwise a '-' continues a range (the
     comma operator sets IN_RANGE before the '-' test).  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  /* An element index seen on any register must follow the whole list,
     e.g. { V0.4S, V1.4S }[2].  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low 2 bits: register count - 1; remaining bits: packed regnos.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1261
1262 /* Directives: register aliases. */
1263
1264 static reg_entry *
1265 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1266 {
1267 reg_entry *new;
1268 const char *name;
1269
1270 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1271 {
1272 if (new->builtin)
1273 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1274 str);
1275
1276 /* Only warn about a redefinition if it's not defined as the
1277 same register. */
1278 else if (new->number != number || new->type != type)
1279 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1280
1281 return NULL;
1282 }
1283
1284 name = xstrdup (str);
1285 new = XNEW (reg_entry);
1286
1287 new->name = name;
1288 new->number = number;
1289 new->type = type;
1290 new->builtin = FALSE;
1291
1292 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1293 abort ();
1294
1295 return new;
1296 }
1297
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.

   As well as the stated alias, an all-upper-case and an all-lower-case
   variant are installed so that lookups succeed regardless of case.  */

static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only attempt the upper-case alias if it differs from the
	 name as stated.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise for the lower-case variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1377
/* Handler for a ".req" pseudo-op seen at the start of a line.
   Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line; a leading .req is
   therefore always a syntax error.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1385
1386 /* The .unreq directive deletes an alias which was previously defined
1387 by .req. For example:
1388
1389 my_alias .req r11
1390 .unreq my_alias */
1391
1392 static void
1393 s_unreq (int a ATTRIBUTE_UNUSED)
1394 {
1395 char *name;
1396 char saved_char;
1397
1398 name = input_line_pointer;
1399
1400 while (*input_line_pointer != 0
1401 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1402 ++input_line_pointer;
1403
1404 saved_char = *input_line_pointer;
1405 *input_line_pointer = 0;
1406
1407 if (!*name)
1408 as_bad (_("invalid syntax for .unreq directive"));
1409 else
1410 {
1411 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1412
1413 if (!reg)
1414 as_bad (_("unknown register alias '%s'"), name);
1415 else if (reg->builtin)
1416 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1417 name);
1418 else
1419 {
1420 char *p;
1421 char *nbuf;
1422
1423 hash_delete (aarch64_reg_hsh, name, FALSE);
1424 free ((char *) reg->name);
1425 free (reg);
1426
1427 /* Also locate the all upper case and all lower case versions.
1428 Do not complain if we cannot find one or the other as it
1429 was probably deleted above. */
1430
1431 nbuf = strdup (name);
1432 for (p = nbuf; *p; p++)
1433 *p = TOUPPER (*p);
1434 reg = hash_find (aarch64_reg_hsh, nbuf);
1435 if (reg)
1436 {
1437 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1438 free ((char *) reg->name);
1439 free (reg);
1440 }
1441
1442 for (p = nbuf; *p; p++)
1443 *p = TOLOWER (*p);
1444 reg = hash_find (aarch64_reg_hsh, nbuf);
1445 if (reg)
1446 {
1447 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1448 free ((char *) reg->name);
1449 free (reg);
1450 }
1451
1452 free (nbuf);
1453 }
1454 }
1455
1456 *input_line_pointer = saved_char;
1457 demand_empty_rest_of_line ();
1458 }
1459
1460 /* Directives: Instruction set selection. */
1461
1462 #ifdef OBJ_ELF
1463 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1464 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1465 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1466 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1467
/* Create a new mapping symbol ($x or $d) for the transition to STATE,
   placed at offset VALUE within FRAG.  Also maintains the frag's
   first_map/last_map bookkeeping so that at most one mapping symbol
   exists at any given offset of a frag.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Discard the superseded symbol at offset 0.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Replace a previous mapping symbol at the same offset.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1523
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Emit a $d symbol at offset VALUE in FRAG covering BYTES bytes of
   padding, followed by a STATE symbol at VALUE + BYTES.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The removed symbol was also the frag's first map.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1551
1552 static void mapping_state_2 (enum mstate state, int max_chars);
1553
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  Emits a mapping symbol when the
   section's mapping state actually changes; the heavy lifting is done
   by mapping_state_2.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1595
/* Same as mapping_state, but MAX_CHARS bytes have already been
   allocated.  Put the mapping symbol that far back, i.e. at the start
   of the just-allocated region.  */

static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Mapping symbols only make sense in normal (relocatable) sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
1615 #else
1616 #define mapping_state(x) /* nothing */
1617 #define mapping_state_2(x, y) /* nothing */
1618 #endif
1619
1620 /* Directives: sectioning and alignment. */
1621
/* Implement the ".bss" directive: switch output to the BSS section.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  /* BSS contents are data, never instructions.  */
  mapping_state (MAP_DATA);
}
1631
/* Implement the ".even" directive: align to a 2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1643
1644 /* Directives: Literal pools. */
1645
1646 static literal_pool *
1647 find_literal_pool (int size)
1648 {
1649 literal_pool *pool;
1650
1651 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1652 {
1653 if (pool->section == now_seg
1654 && pool->sub_section == now_subseg && pool->size == size)
1655 break;
1656 }
1657
1658 return pool;
1659 }
1660
/* Return the literal pool for SIZE-byte entries in the current
   section/subsection, creating it (and its label symbol) if it does
   not exist yet.  */

static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = XNEW (literal_pool);
      /* NOTE(review): XNEW aborts on allocation failure rather than
	 returning NULL, so this check looks unreachable — confirm.  */
      if (!pool)
	return NULL;

      /* Currently we always put the literal pool in the current text
	 section.  If we were generating "small" model code where we
	 knew that all code and initialised data was within 1MB then
	 we could output literals to mergeable, read-only data
	 sections. */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}
1705
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.

   On success *EXP is rewritten to refer to the pool's label symbol
   plus the entry's byte offset, so the caller can emit a PC-relative
   reference to the pooled value.  Identical constants and identical
   symbolic expressions are shared rather than duplicated.  */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP as pool_symbol + byte offset of the entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1765
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Give the previously created SYMBOLP its NAME (copied), SEGMENT,
   value VALU and owning FRAG, then append it to the global symbol
   chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.	 */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1816
1817
/* Implement the ".ltorg"/".pool" directive: dump all pending literal
   pools (4-, 8- and 16-byte entry sizes) for the current section at
   the current location, then mark them empty for reuse.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* align == 2, 3, 4 covers entry sizes 4, 8 and 16 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* Pool contents are data, not instructions.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 byte keeps the name out of the user namespace.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1876
1877 #ifdef OBJ_ELF
1878 /* Forward declarations for functions below, in the MD interface
1879 section. */
1880 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1881 static struct reloc_table_entry * find_reloc_table_entry (char **);
1882
1883 /* Directives: Data. */
1884 /* N.B. the support for relocation suffix in this directive needs to be
1885 implemented properly. */
1886
/* Implement the ".word"/".long" (NBYTES == 4) and ".xword"/".dword"
   (NBYTES == 8) directives: emit a comma-separated list of NBYTES-wide
   data expressions.  Relocation suffixes (":suffix:") are recognised
   but not yet supported and produce an error.  */
static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* These directives emit data, not instructions.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:reloc_suffix:" marker.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1938
1939 #endif /* OBJ_ELF */
1940
/* Output a 32-bit word, but mark as an instruction.

   Implements the ".inst" directive: each comma-separated constant is
   emitted as a 4-byte instruction word, byte-swapped on big-endian
   targets so the encoding matches instruction byte order.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instructions are always little-endian; swap so that emit_expr
	 (which writes target byte order) produces the right bytes.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1993
/* Implement ".cfi_b_key_frame": record on the current CFI FDE that
   return addresses in this frame are signed with the B pointer-
   authentication key instead of the default A key.  */
static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2001
2002 #ifdef OBJ_ELF
/* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.
   Implements the ".tlsdescadd" directive; the fix is attached at the
   current output position, so the ADD must follow immediately.  */

static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix location exists in this frag.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2017
/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.
   Implements the ".tlsdesccall" directive.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2037
/* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.
   Implements the ".tlsdescldr" directive; the fix is attached at the
   current output position, so the LDR must follow immediately.  */

static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix location exists in this frag.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2052 #endif /* OBJ_ELF */
2053
2054 static void s_aarch64_arch (int);
2055 static void s_aarch64_cpu (int);
2056 static void s_aarch64_arch_extension (int);
2057
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".pool" is an alias for ".ltorg".  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The integer argument is the data size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};
2088 \f
2089
2090 /* Check whether STR points to a register name followed by a comma or the
2091 end of line; REG_TYPE indicates which register types are checked
2092 against. Return TRUE if STR is such a register name; otherwise return
2093 FALSE. The function does not intend to produce any diagnostics, but since
2094 the register parser aarch64_reg_parse, which is called by this function,
2095 does produce diagnostics, we call clear_error to clear any diagnostics
2096 that may be generated by aarch64_reg_parse.
2097 Also, the function returns FALSE directly if there is any user error
2098 present at the function entry. This prevents the existing diagnostics
2099 state from being spoiled.
2100 The function currently serves parse_constant_immediate and
2101 parse_big_immediate only. */
2102 static bfd_boolean
2103 reg_name_p (char *str, aarch64_reg_type reg_type)
2104 {
2105 int reg;
2106
2107 /* Prevent the diagnostics state from being spoiled. */
2108 if (error_p ())
2109 return FALSE;
2110
2111 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2112
2113 /* Clear the parsing error that may be set by the reg parser. */
2114 clear_error ();
2115
2116 if (reg == PARSE_FAIL)
2117 return FALSE;
2118
2119 skip_whitespace (str);
2120 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2121 return TRUE;
2122
2123 return FALSE;
2124 }
2125
2126 /* Parser functions used exclusively in instruction operands. */
2127
2128 /* Parse an immediate expression which may not be constant.
2129
2130 To prevent the expression parser from pushing a register name
2131 into the symbol table as an undefined symbol, firstly a check is
2132 done to find out whether STR is a register of type REG_TYPE followed
2133 by a comma or the end of line. Return FALSE if STR is such a string. */
2134
2135 static bfd_boolean
2136 parse_immediate_expression (char **str, expressionS *exp,
2137 aarch64_reg_type reg_type)
2138 {
2139 if (reg_name_p (*str, reg_type))
2140 {
2141 set_recoverable_error (_("immediate operand required"));
2142 return FALSE;
2143 }
2144
2145 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2146
2147 if (exp->X_op == O_absent)
2148 {
2149 set_fatal_syntax_error (_("missing immediate expression"));
2150 return FALSE;
2151 }
2152
2153 return TRUE;
2154 }
2155
2156 /* Constant immediate-value read function for use in insn parsing.
2157 STR points to the beginning of the immediate (with the optional
2158 leading #); *VAL receives the value. REG_TYPE says which register
2159 names should be treated as registers rather than as symbolic immediates.
2160
2161 Return TRUE on success; otherwise return FALSE. */
2162
2163 static bfd_boolean
2164 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2165 {
2166 expressionS exp;
2167
2168 if (! parse_immediate_expression (str, &exp, reg_type))
2169 return FALSE;
2170
2171 if (exp.X_op != O_constant)
2172 {
2173 set_syntax_error (_("constant expression required"));
2174 return FALSE;
2175 }
2176
2177 *val = exp.X_add_number;
2178 return TRUE;
2179 }
2180
/* Pack the interesting bits of the single-precision image IMM into
   the 8-bit immediate layout used by AArch64 FMOV: result bit 7 is
   IMM's sign bit b[31], and result bits 6:0 are IMM's b[25:19].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31] -> b[7].  */

  return low7 | sign;
}
2187
2188 /* Return TRUE if the single-precision floating-point value encoded in IMM
2189 can be expressed in the AArch64 8-bit signed floating-point format with
2190 3-bit exponent and normalized 4 bits of precision; in other words, the
2191 floating-point value must be expressable as
2192 (+/-) n / 16 * power (2, r)
2193 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2194
2195 static bfd_boolean
2196 aarch64_imm_float_p (uint32_t imm)
2197 {
2198 /* If a single-precision floating-point value has the following bit
2199 pattern, it can be expressed in the AArch64 8-bit floating-point
2200 format:
2201
2202 3 32222222 2221111111111
2203 1 09876543 21098765432109876543210
2204 n Eeeeeexx xxxx0000000000000000000
2205
2206 where n, e and each x are either 0 or 1 independently, with
2207 E == ~ e. */
2208
2209 uint32_t pattern;
2210
2211 /* Prepare the pattern for 'Eeeeee'. */
2212 if (((imm >> 30) & 0x1) == 0)
2213 pattern = 0x3e000000;
2214 else
2215 pattern = 0x40000000;
2216
2217 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2218 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2219 }
2220
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.  On failure *FPWORD is left untouched.  */

static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s: they do not fit in a float's
     23-bit mantissa.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'.  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~: the three bits after E must equal ~E, i.e. the
     exponent must be within single-precision range.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111: exclude double Inf/NaN exponents
     that would alias a finite float exponent after narrowing.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));			/* 3 S bits.  */
  return TRUE;
}
2268
2269 /* Return true if we should treat OPERAND as a double-precision
2270 floating-point operand rather than a single-precision one. */
2271 static bfd_boolean
2272 double_precision_operand_p (const aarch64_opnd_info *operand)
2273 {
2274 /* Check for unsuffixed SVE registers, which are allowed
2275 for LDR and STR but not in instructions that require an
2276 immediate. We get better error messages if we arbitrarily
2277 pick one size, parse the immediate normally, and then
2278 report the match failure in the normal way. */
2279 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2280 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2281 }
2282
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;	/* Set once a "0x..." encoding is consumed.  */

  /* An optional '#' may introduce the immediate.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit encoding to a 32-bit one; fail if any
	     precision would be lost in the conversion.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision encoding must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name where an immediate is required: recoverable so
	 that other operand parses may still be attempted.  */
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Convert the literal with the generic IEEE converter, 's'
	 selecting single precision.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2358
2359 /* Less-generic immediate-value read function with the possibility of loading
2360 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2361 instructions.
2362
2363 To prevent the expression parser from pushing a register name into the
2364 symbol table as an undefined symbol, a check is firstly done to find
2365 out whether STR is a register of type REG_TYPE followed by a comma or
2366 the end of line. Return FALSE if STR is such a register. */
2367
2368 static bfd_boolean
2369 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2370 {
2371 char *ptr = *str;
2372
2373 if (reg_name_p (ptr, reg_type))
2374 {
2375 set_syntax_error (_("immediate operand required"));
2376 return FALSE;
2377 }
2378
2379 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2380
2381 if (inst.reloc.exp.X_op == O_constant)
2382 *imm = inst.reloc.exp.X_add_number;
2383
2384 *str = ptr;
2385
2386 return TRUE;
2387 }
2388
2389 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2390 if NEED_LIBOPCODES is non-zero, the fixup will need
2391 assistance from the libopcodes. */
2392
2393 static inline void
2394 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2395 const aarch64_opnd_info *operand,
2396 int need_libopcodes_p)
2397 {
2398 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2399 reloc->opnd = operand->type;
2400 if (need_libopcodes_p)
2401 reloc->need_libopcodes_p = 1;
2402 };
2403
2404 /* Return TRUE if the instruction needs to be fixed up later internally by
2405 the GAS; otherwise return FALSE. */
2406
2407 static inline bfd_boolean
2408 aarch64_gas_internal_fixup_p (void)
2409 {
2410 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2411 }
2412
2413 /* Assign the immediate value to the relevant field in *OPERAND if
2414 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2415 needs an internal fixup in a later stage.
2416 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2417 IMM.VALUE that may get assigned with the constant. */
2418 static inline void
2419 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2420 aarch64_opnd_info *operand,
2421 int addr_off_p,
2422 int need_libopcodes_p,
2423 int skip_p)
2424 {
2425 if (reloc->exp.X_op == O_constant)
2426 {
2427 if (addr_off_p)
2428 operand->addr.offset.imm = reloc->exp.X_add_number;
2429 else
2430 operand->imm.value = reloc->exp.X_add_number;
2431 reloc->type = BFD_RELOC_UNUSED;
2432 }
2433 else
2434 {
2435 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2436 /* Tell libopcodes to ignore this operand or not. This is helpful
2437 when one of the operands needs to be fixed up later but we need
2438 libopcodes to check the other operands. */
2439 operand->skip = skip_p;
2440 }
2441 }
2442
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;		/* Modifier name, without the colons.  */
  int pc_rel;			/* Non-zero if the reloc is PC-relative.  */
  bfd_reloc_code_real_type adr_type;		/* Reloc for ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* Reloc for ADRP.  */
  bfd_reloc_code_real_type movw_type;		/* Reloc for MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* Reloc for ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;		/* Reloc for LDR/STR offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* Reloc for LDR literal.  */
};
2462
2463 static struct reloc_table_entry reloc_table[] = {
2464 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2465 {"lo12", 0,
2466 0, /* adr_type */
2467 0,
2468 0,
2469 BFD_RELOC_AARCH64_ADD_LO12,
2470 BFD_RELOC_AARCH64_LDST_LO12,
2471 0},
2472
2473 /* Higher 21 bits of pc-relative page offset: ADRP */
2474 {"pg_hi21", 1,
2475 0, /* adr_type */
2476 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2477 0,
2478 0,
2479 0,
2480 0},
2481
2482 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2483 {"pg_hi21_nc", 1,
2484 0, /* adr_type */
2485 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2486 0,
2487 0,
2488 0,
2489 0},
2490
2491 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2492 {"abs_g0", 0,
2493 0, /* adr_type */
2494 0,
2495 BFD_RELOC_AARCH64_MOVW_G0,
2496 0,
2497 0,
2498 0},
2499
2500 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2501 {"abs_g0_s", 0,
2502 0, /* adr_type */
2503 0,
2504 BFD_RELOC_AARCH64_MOVW_G0_S,
2505 0,
2506 0,
2507 0},
2508
2509 /* Less significant bits 0-15 of address/value: MOVK, no check */
2510 {"abs_g0_nc", 0,
2511 0, /* adr_type */
2512 0,
2513 BFD_RELOC_AARCH64_MOVW_G0_NC,
2514 0,
2515 0,
2516 0},
2517
2518 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2519 {"abs_g1", 0,
2520 0, /* adr_type */
2521 0,
2522 BFD_RELOC_AARCH64_MOVW_G1,
2523 0,
2524 0,
2525 0},
2526
2527 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2528 {"abs_g1_s", 0,
2529 0, /* adr_type */
2530 0,
2531 BFD_RELOC_AARCH64_MOVW_G1_S,
2532 0,
2533 0,
2534 0},
2535
2536 /* Less significant bits 16-31 of address/value: MOVK, no check */
2537 {"abs_g1_nc", 0,
2538 0, /* adr_type */
2539 0,
2540 BFD_RELOC_AARCH64_MOVW_G1_NC,
2541 0,
2542 0,
2543 0},
2544
2545 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2546 {"abs_g2", 0,
2547 0, /* adr_type */
2548 0,
2549 BFD_RELOC_AARCH64_MOVW_G2,
2550 0,
2551 0,
2552 0},
2553
2554 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2555 {"abs_g2_s", 0,
2556 0, /* adr_type */
2557 0,
2558 BFD_RELOC_AARCH64_MOVW_G2_S,
2559 0,
2560 0,
2561 0},
2562
2563 /* Less significant bits 32-47 of address/value: MOVK, no check */
2564 {"abs_g2_nc", 0,
2565 0, /* adr_type */
2566 0,
2567 BFD_RELOC_AARCH64_MOVW_G2_NC,
2568 0,
2569 0,
2570 0},
2571
2572 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2573 {"abs_g3", 0,
2574 0, /* adr_type */
2575 0,
2576 BFD_RELOC_AARCH64_MOVW_G3,
2577 0,
2578 0,
2579 0},
2580
2581 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2582 {"prel_g0", 1,
2583 0, /* adr_type */
2584 0,
2585 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2586 0,
2587 0,
2588 0},
2589
2590 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2591 {"prel_g0_nc", 1,
2592 0, /* adr_type */
2593 0,
2594 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2595 0,
2596 0,
2597 0},
2598
2599 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2600 {"prel_g1", 1,
2601 0, /* adr_type */
2602 0,
2603 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2604 0,
2605 0,
2606 0},
2607
2608 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2609 {"prel_g1_nc", 1,
2610 0, /* adr_type */
2611 0,
2612 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2613 0,
2614 0,
2615 0},
2616
2617 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2618 {"prel_g2", 1,
2619 0, /* adr_type */
2620 0,
2621 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2622 0,
2623 0,
2624 0},
2625
2626 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2627 {"prel_g2_nc", 1,
2628 0, /* adr_type */
2629 0,
2630 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2631 0,
2632 0,
2633 0},
2634
2635 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2636 {"prel_g3", 1,
2637 0, /* adr_type */
2638 0,
2639 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2640 0,
2641 0,
2642 0},
2643
2644 /* Get to the page containing GOT entry for a symbol. */
2645 {"got", 1,
2646 0, /* adr_type */
2647 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2648 0,
2649 0,
2650 0,
2651 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2652
2653 /* 12 bit offset into the page containing GOT entry for that symbol. */
2654 {"got_lo12", 0,
2655 0, /* adr_type */
2656 0,
2657 0,
2658 0,
2659 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2660 0},
2661
2662 /* 0-15 bits of address/value: MOVk, no check. */
2663 {"gotoff_g0_nc", 0,
2664 0, /* adr_type */
2665 0,
2666 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2667 0,
2668 0,
2669 0},
2670
2671 /* Most significant bits 16-31 of address/value: MOVZ. */
2672 {"gotoff_g1", 0,
2673 0, /* adr_type */
2674 0,
2675 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2676 0,
2677 0,
2678 0},
2679
2680 /* 15 bit offset into the page containing GOT entry for that symbol. */
2681 {"gotoff_lo15", 0,
2682 0, /* adr_type */
2683 0,
2684 0,
2685 0,
2686 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2687 0},
2688
2689 /* Get to the page containing GOT TLS entry for a symbol */
2690 {"gottprel_g0_nc", 0,
2691 0, /* adr_type */
2692 0,
2693 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2694 0,
2695 0,
2696 0},
2697
2698 /* Get to the page containing GOT TLS entry for a symbol */
2699 {"gottprel_g1", 0,
2700 0, /* adr_type */
2701 0,
2702 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2703 0,
2704 0,
2705 0},
2706
2707 /* Get to the page containing GOT TLS entry for a symbol */
2708 {"tlsgd", 0,
2709 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2710 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2711 0,
2712 0,
2713 0,
2714 0},
2715
2716 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2717 {"tlsgd_lo12", 0,
2718 0, /* adr_type */
2719 0,
2720 0,
2721 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2722 0,
2723 0},
2724
2725 /* Lower 16 bits address/value: MOVk. */
2726 {"tlsgd_g0_nc", 0,
2727 0, /* adr_type */
2728 0,
2729 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2730 0,
2731 0,
2732 0},
2733
2734 /* Most significant bits 16-31 of address/value: MOVZ. */
2735 {"tlsgd_g1", 0,
2736 0, /* adr_type */
2737 0,
2738 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2739 0,
2740 0,
2741 0},
2742
2743 /* Get to the page containing GOT TLS entry for a symbol */
2744 {"tlsdesc", 0,
2745 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2746 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2747 0,
2748 0,
2749 0,
2750 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2751
2752 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2753 {"tlsdesc_lo12", 0,
2754 0, /* adr_type */
2755 0,
2756 0,
2757 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2758 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2759 0},
2760
2761 /* Get to the page containing GOT TLS entry for a symbol.
2762 The same as GD, we allocate two consecutive GOT slots
2763 for module index and module offset, the only difference
2764 with GD is the module offset should be initialized to
2765 zero without any outstanding runtime relocation. */
2766 {"tlsldm", 0,
2767 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2768 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2769 0,
2770 0,
2771 0,
2772 0},
2773
2774 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2775 {"tlsldm_lo12_nc", 0,
2776 0, /* adr_type */
2777 0,
2778 0,
2779 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2780 0,
2781 0},
2782
2783 /* 12 bit offset into the module TLS base address. */
2784 {"dtprel_lo12", 0,
2785 0, /* adr_type */
2786 0,
2787 0,
2788 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2789 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2790 0},
2791
2792 /* Same as dtprel_lo12, no overflow check. */
2793 {"dtprel_lo12_nc", 0,
2794 0, /* adr_type */
2795 0,
2796 0,
2797 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2798 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2799 0},
2800
2801 /* bits[23:12] of offset to the module TLS base address. */
2802 {"dtprel_hi12", 0,
2803 0, /* adr_type */
2804 0,
2805 0,
2806 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2807 0,
2808 0},
2809
2810 /* bits[15:0] of offset to the module TLS base address. */
2811 {"dtprel_g0", 0,
2812 0, /* adr_type */
2813 0,
2814 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2815 0,
2816 0,
2817 0},
2818
2819 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2820 {"dtprel_g0_nc", 0,
2821 0, /* adr_type */
2822 0,
2823 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2824 0,
2825 0,
2826 0},
2827
2828 /* bits[31:16] of offset to the module TLS base address. */
2829 {"dtprel_g1", 0,
2830 0, /* adr_type */
2831 0,
2832 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2833 0,
2834 0,
2835 0},
2836
2837 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2838 {"dtprel_g1_nc", 0,
2839 0, /* adr_type */
2840 0,
2841 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2842 0,
2843 0,
2844 0},
2845
2846 /* bits[47:32] of offset to the module TLS base address. */
2847 {"dtprel_g2", 0,
2848 0, /* adr_type */
2849 0,
2850 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2851 0,
2852 0,
2853 0},
2854
2855 /* Lower 16 bit offset into GOT entry for a symbol */
2856 {"tlsdesc_off_g0_nc", 0,
2857 0, /* adr_type */
2858 0,
2859 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2860 0,
2861 0,
2862 0},
2863
2864 /* Higher 16 bit offset into GOT entry for a symbol */
2865 {"tlsdesc_off_g1", 0,
2866 0, /* adr_type */
2867 0,
2868 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2869 0,
2870 0,
2871 0},
2872
2873 /* Get to the page containing GOT TLS entry for a symbol */
2874 {"gottprel", 0,
2875 0, /* adr_type */
2876 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2877 0,
2878 0,
2879 0,
2880 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2881
2882 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2883 {"gottprel_lo12", 0,
2884 0, /* adr_type */
2885 0,
2886 0,
2887 0,
2888 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2889 0},
2890
2891 /* Get tp offset for a symbol. */
2892 {"tprel", 0,
2893 0, /* adr_type */
2894 0,
2895 0,
2896 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2897 0,
2898 0},
2899
2900 /* Get tp offset for a symbol. */
2901 {"tprel_lo12", 0,
2902 0, /* adr_type */
2903 0,
2904 0,
2905 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2906 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2907 0},
2908
2909 /* Get tp offset for a symbol. */
2910 {"tprel_hi12", 0,
2911 0, /* adr_type */
2912 0,
2913 0,
2914 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2915 0,
2916 0},
2917
2918 /* Get tp offset for a symbol. */
2919 {"tprel_lo12_nc", 0,
2920 0, /* adr_type */
2921 0,
2922 0,
2923 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2924 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2925 0},
2926
2927 /* Most significant bits 32-47 of address/value: MOVZ. */
2928 {"tprel_g2", 0,
2929 0, /* adr_type */
2930 0,
2931 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2932 0,
2933 0,
2934 0},
2935
2936 /* Most significant bits 16-31 of address/value: MOVZ. */
2937 {"tprel_g1", 0,
2938 0, /* adr_type */
2939 0,
2940 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2941 0,
2942 0,
2943 0},
2944
2945 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2946 {"tprel_g1_nc", 0,
2947 0, /* adr_type */
2948 0,
2949 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2950 0,
2951 0,
2952 0},
2953
2954 /* Most significant bits 0-15 of address/value: MOVZ. */
2955 {"tprel_g0", 0,
2956 0, /* adr_type */
2957 0,
2958 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2959 0,
2960 0,
2961 0},
2962
2963 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2964 {"tprel_g0_nc", 0,
2965 0, /* adr_type */
2966 0,
2967 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2968 0,
2969 0,
2970 0},
2971
2972 /* 15bit offset from got entry to base address of GOT table. */
2973 {"gotpage_lo15", 0,
2974 0,
2975 0,
2976 0,
2977 0,
2978 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2979 0},
2980
2981 /* 14bit offset from got entry to base address of GOT table. */
2982 {"gotpage_lo14", 0,
2983 0,
2984 0,
2985 0,
2986 0,
2987 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2988 0},
2989 };
2990
2991 /* Given the address of a pointer pointing to the textual name of a
2992 relocation as may appear in assembler source, attempt to find its
2993 details in reloc_table. The pointer will be updated to the character
2994 after the trailing colon. On failure, NULL will be returned;
2995 otherwise return the reloc_table_entry. */
2996
2997 static struct reloc_table_entry *
2998 find_reloc_table_entry (char **str)
2999 {
3000 unsigned int i;
3001 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3002 {
3003 int length = strlen (reloc_table[i].name);
3004
3005 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3006 && (*str)[length] == ':')
3007 {
3008 *str += (length + 1);
3009 return &reloc_table[i];
3010 }
3011 }
3012
3013 return NULL;
3014 }
3015
/* Mode argument to parse_shift and parse_shifter_operand, selecting
   which shift/extend operators and operand forms are acceptable.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3030
/* Parse a <shift> operator on an AArch64 data processing instruction.
   Return TRUE on success; otherwise return FALSE.

   MODE restricts which operators and which amounts are acceptable.  On
   success, the operator kind and optional amount are recorded in
   OPERAND->shifter and *STR is advanced past the parsed text.  */
static bfd_boolean
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan past the alphabetic operator name (e.g. "lsl", "uxtw").  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return FALSE;
    }

  /* Look the name up in the table of shift/extend operators.  */
  shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return FALSE;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only acceptable when the caller explicitly allows it.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return FALSE;
    }

  /* Likewise MUL is only acceptable in the two MUL modes.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return FALSE;
    }

  /* Reject operators that MODE does not permit.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return FALSE;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return FALSE;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* A missing amount is only acceptable for an extend operator
	 without an explicit immediate prefix; it then defaults to 0
	 (note amount_present is left unset in this case).  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return FALSE;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return FALSE;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return FALSE;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return TRUE;
}
3203
3204 /* Parse a <shifter_operand> for a data processing instruction:
3205
3206 #<immediate>
3207 #<immediate>, LSL #imm
3208
3209 Validation of immediate operands is deferred to md_apply_fix.
3210
3211 Return TRUE on success; otherwise return FALSE. */
3212
3213 static bfd_boolean
3214 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3215 enum parse_shift_mode mode)
3216 {
3217 char *p;
3218
3219 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3220 return FALSE;
3221
3222 p = *str;
3223
3224 /* Accept an immediate expression. */
3225 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3226 return FALSE;
3227
3228 /* Accept optional LSL for arithmetic immediate values. */
3229 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3230 if (! parse_shift (&p, operand, SHIFTED_LSL))
3231 return FALSE;
3232
3233 /* Not accept any shifter for logical immediate values. */
3234 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3235 && parse_shift (&p, operand, mode))
3236 {
3237 set_syntax_error (_("unexpected shift operator"));
3238 return FALSE;
3239 }
3240
3241 *str = p;
3242 return TRUE;
3243 }
3244
3245 /* Parse a <shifter_operand> for a data processing instruction:
3246
3247 <Rm>
3248 <Rm>, <shift>
3249 #<immediate>
3250 #<immediate>, LSL #imm
3251
3252 where <shift> is handled by parse_shift above, and the last two
3253 cases are handled by the function above.
3254
3255 Validation of immediate operands is deferred to md_apply_fix.
3256
3257 Return TRUE on success; otherwise return FALSE. */
3258
3259 static bfd_boolean
3260 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3261 enum parse_shift_mode mode)
3262 {
3263 const reg_entry *reg;
3264 aarch64_opnd_qualifier_t qualifier;
3265 enum aarch64_operand_class opd_class
3266 = aarch64_get_operand_class (operand->type);
3267
3268 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3269 if (reg)
3270 {
3271 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3272 {
3273 set_syntax_error (_("unexpected register in the immediate operand"));
3274 return FALSE;
3275 }
3276
3277 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3278 {
3279 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3280 return FALSE;
3281 }
3282
3283 operand->reg.regno = reg->number;
3284 operand->qualifier = qualifier;
3285
3286 /* Accept optional shift operation on register. */
3287 if (! skip_past_comma (str))
3288 return TRUE;
3289
3290 if (! parse_shift (str, operand, mode))
3291 return FALSE;
3292
3293 return TRUE;
3294 }
3295 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3296 {
3297 set_syntax_error
3298 (_("integer register expected in the extended/shifted operand "
3299 "register"));
3300 return FALSE;
3301 }
3302
3303 /* We have a shifted immediate variable. */
3304 return parse_shifter_operand_imm (str, operand, mode);
3305 }
3306
/* Parse a shifter operand that may be prefixed by a relocation modifier
   such as #:lo12:.  Return TRUE on success; return FALSE otherwise.  */

static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step over the '#' and/or ':' that introduce the modifier.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      /* This path records the ADD-variant reloc; a modifier with no
	 ADD variant is not usable here.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3367
3368 /* Parse all forms of an address expression. Information is written
3369 to *OPERAND and/or inst.reloc.
3370
3371 The A64 instruction set has the following addressing modes:
3372
3373 Offset
3374 [base] // in SIMD ld/st structure
3375 [base{,#0}] // in ld/st exclusive
3376 [base{,#imm}]
3377 [base,Xm{,LSL #imm}]
3378 [base,Xm,SXTX {#imm}]
3379 [base,Wm,(S|U)XTW {#imm}]
3380 Pre-indexed
3381 [base,#imm]!
3382 Post-indexed
3383 [base],#imm
3384 [base],Xm // in SIMD ld/st structure
3385 PC-relative (literal)
3386 label
3387 SVE:
3388 [base,#imm,MUL VL]
3389 [base,Zm.D{,LSL #imm}]
3390 [base,Zm.S,(S|U)XTW {#imm}]
3391 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3392 [Zn.S,#imm]
3393 [Zn.D,#imm]
3394 [Zn.S{, Xm}]
3395 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3396 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3397 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3398
3399 (As a convenience, the notation "=immediate" is permitted in conjunction
3400 with the pc-relative literal load instructions to automatically place an
3401 immediate value or symbolic address in a nearby literal pool and generate
3402 a hidden label which references it.)
3403
3404 Upon a successful parsing, the address structure in *OPERAND will be
3405 filled in the following way:
3406
3407 .base_regno = <base>
3408 .offset.is_reg // 1 if the offset is a register
3409 .offset.imm = <imm>
3410 .offset.regno = <Rm>
3411
3412 For different addressing modes defined in the A64 ISA:
3413
3414 Offset
3415 .pcrel=0; .preind=1; .postind=0; .writeback=0
3416 Pre-indexed
3417 .pcrel=0; .preind=1; .postind=0; .writeback=1
3418 Post-indexed
3419 .pcrel=0; .preind=0; .postind=1; .writeback=1
3420 PC-relative (literal)
3421 .pcrel=1; .preind=1; .postind=0; .writeback=0
3422
3423 The shift/extension information, if any, will be stored in .shifter.
3424 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3425 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3426 corresponding register.
3427
3428 BASE_TYPE says which types of base register should be accepted and
3429 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3430 is the type of shifter that is allowed for immediate offsets,
3431 or SHIFTED_NONE if none.
3432
3433 In all other respects, it is the caller's responsibility to check
3434 for addressing modes not supported by the instruction, and to set
3435 inst.reloc.type. */
3436
static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  /* Any expression parsed below lands directly in the instruction's
     relocation slot.  */
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is the PC-relative form.
	 =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  /* Pick the relocation type appropriate to this operand; a zero
	     entry in the table means the modifier is not valid here.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  /* A '[' was consumed: parse the base register first.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return FALSE;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifters require a 64-bit offset register.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	      /* Base and offset must be the same size, except for the
		 SVE2 vector-plus-scalar form [Zn.S, Xm].  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return FALSE;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* The remaining (S|U)XTW shifters require a 32-bit offset.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* Not a register offset: an immediate, possibly preceded by a
	     relocation modifier.
	     [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return FALSE;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return FALSE;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  if (skip_past_char (&p, '!'))
    {
      /* Writeback: only valid for immediate pre-indexing.  */
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      /* The post-index operand is either a 64-bit GPR or an immediate.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return FALSE;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; reject [Rn]! accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  /* Reject [Rn]!  */
	  set_syntax_error (_("missing offset in the pre-indexed address"));
	  return FALSE;
	}

      operand->addr.preind = 1;
      if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	{
	  /* [Zn.<T>] is short for [Zn.<T>, xzr].  */
	  operand->addr.offset.is_reg = 1;
	  operand->addr.offset.regno = REG_ZR;
	  *offset_qualifier = AARCH64_OPND_QLF_X;
	}
      else
	{
	  /* [Rn] is short for [Rn,#0].  */
	  inst.reloc.exp.X_op = O_constant;
	  inst.reloc.exp.X_add_number = 0;
	}
    }

  *str = p;
  return TRUE;
}
3724
3725 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3726 on success. */
3727 static bfd_boolean
3728 parse_address (char **str, aarch64_opnd_info *operand)
3729 {
3730 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3731 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3732 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3733 }
3734
/* Parse an address in which SVE vector registers and MUL VL are allowed.
   The arguments have the same meaning as for parse_address_main.
   Uses REG_TYPE_SVE_BASE / REG_TYPE_SVE_OFFSET so Z registers are accepted
   as base and offset, and SHIFTED_MUL_VL so immediate offsets may carry a
   "MUL VL" multiplier.  Return TRUE on success.  */
static bfd_boolean
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
3747
3748 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3749 Return TRUE on success; otherwise return FALSE. */
3750 static bfd_boolean
3751 parse_half (char **str, int *internal_fixup_p)
3752 {
3753 char *p = *str;
3754
3755 skip_past_char (&p, '#');
3756
3757 gas_assert (internal_fixup_p);
3758 *internal_fixup_p = 0;
3759
3760 if (*p == ':')
3761 {
3762 struct reloc_table_entry *entry;
3763
3764 /* Try to parse a relocation. Anything else is an error. */
3765 ++p;
3766 if (!(entry = find_reloc_table_entry (&p)))
3767 {
3768 set_syntax_error (_("unknown relocation modifier"));
3769 return FALSE;
3770 }
3771
3772 if (entry->movw_type == 0)
3773 {
3774 set_syntax_error
3775 (_("this relocation modifier is not allowed on this instruction"));
3776 return FALSE;
3777 }
3778
3779 inst.reloc.type = entry->movw_type;
3780 }
3781 else
3782 *internal_fixup_p = 1;
3783
3784 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3785 return FALSE;
3786
3787 *str = p;
3788 return TRUE;
3789 }
3790
3791 /* Parse an operand for an ADRP instruction:
3792 ADRP <Xd>, <label>
3793 Return TRUE on success; otherwise return FALSE. */
3794
3795 static bfd_boolean
3796 parse_adrp (char **str)
3797 {
3798 char *p;
3799
3800 p = *str;
3801 if (*p == ':')
3802 {
3803 struct reloc_table_entry *entry;
3804
3805 /* Try to parse a relocation. Anything else is an error. */
3806 ++p;
3807 if (!(entry = find_reloc_table_entry (&p)))
3808 {
3809 set_syntax_error (_("unknown relocation modifier"));
3810 return FALSE;
3811 }
3812
3813 if (entry->adrp_type == 0)
3814 {
3815 set_syntax_error
3816 (_("this relocation modifier is not allowed on this instruction"));
3817 return FALSE;
3818 }
3819
3820 inst.reloc.type = entry->adrp_type;
3821 }
3822 else
3823 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3824
3825 inst.reloc.pc_rel = 1;
3826
3827 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3828 return FALSE;
3829
3830 *str = p;
3831 return TRUE;
3832 }
3833
3834 /* Miscellaneous. */
3835
3836 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3837 of SIZE tokens in which index I gives the token for field value I,
3838 or is null if field value I is invalid. REG_TYPE says which register
3839 names should be treated as registers rather than as symbolic immediates.
3840
3841 Return true on success, moving *STR past the operand and storing the
3842 field value in *VAL. */
3843
3844 static int
3845 parse_enum_string (char **str, int64_t *val, const char *const *array,
3846 size_t size, aarch64_reg_type reg_type)
3847 {
3848 expressionS exp;
3849 char *p, *q;
3850 size_t i;
3851
3852 /* Match C-like tokens. */
3853 p = q = *str;
3854 while (ISALNUM (*q))
3855 q++;
3856
3857 for (i = 0; i < size; ++i)
3858 if (array[i]
3859 && strncasecmp (array[i], p, q - p) == 0
3860 && array[i][q - p] == 0)
3861 {
3862 *val = i;
3863 *str = q;
3864 return TRUE;
3865 }
3866
3867 if (!parse_immediate_expression (&p, &exp, reg_type))
3868 return FALSE;
3869
3870 if (exp.X_op == O_constant
3871 && (uint64_t) exp.X_add_number < size)
3872 {
3873 *val = exp.X_add_number;
3874 *str = p;
3875 return TRUE;
3876 }
3877
3878 /* Use the default error for this operand. */
3879 return FALSE;
3880 }
3881
3882 /* Parse an option for a preload instruction. Returns the encoding for the
3883 option, or PARSE_FAIL. */
3884
3885 static int
3886 parse_pldop (char **str)
3887 {
3888 char *p, *q;
3889 const struct aarch64_name_value_pair *o;
3890
3891 p = q = *str;
3892 while (ISALNUM (*q))
3893 q++;
3894
3895 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3896 if (!o)
3897 return PARSE_FAIL;
3898
3899 *str = q;
3900 return o->value;
3901 }
3902
3903 /* Parse an option for a barrier instruction. Returns the encoding for the
3904 option, or PARSE_FAIL. */
3905
3906 static int
3907 parse_barrier (char **str)
3908 {
3909 char *p, *q;
3910 const asm_barrier_opt *o;
3911
3912 p = q = *str;
3913 while (ISALPHA (*q))
3914 q++;
3915
3916 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3917 if (!o)
3918 return PARSE_FAIL;
3919
3920 *str = q;
3921 return o->value;
3922 }
3923
3924 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3925 return 0 if successful. Otherwise return PARSE_FAIL. */
3926
3927 static int
3928 parse_barrier_psb (char **str,
3929 const struct aarch64_name_value_pair ** hint_opt)
3930 {
3931 char *p, *q;
3932 const struct aarch64_name_value_pair *o;
3933
3934 p = q = *str;
3935 while (ISALPHA (*q))
3936 q++;
3937
3938 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3939 if (!o)
3940 {
3941 set_fatal_syntax_error
3942 ( _("unknown or missing option to PSB"));
3943 return PARSE_FAIL;
3944 }
3945
3946 if (o->value != 0x11)
3947 {
3948 /* PSB only accepts option name 'CSYNC'. */
3949 set_syntax_error
3950 (_("the specified option is not accepted for PSB"));
3951 return PARSE_FAIL;
3952 }
3953
3954 *str = q;
3955 *hint_opt = o;
3956 return 0;
3957 }
3958
3959 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
3960 return 0 if successful. Otherwise return PARSE_FAIL. */
3961
3962 static int
3963 parse_bti_operand (char **str,
3964 const struct aarch64_name_value_pair ** hint_opt)
3965 {
3966 char *p, *q;
3967 const struct aarch64_name_value_pair *o;
3968
3969 p = q = *str;
3970 while (ISALPHA (*q))
3971 q++;
3972
3973 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3974 if (!o)
3975 {
3976 set_fatal_syntax_error
3977 ( _("unknown option to BTI"));
3978 return PARSE_FAIL;
3979 }
3980
3981 switch (o->value)
3982 {
3983 /* Valid BTI operands. */
3984 case HINT_OPD_C:
3985 case HINT_OPD_J:
3986 case HINT_OPD_JC:
3987 break;
3988
3989 default:
3990 set_syntax_error
3991 (_("unknown option to BTI"));
3992 return PARSE_FAIL;
3993 }
3994
3995 *str = q;
3996 *hint_opt = o;
3997 return 0;
3998 }
3999
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-null, the named register's flag bits are stored through
   it (zero for an implementation-defined register).  */

static int
parse_sys_reg (char **str, struct hash_control *sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_reg *o;
  int value;

  /* Copy the name into BUF, folded to lower case for the hash lookup.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';
  /* Assert that BUF be large enough.  */
  gas_assert (p - buf == q - *str);

  o = hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Range-check each field of the encoding.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the 16-bit register encoding.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: diagnose uses not supported by the selected processor
	 or deprecated names, but still return the encoding.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4067
4068 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4069 for the option, or NULL. */
4070
4071 static const aarch64_sys_ins_reg *
4072 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
4073 {
4074 char *p, *q;
4075 char buf[32];
4076 const aarch64_sys_ins_reg *o;
4077
4078 p = buf;
4079 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4080 if (p < buf + 31)
4081 *p++ = TOLOWER (*q);
4082 *p = '\0';
4083
4084 o = hash_find (sys_ins_regs, buf);
4085 if (!o)
4086 return NULL;
4087
4088 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
4089 as_bad (_("selected processor does not support system register "
4090 "name '%s'"), buf);
4091
4092 *str = q;
4093 return o;
4094 }
4095 \f
/* Consume the single character CHR from the input, or jump to the
   enclosing function's `failure' label.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of type REGTYPE into VAL/RTYPE, or set the default
   error and jump to `failure'.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE, storing its number
   and qualifier into the current operand INFO, or jump to `failure'.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check, or jump to
   `failure'.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   otherwise report the range in the error and jump to `failure'.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
#min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic value from ARRAY (see parse_enum_string) into VAL, or
   jump to `failure'.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; on a zero/false result jump to `failure'.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4147 \f
/* Encode the 12-bit immediate field of an add/sub-immediate instruction;
   the field occupies bits [21:10] of the word.  */
static inline uint32_t
encode_addsub_imm (uint32_t value)
{
  return value << 10;
}
4154
/* Encode the shift-amount field of an add/sub-immediate instruction;
   the field starts at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t amount)
{
  return amount << 22;
}
4161
4162
4163 /* encode the imm field of Adr instruction */
4164 static inline uint32_t
4165 encode_adr_imm (uint32_t imm)
4166 {
4167 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
4168 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
4169 }
4170
/* Encode the 16-bit immediate field of a move-wide-immediate instruction;
   the field starts at bit 5.  */
static inline uint32_t
encode_movw_imm (uint32_t value)
{
  return value << 5;
}
4177
/* Encode the 26-bit offset of an unconditional branch; the offset is
   masked to 26 bits and occupies bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t offset)
{
  return offset & 0x03ffffff;
}
4184
/* Encode the 19-bit offset of a conditional branch or a compare-and-branch;
   the offset is masked to 19 bits and placed at bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t offset)
{
  return (offset & 0x7ffff) << 5;
}
4191
/* Encode the 19-bit offset of a load-literal instruction; the offset is
   masked to 19 bits and placed at bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t offset)
{
  return (offset & 0x7ffff) << 5;
}
4198
/* Encode the 14-bit offset of a test-and-branch instruction; the offset
   is masked to 14 bits and placed at bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t offset)
{
  return (offset & 0x3fff) << 5;
}
4205
/* Encode the 16-bit immediate field of svc/hvc/smc; the field starts at
   bit 5.  */
static inline uint32_t
encode_svc_imm (uint32_t value)
{
  return value << 5;
}
4212
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling bit 30 of
   the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ 0x40000000u;
}
4219
/* Force a MOVZ/MOVN-family opcode to its MOVZ form by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | 0x40000000u;
}
4225
/* Force a MOVZ/MOVN-family opcode to its MOVN form by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(uint32_t) 0x40000000u;
}
4231
4232 /* Overall per-instruction processing. */
4233
4234 /* We need to be able to fix up arbitrary expressions in some statements.
4235 This is so that we can handle symbols that are an arbitrary distance from
4236 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4237 which returns part of an address in a form which will be valid for
4238 a data instruction. We do this by pushing the expression into a symbol
4239 in the expr_section, and creating a fix for that. */
4240
4241 static fixS *
4242 fix_new_aarch64 (fragS * frag,
4243 int where,
4244 short int size, expressionS * exp, int pc_rel, int reloc)
4245 {
4246 fixS *new_fix;
4247
4248 switch (exp->X_op)
4249 {
4250 case O_constant:
4251 case O_symbol:
4252 case O_add:
4253 case O_subtract:
4254 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4255 break;
4256
4257 default:
4258 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4259 pc_rel, reloc);
4260 break;
4261 }
4262 return new_fix;
4263 }
4264 \f
4265 /* Diagnostics on operands errors. */
4266
4267 /* By default, output verbose error message.
4268 Disable the verbose error message by -mno-verbose-error. */
4269 static int verbose_error_p = 1;
4270
4271 #ifdef DEBUG_AARCH64
4272 /* N.B. this is only for the purpose of debugging. */
4273 const char* operand_mismatch_kind_names[] =
4274 {
4275 "AARCH64_OPDE_NIL",
4276 "AARCH64_OPDE_RECOVERABLE",
4277 "AARCH64_OPDE_SYNTAX_ERROR",
4278 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
4279 "AARCH64_OPDE_INVALID_VARIANT",
4280 "AARCH64_OPDE_OUT_OF_RANGE",
4281 "AARCH64_OPDE_UNALIGNED",
4282 "AARCH64_OPDE_REG_LIST",
4283 "AARCH64_OPDE_OTHER_ERROR",
4284 };
4285 #endif /* DEBUG_AARCH64 */
4286
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enumerators being declared in
     ascending order of severity; assert that ordering here.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4307
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4336
4337 static void
4338 reset_aarch64_instruction (aarch64_instruction *instruction)
4339 {
4340 memset (instruction, '\0', sizeof (aarch64_instruction));
4341 instruction->reloc.type = BFD_RELOC_UNUSED;
4342 }
4343
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode template the error is for.  */
  aarch64_operand_error detail;		/* Kind, operand index and message.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;
4355
/* Head and tail of a singly-linked list of operand_error_record; the tail
   pointer lets init_operand_error_report splice the whole list onto the
   free list in O(1).  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
4363
4364 /* Top-level data structure reporting user errors for the current line of
4365 the assembly code.
4366 The way md_assemble works is that all opcodes sharing the same mnemonic
4367 name are iterated to find a match to the assembly line. In this data
4368 structure, each of the such opcodes will have one operand_error_record
4369 allocated and inserted. In other words, excessive errors related with
4370 a single opcode are disregarded. */
4371 operand_errors operand_error_report;
4372
4373 /* Free record nodes. */
4374 static operand_error_record *free_opnd_error_record_nodes = NULL;
4375
4376 /* Initialize the data structure that stores the operand mismatch
4377 information on assembling one line of the assembly code. */
4378 static void
4379 init_operand_error_report (void)
4380 {
4381 if (operand_error_report.head != NULL)
4382 {
4383 gas_assert (operand_error_report.tail != NULL);
4384 operand_error_report.tail->next = free_opnd_error_record_nodes;
4385 free_opnd_error_record_nodes = operand_error_report.head;
4386 operand_error_report.head = NULL;
4387 operand_error_report.tail = NULL;
4388 return;
4389 }
4390 gas_assert (operand_error_report.tail == NULL);
4391 }
4392
4393 /* Return TRUE if some operand error has been recorded during the
4394 parsing of the current assembly line using the opcode *OPCODE;
4395 otherwise return FALSE. */
4396 static inline bfd_boolean
4397 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4398 {
4399 operand_error_record *record = operand_error_report.head;
4400 return record && record->opcode == opcode;
4401 }
4402
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record: reuse a free node if available, otherwise
	 allocate a fresh one.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Overwrite the (new or existing) head record's detail.  */
  record->detail = new_record->detail;
}
4454
4455 static inline void
4456 record_operand_error_info (const aarch64_opcode *opcode,
4457 aarch64_operand_error *error_info)
4458 {
4459 operand_error_record record;
4460 record.opcode = opcode;
4461 record.detail = *error_info;
4462 add_operand_error_record (&record);
4463 }
4464
4465 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4466 error message *ERROR, for operand IDX (count from 0). */
4467
4468 static void
4469 record_operand_error (const aarch64_opcode *opcode, int idx,
4470 enum aarch64_operand_error_kind kind,
4471 const char* error)
4472 {
4473 aarch64_operand_error info;
4474 memset(&info, 0, sizeof (info));
4475 info.index = idx;
4476 info.kind = kind;
4477 info.error = error;
4478 info.non_fatal = FALSE;
4479 record_operand_error_info (opcode, &info);
4480 }
4481
4482 static void
4483 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4484 enum aarch64_operand_error_kind kind,
4485 const char* error, const int *extra_data)
4486 {
4487 aarch64_operand_error info;
4488 info.index = idx;
4489 info.kind = kind;
4490 info.error = error;
4491 info.data[0] = extra_data[0];
4492 info.data[1] = extra_data[1];
4493 info.data[2] = extra_data[2];
4494 info.non_fatal = FALSE;
4495 record_operand_error_info (opcode, &info);
4496 }
4497
4498 static void
4499 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4500 const char* error, int lower_bound,
4501 int upper_bound)
4502 {
4503 int data[3] = {lower_bound, upper_bound, 0};
4504 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4505 error, data);
4506 }
4507
/* Remove the operand error record for *OPCODE, returning the node to the
   free list.  Only the head of the report can belong to *OPCODE (records
   are inserted at the head), so only the head is unlinked.  */
static void ATTRIBUTE_UNUSED
remove_operand_error_record (const aarch64_opcode *opcode)
{
  if (opcode_has_operand_error_p (opcode))
    {
      operand_error_record* record = operand_error_report.head;
      gas_assert (record != NULL && operand_error_report.tail != NULL);
      operand_error_report.head = record->next;
      /* Recycle the node.  */
      record->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = record;
      if (operand_error_report.head == NULL)
	{
	  /* The list is now empty; the tail must have been this record.  */
	  gas_assert (operand_error_report.tail == record);
	  operand_error_report.tail = NULL;
	}
    }
}
4526
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  N.B. if no sequence matches any
   qualifier at all, index 0 is returned by default.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.
	 An all-NIL sequence marks the end of the meaningful entries.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers agree with this sequence.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
4576
4577 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4578 corresponding operands in *INSTR. */
4579
4580 static inline void
4581 assign_qualifier_sequence (aarch64_inst *instr,
4582 const aarch64_opnd_qualifier_t *qualifiers)
4583 {
4584 int i = 0;
4585 int num_opnds = aarch64_num_of_operands (instr->opcode);
4586 gas_assert (num_opnds);
4587 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4588 instr->operands[i].qualifier = *qualifiers;
4589 }
4590
4591 /* Print operands for the diagnosis purpose. */
4592
4593 static void
4594 print_operands (char *buf, const aarch64_opcode *opcode,
4595 const aarch64_opnd_info *opnds)
4596 {
4597 int i;
4598
4599 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4600 {
4601 char str[128];
4602
4603 /* We regard the opcode operand info more, however we also look into
4604 the inst->operands to support the disassembling of the optional
4605 operand.
4606 The two operand code should be the same in all cases, apart from
4607 when the operand can be optional. */
4608 if (opcode->operands[i] == AARCH64_OPND_NIL
4609 || opnds[i].type == AARCH64_OPND_NIL)
4610 break;
4611
4612 /* Generate the operand string in STR. */
4613 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4614 NULL);
4615
4616 /* Delimiter. */
4617 if (str[0] != '\0')
4618 strcat (buf, i == 0 ? " " : ", ");
4619
4620 /* Append the operand string. */
4621 strcat (buf, str);
4622 }
4623 }
4624
4625 /* Send to stderr a string as information. */
4626
static void
output_info (const char *format, ...)
{
  unsigned int lineno;
  const char *fname;
  va_list ap;

  /* Prefix the message with "file:line: " (or just "file: " when the
     line number is unknown), mirroring normal diagnostic output.  */
  fname = as_where (&lineno);
  if (fname != NULL)
    {
      if (lineno != 0)
	fprintf (stderr, "%s:%u: ", fname, lineno);
      else
	fprintf (stderr, "%s: ", fname);
    }
  fprintf (stderr, _("Info: "));

  va_start (ap, format);
  vfprintf (stderr, format, ap);
  va_end (ap);

  (void) putc ('\n', stderr);
}
4648
4649 /* Output one operand error record. */
4650
static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* Operand code of the erroneous operand, or NIL when the error is not
     attached to a particular operand (index < 0).  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Non-fatal records are emitted as warnings; everything else is a
     hard assembly error.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst by re-parsing the operand text after the mnemonic.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail again; we only need the IR.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes have much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] carry the permitted range; equal bounds mean a
	 single required value.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] is the expected number of registers in the list.  */
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] is the required alignment of the immediate.  */
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4825
4826 /* Process and output the error message about the operand mismatching.
4827
4828 When this function is called, the operand error information had
4829 been collected for an assembly line and there will be multiple
4830 errors in the case of multiple instruction templates; output the
4831 error message that most closely describes the problem.
4832
4833 The errors to be printed can be filtered on printing all errors
4834 or only non-fatal errors. This distinction has to be made because
4835 the error buffer may already be filled with fatal errors we don't want to
4836 print due to the different instruction templates. */
4837
static void
output_operand_error_report (char *str, bfd_boolean non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      /* When NON_FATAL_ONLY, only non-fatal records may set KIND.  */
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
4923 \f
4924 /* Write an AARCH64 instruction to buf - always little-endian. */
/* Write the 32-bit instruction INSN to BUF, always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Store the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
4934
/* Read a 32-bit little-endian instruction back from BUF.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Assemble from the most-significant byte down.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];

  return insn;
}
4943
/* Emit the encoded instruction held in the global INST into the current
   frag, recording a relocation fixup when one is pending.  NEW_INST, when
   non-NULL, is attached to the fixup for later use by the fixup code.  */
static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room for one instruction in the current frag.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  /* BFD_RELOC_UNUSED means no relocation is pending for this insn.  */
  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      /* Internal (GAS-private) fixups also carry the operand code and
	 flags so md_apply_fix can finish the encoding itself.  */
      if (aarch64_gas_internal_fixup_p ())
	{
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Let the DWARF-2 machinery know an instruction was emitted so line
     number information stays in sync.  */
  dwarf2_emit_insn (INSN_SIZE);
}
4977
4978 /* Link together opcodes of the same name. */
4979
struct templates
{
  aarch64_opcode *opcode;	/* One opcode entry for this mnemonic.  */
  struct templates *next;	/* Next opcode sharing the same name.  */
};

typedef struct templates templates;
4987
4988 static templates *
4989 lookup_mnemonic (const char *start, int len)
4990 {
4991 templates *templ = NULL;
4992
4993 templ = hash_find_n (aarch64_ops_hsh, start, len);
4994 return templ;
4995 }
4996
4997 /* Subroutine of md_assemble, responsible for looking up the primary
4998 opcode from the mnemonic the user wrote. STR points to the
4999 beginning of the mnemonic. */
5000
static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  Only the first '.' is remembered as a
     potential condition separator.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Empty mnemonic, or one that starts with '.', is not a mnemonic.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      /* The text after the dot must be a known condition name; on
	 success consume the whole mnemonic, otherwise leave *STR at the
	 dot and fail.  */
      cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* Append ".c" to the mnemonic if conditional.  The bound of 13
	 keeps LEN + 2 suffix bytes within CONDNAME[16]; no NUL is added
	 since lookup_mnemonic takes an explicit length.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5060
5061 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5062 to a corresponding operand qualifier. */
5063
static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base (smallest-width) vector qualifier for each element type.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate qualifiers ("/z" zeroing, "/m" merging).  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Vector element register.  Relies on the S_B..S_Q qualifiers
	 being contiguous and in the same order as NT_b..NT_q.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit vector registers are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5131
5132 /* Process an optional operand that is found omitted from the assembly line.
5133 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5134 instruction's opcode entry while IDX is the index of this omitted operand.
5135 */
5136
static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  /* Only a genuinely optional, absent operand may be defaulted.  */
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default value is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element (register-lane) operands.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default value is the immediate itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* An omitted "MUL #imm" defaults to a multiplier of 1.  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* No immediate was parsed, so clear any pending relocation.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Table-based operands: the default value is an index into the
       corresponding options table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5229
5230 /* Process the relocation type for move wide instructions.
5231 Return TRUE on success; otherwise return FALSE. */
5232
static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* A 'W' qualifier on operand 0 means a 32-bit destination register.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* These relocation types are rejected for MOVK.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  /* Derive the implicit LSL amount of the move-wide instruction from the
     relocation's group number: G0 -> 0, G1 -> 16, G2 -> 32, G3 -> 48.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* G2/G3 groups address bits above 31 and so cannot apply to a
	 32-bit register.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5331
5332 /* A primitive log calculator. */
5333
static inline unsigned int
get_logsz (unsigned int size)
{
  /* LS maps SIZE - 1 to log2 (SIZE); 0xff ((unsigned char) -1) marks
     sizes that are not a power of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as SIZE > 16: SIZE is unsigned, so for 0
     the index SIZE - 1 would wrap around and read far outside LS.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  /* Power-of-two sizes only.  */
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5347
5348 /* Determine and return the real reloc type code for an instruction
5349 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
5350
static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows correspond to the pseudo reloc types (LDST_LO12 and the TLS
     DTPREL/TPREL variants, in the order asserted below); columns are
     indexed by log2 of the transfer size in bytes (LDST8..LDST128).  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand is unqualified, deduce its qualifier (and
     hence the transfer size) from operand 0's qualifier.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* The TLS variants have no 128-bit (logsz == 4) relocation.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5426
5427 /* Check whether a register list REGINFO is valid. The registers must be
5428 numbered in increasing order (modulo 32), in increments of one or two.
5429
5430 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5431 increments of two.
5432
5433 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5434
5435 static bfd_boolean
5436 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5437 {
5438 uint32_t i, nb_regs, prev_regno, incr;
5439
5440 nb_regs = 1 + (reginfo & 0x3);
5441 reginfo >>= 2;
5442 prev_regno = reginfo & 0x1f;
5443 incr = accept_alternate ? 2 : 1;
5444
5445 for (i = 1; i < nb_regs; ++i)
5446 {
5447 uint32_t curr_regno;
5448 reginfo >>= 5;
5449 curr_regno = reginfo & 0x1f;
5450 if (curr_regno != ((prev_regno + incr) & 0x1f))
5451 return FALSE;
5452 prev_regno = curr_regno;
5453 }
5454
5455 return TRUE;
5456 }
5457
5458 /* Generic instruction operand parser. This does no encoding and no
5459 semantic validation; it merely squirrels values away in the inst
5460 structure. Returns TRUE or FALSE depending on whether the
5461 specified grammar matched. */
5462
5463 static bfd_boolean
5464 parse_operands (char *str, const aarch64_opcode *opcode)
5465 {
5466 int i;
5467 char *backtrack_pos = 0;
5468 const enum aarch64_opnd *operands = opcode->operands;
5469 aarch64_reg_type imm_reg_type;
5470
5471 clear_error ();
5472 skip_whitespace (str);
5473
5474 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5475 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5476 else
5477 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5478
5479 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5480 {
5481 int64_t val;
5482 const reg_entry *reg;
5483 int comma_skipped_p = 0;
5484 aarch64_reg_type rtype;
5485 struct vector_type_el vectype;
5486 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5487 aarch64_opnd_info *info = &inst.base.operands[i];
5488 aarch64_reg_type reg_type;
5489
5490 DEBUG_TRACE ("parse operand %d", i);
5491
5492 /* Assign the operand code. */
5493 info->type = operands[i];
5494
5495 if (optional_operand_p (opcode, i))
5496 {
5497 /* Remember where we are in case we need to backtrack. */
5498 gas_assert (!backtrack_pos);
5499 backtrack_pos = str;
5500 }
5501
5502 /* Expect comma between operands; the backtrack mechanism will take
5503 care of cases of omitted optional operand. */
5504 if (i > 0 && ! skip_past_char (&str, ','))
5505 {
5506 set_syntax_error (_("comma expected between operands"));
5507 goto failure;
5508 }
5509 else
5510 comma_skipped_p = 1;
5511
5512 switch (operands[i])
5513 {
5514 case AARCH64_OPND_Rd:
5515 case AARCH64_OPND_Rn:
5516 case AARCH64_OPND_Rm:
5517 case AARCH64_OPND_Rt:
5518 case AARCH64_OPND_Rt2:
5519 case AARCH64_OPND_Rs:
5520 case AARCH64_OPND_Ra:
5521 case AARCH64_OPND_Rt_SYS:
5522 case AARCH64_OPND_PAIRREG:
5523 case AARCH64_OPND_SVE_Rm:
5524 po_int_reg_or_fail (REG_TYPE_R_Z);
5525 break;
5526
5527 case AARCH64_OPND_Rd_SP:
5528 case AARCH64_OPND_Rn_SP:
5529 case AARCH64_OPND_Rt_SP:
5530 case AARCH64_OPND_SVE_Rn_SP:
5531 case AARCH64_OPND_Rm_SP:
5532 po_int_reg_or_fail (REG_TYPE_R_SP);
5533 break;
5534
5535 case AARCH64_OPND_Rm_EXT:
5536 case AARCH64_OPND_Rm_SFT:
5537 po_misc_or_fail (parse_shifter_operand
5538 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5539 ? SHIFTED_ARITH_IMM
5540 : SHIFTED_LOGIC_IMM)));
5541 if (!info->shifter.operator_present)
5542 {
5543 /* Default to LSL if not present. Libopcodes prefers shifter
5544 kind to be explicit. */
5545 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5546 info->shifter.kind = AARCH64_MOD_LSL;
5547 /* For Rm_EXT, libopcodes will carry out further check on whether
5548 or not stack pointer is used in the instruction (Recall that
5549 "the extend operator is not optional unless at least one of
5550 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5551 }
5552 break;
5553
5554 case AARCH64_OPND_Fd:
5555 case AARCH64_OPND_Fn:
5556 case AARCH64_OPND_Fm:
5557 case AARCH64_OPND_Fa:
5558 case AARCH64_OPND_Ft:
5559 case AARCH64_OPND_Ft2:
5560 case AARCH64_OPND_Sd:
5561 case AARCH64_OPND_Sn:
5562 case AARCH64_OPND_Sm:
5563 case AARCH64_OPND_SVE_VZn:
5564 case AARCH64_OPND_SVE_Vd:
5565 case AARCH64_OPND_SVE_Vm:
5566 case AARCH64_OPND_SVE_Vn:
5567 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5568 if (val == PARSE_FAIL)
5569 {
5570 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5571 goto failure;
5572 }
5573 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5574
5575 info->reg.regno = val;
5576 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5577 break;
5578
5579 case AARCH64_OPND_SVE_Pd:
5580 case AARCH64_OPND_SVE_Pg3:
5581 case AARCH64_OPND_SVE_Pg4_5:
5582 case AARCH64_OPND_SVE_Pg4_10:
5583 case AARCH64_OPND_SVE_Pg4_16:
5584 case AARCH64_OPND_SVE_Pm:
5585 case AARCH64_OPND_SVE_Pn:
5586 case AARCH64_OPND_SVE_Pt:
5587 reg_type = REG_TYPE_PN;
5588 goto vector_reg;
5589
5590 case AARCH64_OPND_SVE_Za_5:
5591 case AARCH64_OPND_SVE_Za_16:
5592 case AARCH64_OPND_SVE_Zd:
5593 case AARCH64_OPND_SVE_Zm_5:
5594 case AARCH64_OPND_SVE_Zm_16:
5595 case AARCH64_OPND_SVE_Zn:
5596 case AARCH64_OPND_SVE_Zt:
5597 reg_type = REG_TYPE_ZN;
5598 goto vector_reg;
5599
5600 case AARCH64_OPND_Va:
5601 case AARCH64_OPND_Vd:
5602 case AARCH64_OPND_Vn:
5603 case AARCH64_OPND_Vm:
5604 reg_type = REG_TYPE_VN;
5605 vector_reg:
5606 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5607 if (val == PARSE_FAIL)
5608 {
5609 first_error (_(get_reg_expected_msg (reg_type)));
5610 goto failure;
5611 }
5612 if (vectype.defined & NTA_HASINDEX)
5613 goto failure;
5614
5615 info->reg.regno = val;
5616 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5617 && vectype.type == NT_invtype)
5618 /* Unqualified Pn and Zn registers are allowed in certain
5619 contexts. Rely on F_STRICT qualifier checking to catch
5620 invalid uses. */
5621 info->qualifier = AARCH64_OPND_QLF_NIL;
5622 else
5623 {
5624 info->qualifier = vectype_to_qualifier (&vectype);
5625 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5626 goto failure;
5627 }
5628 break;
5629
5630 case AARCH64_OPND_VdD1:
5631 case AARCH64_OPND_VnD1:
5632 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5633 if (val == PARSE_FAIL)
5634 {
5635 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5636 goto failure;
5637 }
5638 if (vectype.type != NT_d || vectype.index != 1)
5639 {
5640 set_fatal_syntax_error
5641 (_("the top half of a 128-bit FP/SIMD register is expected"));
5642 goto failure;
5643 }
5644 info->reg.regno = val;
5645 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5646 here; it is correct for the purpose of encoding/decoding since
5647 only the register number is explicitly encoded in the related
5648 instructions, although this appears a bit hacky. */
5649 info->qualifier = AARCH64_OPND_QLF_S_D;
5650 break;
5651
5652 case AARCH64_OPND_SVE_Zm3_INDEX:
5653 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5654 case AARCH64_OPND_SVE_Zm3_11_INDEX:
5655 case AARCH64_OPND_SVE_Zm4_11_INDEX:
5656 case AARCH64_OPND_SVE_Zm4_INDEX:
5657 case AARCH64_OPND_SVE_Zn_INDEX:
5658 reg_type = REG_TYPE_ZN;
5659 goto vector_reg_index;
5660
5661 case AARCH64_OPND_Ed:
5662 case AARCH64_OPND_En:
5663 case AARCH64_OPND_Em:
5664 case AARCH64_OPND_Em16:
5665 case AARCH64_OPND_SM3_IMM2:
5666 reg_type = REG_TYPE_VN;
5667 vector_reg_index:
5668 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5669 if (val == PARSE_FAIL)
5670 {
5671 first_error (_(get_reg_expected_msg (reg_type)));
5672 goto failure;
5673 }
5674 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5675 goto failure;
5676
5677 info->reglane.regno = val;
5678 info->reglane.index = vectype.index;
5679 info->qualifier = vectype_to_qualifier (&vectype);
5680 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5681 goto failure;
5682 break;
5683
5684 case AARCH64_OPND_SVE_ZnxN:
5685 case AARCH64_OPND_SVE_ZtxN:
5686 reg_type = REG_TYPE_ZN;
5687 goto vector_reg_list;
5688
5689 case AARCH64_OPND_LVn:
5690 case AARCH64_OPND_LVt:
5691 case AARCH64_OPND_LVt_AL:
5692 case AARCH64_OPND_LEt:
5693 reg_type = REG_TYPE_VN;
5694 vector_reg_list:
5695 if (reg_type == REG_TYPE_ZN
5696 && get_opcode_dependent_value (opcode) == 1
5697 && *str != '{')
5698 {
5699 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5700 if (val == PARSE_FAIL)
5701 {
5702 first_error (_(get_reg_expected_msg (reg_type)));
5703 goto failure;
5704 }
5705 info->reglist.first_regno = val;
5706 info->reglist.num_regs = 1;
5707 }
5708 else
5709 {
5710 val = parse_vector_reg_list (&str, reg_type, &vectype);
5711 if (val == PARSE_FAIL)
5712 goto failure;
5713 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5714 {
5715 set_fatal_syntax_error (_("invalid register list"));
5716 goto failure;
5717 }
5718 info->reglist.first_regno = (val >> 2) & 0x1f;
5719 info->reglist.num_regs = (val & 0x3) + 1;
5720 }
5721 if (operands[i] == AARCH64_OPND_LEt)
5722 {
5723 if (!(vectype.defined & NTA_HASINDEX))
5724 goto failure;
5725 info->reglist.has_index = 1;
5726 info->reglist.index = vectype.index;
5727 }
5728 else
5729 {
5730 if (vectype.defined & NTA_HASINDEX)
5731 goto failure;
5732 if (!(vectype.defined & NTA_HASTYPE))
5733 {
5734 if (reg_type == REG_TYPE_ZN)
5735 set_fatal_syntax_error (_("missing type suffix"));
5736 goto failure;
5737 }
5738 }
5739 info->qualifier = vectype_to_qualifier (&vectype);
5740 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5741 goto failure;
5742 break;
5743
5744 case AARCH64_OPND_CRn:
5745 case AARCH64_OPND_CRm:
5746 {
5747 char prefix = *(str++);
5748 if (prefix != 'c' && prefix != 'C')
5749 goto failure;
5750
5751 po_imm_nc_or_fail ();
5752 if (val > 15)
5753 {
5754 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5755 goto failure;
5756 }
5757 info->qualifier = AARCH64_OPND_QLF_CR;
5758 info->imm.value = val;
5759 break;
5760 }
5761
5762 case AARCH64_OPND_SHLL_IMM:
5763 case AARCH64_OPND_IMM_VLSR:
5764 po_imm_or_fail (1, 64);
5765 info->imm.value = val;
5766 break;
5767
5768 case AARCH64_OPND_CCMP_IMM:
5769 case AARCH64_OPND_SIMM5:
5770 case AARCH64_OPND_FBITS:
5771 case AARCH64_OPND_TME_UIMM16:
5772 case AARCH64_OPND_UIMM4:
5773 case AARCH64_OPND_UIMM4_ADDG:
5774 case AARCH64_OPND_UIMM10:
5775 case AARCH64_OPND_UIMM3_OP1:
5776 case AARCH64_OPND_UIMM3_OP2:
5777 case AARCH64_OPND_IMM_VLSL:
5778 case AARCH64_OPND_IMM:
5779 case AARCH64_OPND_IMM_2:
5780 case AARCH64_OPND_WIDTH:
5781 case AARCH64_OPND_SVE_INV_LIMM:
5782 case AARCH64_OPND_SVE_LIMM:
5783 case AARCH64_OPND_SVE_LIMM_MOV:
5784 case AARCH64_OPND_SVE_SHLIMM_PRED:
5785 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5786 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
5787 case AARCH64_OPND_SVE_SHRIMM_PRED:
5788 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5789 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
5790 case AARCH64_OPND_SVE_SIMM5:
5791 case AARCH64_OPND_SVE_SIMM5B:
5792 case AARCH64_OPND_SVE_SIMM6:
5793 case AARCH64_OPND_SVE_SIMM8:
5794 case AARCH64_OPND_SVE_UIMM3:
5795 case AARCH64_OPND_SVE_UIMM7:
5796 case AARCH64_OPND_SVE_UIMM8:
5797 case AARCH64_OPND_SVE_UIMM8_53:
5798 case AARCH64_OPND_IMM_ROT1:
5799 case AARCH64_OPND_IMM_ROT2:
5800 case AARCH64_OPND_IMM_ROT3:
5801 case AARCH64_OPND_SVE_IMM_ROT1:
5802 case AARCH64_OPND_SVE_IMM_ROT2:
5803 case AARCH64_OPND_SVE_IMM_ROT3:
5804 po_imm_nc_or_fail ();
5805 info->imm.value = val;
5806 break;
5807
5808 case AARCH64_OPND_SVE_AIMM:
5809 case AARCH64_OPND_SVE_ASIMM:
5810 po_imm_nc_or_fail ();
5811 info->imm.value = val;
5812 skip_whitespace (str);
5813 if (skip_past_comma (&str))
5814 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5815 else
5816 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5817 break;
5818
5819 case AARCH64_OPND_SVE_PATTERN:
5820 po_enum_or_fail (aarch64_sve_pattern_array);
5821 info->imm.value = val;
5822 break;
5823
5824 case AARCH64_OPND_SVE_PATTERN_SCALED:
5825 po_enum_or_fail (aarch64_sve_pattern_array);
5826 info->imm.value = val;
5827 if (skip_past_comma (&str)
5828 && !parse_shift (&str, info, SHIFTED_MUL))
5829 goto failure;
5830 if (!info->shifter.operator_present)
5831 {
5832 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5833 info->shifter.kind = AARCH64_MOD_MUL;
5834 info->shifter.amount = 1;
5835 }
5836 break;
5837
5838 case AARCH64_OPND_SVE_PRFOP:
5839 po_enum_or_fail (aarch64_sve_prfop_array);
5840 info->imm.value = val;
5841 break;
5842
5843 case AARCH64_OPND_UIMM7:
5844 po_imm_or_fail (0, 127);
5845 info->imm.value = val;
5846 break;
5847
5848 case AARCH64_OPND_IDX:
5849 case AARCH64_OPND_MASK:
5850 case AARCH64_OPND_BIT_NUM:
5851 case AARCH64_OPND_IMMR:
5852 case AARCH64_OPND_IMMS:
5853 po_imm_or_fail (0, 63);
5854 info->imm.value = val;
5855 break;
5856
5857 case AARCH64_OPND_IMM0:
5858 po_imm_nc_or_fail ();
5859 if (val != 0)
5860 {
5861 set_fatal_syntax_error (_("immediate zero expected"));
5862 goto failure;
5863 }
5864 info->imm.value = 0;
5865 break;
5866
5867 case AARCH64_OPND_FPIMM0:
5868 {
5869 int qfloat;
5870 bfd_boolean res1 = FALSE, res2 = FALSE;
5871 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5872 it is probably not worth the effort to support it. */
5873 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5874 imm_reg_type))
5875 && (error_p ()
5876 || !(res2 = parse_constant_immediate (&str, &val,
5877 imm_reg_type))))
5878 goto failure;
5879 if ((res1 && qfloat == 0) || (res2 && val == 0))
5880 {
5881 info->imm.value = 0;
5882 info->imm.is_fp = 1;
5883 break;
5884 }
5885 set_fatal_syntax_error (_("immediate zero expected"));
5886 goto failure;
5887 }
5888
5889 case AARCH64_OPND_IMM_MOV:
5890 {
5891 char *saved = str;
5892 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5893 reg_name_p (str, REG_TYPE_VN))
5894 goto failure;
5895 str = saved;
5896 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5897 GE_OPT_PREFIX, 1));
5898 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5899 later. fix_mov_imm_insn will try to determine a machine
5900 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5901 message if the immediate cannot be moved by a single
5902 instruction. */
5903 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5904 inst.base.operands[i].skip = 1;
5905 }
5906 break;
5907
5908 case AARCH64_OPND_SIMD_IMM:
5909 case AARCH64_OPND_SIMD_IMM_SFT:
5910 if (! parse_big_immediate (&str, &val, imm_reg_type))
5911 goto failure;
5912 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5913 /* addr_off_p */ 0,
5914 /* need_libopcodes_p */ 1,
5915 /* skip_p */ 1);
5916 /* Parse shift.
5917 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5918 shift, we don't check it here; we leave the checking to
5919 the libopcodes (operand_general_constraint_met_p). By
5920 doing this, we achieve better diagnostics. */
5921 if (skip_past_comma (&str)
5922 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5923 goto failure;
5924 if (!info->shifter.operator_present
5925 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5926 {
5927 /* Default to LSL if not present. Libopcodes prefers shifter
5928 kind to be explicit. */
5929 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5930 info->shifter.kind = AARCH64_MOD_LSL;
5931 }
5932 break;
5933
5934 case AARCH64_OPND_FPIMM:
5935 case AARCH64_OPND_SIMD_FPIMM:
5936 case AARCH64_OPND_SVE_FPIMM8:
5937 {
5938 int qfloat;
5939 bfd_boolean dp_p;
5940
5941 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5942 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5943 || !aarch64_imm_float_p (qfloat))
5944 {
5945 if (!error_p ())
5946 set_fatal_syntax_error (_("invalid floating-point"
5947 " constant"));
5948 goto failure;
5949 }
5950 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5951 inst.base.operands[i].imm.is_fp = 1;
5952 }
5953 break;
5954
5955 case AARCH64_OPND_SVE_I1_HALF_ONE:
5956 case AARCH64_OPND_SVE_I1_HALF_TWO:
5957 case AARCH64_OPND_SVE_I1_ZERO_ONE:
5958 {
5959 int qfloat;
5960 bfd_boolean dp_p;
5961
5962 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5963 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5964 {
5965 if (!error_p ())
5966 set_fatal_syntax_error (_("invalid floating-point"
5967 " constant"));
5968 goto failure;
5969 }
5970 inst.base.operands[i].imm.value = qfloat;
5971 inst.base.operands[i].imm.is_fp = 1;
5972 }
5973 break;
5974
5975 case AARCH64_OPND_LIMM:
5976 po_misc_or_fail (parse_shifter_operand (&str, info,
5977 SHIFTED_LOGIC_IMM));
5978 if (info->shifter.operator_present)
5979 {
5980 set_fatal_syntax_error
5981 (_("shift not allowed for bitmask immediate"));
5982 goto failure;
5983 }
5984 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5985 /* addr_off_p */ 0,
5986 /* need_libopcodes_p */ 1,
5987 /* skip_p */ 1);
5988 break;
5989
5990 case AARCH64_OPND_AIMM:
5991 if (opcode->op == OP_ADD)
5992 /* ADD may have relocation types. */
5993 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5994 SHIFTED_ARITH_IMM));
5995 else
5996 po_misc_or_fail (parse_shifter_operand (&str, info,
5997 SHIFTED_ARITH_IMM));
5998 switch (inst.reloc.type)
5999 {
6000 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6001 info->shifter.amount = 12;
6002 break;
6003 case BFD_RELOC_UNUSED:
6004 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6005 if (info->shifter.kind != AARCH64_MOD_NONE)
6006 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6007 inst.reloc.pc_rel = 0;
6008 break;
6009 default:
6010 break;
6011 }
6012 info->imm.value = 0;
6013 if (!info->shifter.operator_present)
6014 {
6015 /* Default to LSL if not present. Libopcodes prefers shifter
6016 kind to be explicit. */
6017 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6018 info->shifter.kind = AARCH64_MOD_LSL;
6019 }
6020 break;
6021
6022 case AARCH64_OPND_HALF:
6023 {
6024 /* #<imm16> or relocation. */
6025 int internal_fixup_p;
6026 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6027 if (internal_fixup_p)
6028 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6029 skip_whitespace (str);
6030 if (skip_past_comma (&str))
6031 {
6032 /* {, LSL #<shift>} */
6033 if (! aarch64_gas_internal_fixup_p ())
6034 {
6035 set_fatal_syntax_error (_("can't mix relocation modifier "
6036 "with explicit shift"));
6037 goto failure;
6038 }
6039 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6040 }
6041 else
6042 inst.base.operands[i].shifter.amount = 0;
6043 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6044 inst.base.operands[i].imm.value = 0;
6045 if (! process_movw_reloc_info ())
6046 goto failure;
6047 }
6048 break;
6049
6050 case AARCH64_OPND_EXCEPTION:
6051 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6052 imm_reg_type));
6053 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6054 /* addr_off_p */ 0,
6055 /* need_libopcodes_p */ 0,
6056 /* skip_p */ 1);
6057 break;
6058
6059 case AARCH64_OPND_NZCV:
6060 {
6061 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
6062 if (nzcv != NULL)
6063 {
6064 str += 4;
6065 info->imm.value = nzcv->value;
6066 break;
6067 }
6068 po_imm_or_fail (0, 15);
6069 info->imm.value = val;
6070 }
6071 break;
6072
6073 case AARCH64_OPND_COND:
6074 case AARCH64_OPND_COND1:
6075 {
6076 char *start = str;
6077 do
6078 str++;
6079 while (ISALPHA (*str));
6080 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
6081 if (info->cond == NULL)
6082 {
6083 set_syntax_error (_("invalid condition"));
6084 goto failure;
6085 }
6086 else if (operands[i] == AARCH64_OPND_COND1
6087 && (info->cond->value & 0xe) == 0xe)
6088 {
6089 /* Do not allow AL or NV. */
6090 set_default_error ();
6091 goto failure;
6092 }
6093 }
6094 break;
6095
6096 case AARCH64_OPND_ADDR_ADRP:
6097 po_misc_or_fail (parse_adrp (&str));
6098 /* Clear the value as operand needs to be relocated. */
6099 info->imm.value = 0;
6100 break;
6101
6102 case AARCH64_OPND_ADDR_PCREL14:
6103 case AARCH64_OPND_ADDR_PCREL19:
6104 case AARCH64_OPND_ADDR_PCREL21:
6105 case AARCH64_OPND_ADDR_PCREL26:
6106 po_misc_or_fail (parse_address (&str, info));
6107 if (!info->addr.pcrel)
6108 {
6109 set_syntax_error (_("invalid pc-relative address"));
6110 goto failure;
6111 }
6112 if (inst.gen_lit_pool
6113 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6114 {
6115 /* Only permit "=value" in the literal load instructions.
6116 The literal will be generated by programmer_friendly_fixup. */
6117 set_syntax_error (_("invalid use of \"=immediate\""));
6118 goto failure;
6119 }
6120 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6121 {
6122 set_syntax_error (_("unrecognized relocation suffix"));
6123 goto failure;
6124 }
6125 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6126 {
6127 info->imm.value = inst.reloc.exp.X_add_number;
6128 inst.reloc.type = BFD_RELOC_UNUSED;
6129 }
6130 else
6131 {
6132 info->imm.value = 0;
6133 if (inst.reloc.type == BFD_RELOC_UNUSED)
6134 switch (opcode->iclass)
6135 {
6136 case compbranch:
6137 case condbranch:
6138 /* e.g. CBZ or B.COND */
6139 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6140 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6141 break;
6142 case testbranch:
6143 /* e.g. TBZ */
6144 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6145 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6146 break;
6147 case branch_imm:
6148 /* e.g. B or BL */
6149 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6150 inst.reloc.type =
6151 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6152 : BFD_RELOC_AARCH64_JUMP26;
6153 break;
6154 case loadlit:
6155 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6156 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6157 break;
6158 case pcreladdr:
6159 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6160 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6161 break;
6162 default:
6163 gas_assert (0);
6164 abort ();
6165 }
6166 inst.reloc.pc_rel = 1;
6167 }
6168 break;
6169
6170 case AARCH64_OPND_ADDR_SIMPLE:
6171 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6172 {
6173 /* [<Xn|SP>{, #<simm>}] */
6174 char *start = str;
6175 /* First use the normal address-parsing routines, to get
6176 the usual syntax errors. */
6177 po_misc_or_fail (parse_address (&str, info));
6178 if (info->addr.pcrel || info->addr.offset.is_reg
6179 || !info->addr.preind || info->addr.postind
6180 || info->addr.writeback)
6181 {
6182 set_syntax_error (_("invalid addressing mode"));
6183 goto failure;
6184 }
6185
6186 /* Then retry, matching the specific syntax of these addresses. */
6187 str = start;
6188 po_char_or_fail ('[');
6189 po_reg_or_fail (REG_TYPE_R64_SP);
6190 /* Accept optional ", #0". */
6191 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6192 && skip_past_char (&str, ','))
6193 {
6194 skip_past_char (&str, '#');
6195 if (! skip_past_char (&str, '0'))
6196 {
6197 set_fatal_syntax_error
6198 (_("the optional immediate offset can only be 0"));
6199 goto failure;
6200 }
6201 }
6202 po_char_or_fail (']');
6203 break;
6204 }
6205
6206 case AARCH64_OPND_ADDR_REGOFF:
6207 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6208 po_misc_or_fail (parse_address (&str, info));
6209 regoff_addr:
6210 if (info->addr.pcrel || !info->addr.offset.is_reg
6211 || !info->addr.preind || info->addr.postind
6212 || info->addr.writeback)
6213 {
6214 set_syntax_error (_("invalid addressing mode"));
6215 goto failure;
6216 }
6217 if (!info->shifter.operator_present)
6218 {
6219 /* Default to LSL if not present. Libopcodes prefers shifter
6220 kind to be explicit. */
6221 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6222 info->shifter.kind = AARCH64_MOD_LSL;
6223 }
6224 /* Qualifier to be deduced by libopcodes. */
6225 break;
6226
6227 case AARCH64_OPND_ADDR_SIMM7:
6228 po_misc_or_fail (parse_address (&str, info));
6229 if (info->addr.pcrel || info->addr.offset.is_reg
6230 || (!info->addr.preind && !info->addr.postind))
6231 {
6232 set_syntax_error (_("invalid addressing mode"));
6233 goto failure;
6234 }
6235 if (inst.reloc.type != BFD_RELOC_UNUSED)
6236 {
6237 set_syntax_error (_("relocation not allowed"));
6238 goto failure;
6239 }
6240 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6241 /* addr_off_p */ 1,
6242 /* need_libopcodes_p */ 1,
6243 /* skip_p */ 0);
6244 break;
6245
6246 case AARCH64_OPND_ADDR_SIMM9:
6247 case AARCH64_OPND_ADDR_SIMM9_2:
6248 case AARCH64_OPND_ADDR_SIMM11:
6249 case AARCH64_OPND_ADDR_SIMM13:
6250 po_misc_or_fail (parse_address (&str, info));
6251 if (info->addr.pcrel || info->addr.offset.is_reg
6252 || (!info->addr.preind && !info->addr.postind)
6253 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6254 && info->addr.writeback))
6255 {
6256 set_syntax_error (_("invalid addressing mode"));
6257 goto failure;
6258 }
6259 if (inst.reloc.type != BFD_RELOC_UNUSED)
6260 {
6261 set_syntax_error (_("relocation not allowed"));
6262 goto failure;
6263 }
6264 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6265 /* addr_off_p */ 1,
6266 /* need_libopcodes_p */ 1,
6267 /* skip_p */ 0);
6268 break;
6269
6270 case AARCH64_OPND_ADDR_SIMM10:
6271 case AARCH64_OPND_ADDR_OFFSET:
6272 po_misc_or_fail (parse_address (&str, info));
6273 if (info->addr.pcrel || info->addr.offset.is_reg
6274 || !info->addr.preind || info->addr.postind)
6275 {
6276 set_syntax_error (_("invalid addressing mode"));
6277 goto failure;
6278 }
6279 if (inst.reloc.type != BFD_RELOC_UNUSED)
6280 {
6281 set_syntax_error (_("relocation not allowed"));
6282 goto failure;
6283 }
6284 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6285 /* addr_off_p */ 1,
6286 /* need_libopcodes_p */ 1,
6287 /* skip_p */ 0);
6288 break;
6289
6290 case AARCH64_OPND_ADDR_UIMM12:
6291 po_misc_or_fail (parse_address (&str, info));
6292 if (info->addr.pcrel || info->addr.offset.is_reg
6293 || !info->addr.preind || info->addr.writeback)
6294 {
6295 set_syntax_error (_("invalid addressing mode"));
6296 goto failure;
6297 }
6298 if (inst.reloc.type == BFD_RELOC_UNUSED)
6299 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6300 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6301 || (inst.reloc.type
6302 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6303 || (inst.reloc.type
6304 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6305 || (inst.reloc.type
6306 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6307 || (inst.reloc.type
6308 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6309 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6310 /* Leave qualifier to be determined by libopcodes. */
6311 break;
6312
6313 case AARCH64_OPND_SIMD_ADDR_POST:
6314 /* [<Xn|SP>], <Xm|#<amount>> */
6315 po_misc_or_fail (parse_address (&str, info));
6316 if (!info->addr.postind || !info->addr.writeback)
6317 {
6318 set_syntax_error (_("invalid addressing mode"));
6319 goto failure;
6320 }
6321 if (!info->addr.offset.is_reg)
6322 {
6323 if (inst.reloc.exp.X_op == O_constant)
6324 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6325 else
6326 {
6327 set_fatal_syntax_error
6328 (_("writeback value must be an immediate constant"));
6329 goto failure;
6330 }
6331 }
6332 /* No qualifier. */
6333 break;
6334
6335 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6336 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6337 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6338 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6339 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6340 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6341 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6342 case AARCH64_OPND_SVE_ADDR_RI_U6:
6343 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6344 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6345 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6346 /* [X<n>{, #imm, MUL VL}]
6347 [X<n>{, #imm}]
6348 but recognizing SVE registers. */
6349 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6350 &offset_qualifier));
6351 if (base_qualifier != AARCH64_OPND_QLF_X)
6352 {
6353 set_syntax_error (_("invalid addressing mode"));
6354 goto failure;
6355 }
6356 sve_regimm:
6357 if (info->addr.pcrel || info->addr.offset.is_reg
6358 || !info->addr.preind || info->addr.writeback)
6359 {
6360 set_syntax_error (_("invalid addressing mode"));
6361 goto failure;
6362 }
6363 if (inst.reloc.type != BFD_RELOC_UNUSED
6364 || inst.reloc.exp.X_op != O_constant)
6365 {
6366 /* Make sure this has priority over
6367 "invalid addressing mode". */
6368 set_fatal_syntax_error (_("constant offset required"));
6369 goto failure;
6370 }
6371 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6372 break;
6373
6374 case AARCH64_OPND_SVE_ADDR_R:
6375 /* [<Xn|SP>{, <R><m>}]
6376 but recognizing SVE registers. */
6377 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6378 &offset_qualifier));
6379 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6380 {
6381 offset_qualifier = AARCH64_OPND_QLF_X;
6382 info->addr.offset.is_reg = 1;
6383 info->addr.offset.regno = 31;
6384 }
6385 else if (base_qualifier != AARCH64_OPND_QLF_X
6386 || offset_qualifier != AARCH64_OPND_QLF_X)
6387 {
6388 set_syntax_error (_("invalid addressing mode"));
6389 goto failure;
6390 }
6391 goto regoff_addr;
6392
6393 case AARCH64_OPND_SVE_ADDR_RR:
6394 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6395 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6396 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6397 case AARCH64_OPND_SVE_ADDR_RX:
6398 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6399 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6400 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6401 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6402 but recognizing SVE registers. */
6403 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6404 &offset_qualifier));
6405 if (base_qualifier != AARCH64_OPND_QLF_X
6406 || offset_qualifier != AARCH64_OPND_QLF_X)
6407 {
6408 set_syntax_error (_("invalid addressing mode"));
6409 goto failure;
6410 }
6411 goto regoff_addr;
6412
6413 case AARCH64_OPND_SVE_ADDR_RZ:
6414 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6415 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6416 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6417 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6418 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6419 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6420 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6421 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6422 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6423 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6424 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6425 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6426 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6427 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6428 &offset_qualifier));
6429 if (base_qualifier != AARCH64_OPND_QLF_X
6430 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6431 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6432 {
6433 set_syntax_error (_("invalid addressing mode"));
6434 goto failure;
6435 }
6436 info->qualifier = offset_qualifier;
6437 goto regoff_addr;
6438
6439 case AARCH64_OPND_SVE_ADDR_ZX:
6440 /* [Zn.<T>{, <Xm>}]. */
6441 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6442 &offset_qualifier));
6443 /* Things to check:
6444 base_qualifier either S_S or S_D
6445 offset_qualifier must be X
6446 */
6447 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6448 && base_qualifier != AARCH64_OPND_QLF_S_D)
6449 || offset_qualifier != AARCH64_OPND_QLF_X)
6450 {
6451 set_syntax_error (_("invalid addressing mode"));
6452 goto failure;
6453 }
6454 info->qualifier = base_qualifier;
6455 if (!info->addr.offset.is_reg || info->addr.pcrel
6456 || !info->addr.preind || info->addr.writeback
6457 || info->shifter.operator_present != 0)
6458 {
6459 set_syntax_error (_("invalid addressing mode"));
6460 goto failure;
6461 }
6462 info->shifter.kind = AARCH64_MOD_LSL;
6463 break;
6464
6465
6466 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6467 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6468 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6469 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6470 /* [Z<n>.<T>{, #imm}] */
6471 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6472 &offset_qualifier));
6473 if (base_qualifier != AARCH64_OPND_QLF_S_S
6474 && base_qualifier != AARCH64_OPND_QLF_S_D)
6475 {
6476 set_syntax_error (_("invalid addressing mode"));
6477 goto failure;
6478 }
6479 info->qualifier = base_qualifier;
6480 goto sve_regimm;
6481
6482 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6483 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6484 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6485 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6486 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6487
6488 We don't reject:
6489
6490 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6491
6492 here since we get better error messages by leaving it to
6493 the qualifier checking routines. */
6494 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6495 &offset_qualifier));
6496 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6497 && base_qualifier != AARCH64_OPND_QLF_S_D)
6498 || offset_qualifier != base_qualifier)
6499 {
6500 set_syntax_error (_("invalid addressing mode"));
6501 goto failure;
6502 }
6503 info->qualifier = base_qualifier;
6504 goto regoff_addr;
6505
6506 case AARCH64_OPND_SYSREG:
6507 {
6508 uint32_t sysreg_flags;
6509 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6510 &sysreg_flags)) == PARSE_FAIL)
6511 {
6512 set_syntax_error (_("unknown or missing system register name"));
6513 goto failure;
6514 }
6515 inst.base.operands[i].sysreg.value = val;
6516 inst.base.operands[i].sysreg.flags = sysreg_flags;
6517 break;
6518 }
6519
6520 case AARCH64_OPND_PSTATEFIELD:
6521 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6522 == PARSE_FAIL)
6523 {
6524 set_syntax_error (_("unknown or missing PSTATE field name"));
6525 goto failure;
6526 }
6527 inst.base.operands[i].pstatefield = val;
6528 break;
6529
6530 case AARCH64_OPND_SYSREG_IC:
6531 inst.base.operands[i].sysins_op =
6532 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6533 goto sys_reg_ins;
6534
6535 case AARCH64_OPND_SYSREG_DC:
6536 inst.base.operands[i].sysins_op =
6537 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6538 goto sys_reg_ins;
6539
6540 case AARCH64_OPND_SYSREG_AT:
6541 inst.base.operands[i].sysins_op =
6542 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6543 goto sys_reg_ins;
6544
6545 case AARCH64_OPND_SYSREG_SR:
6546 inst.base.operands[i].sysins_op =
6547 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6548 goto sys_reg_ins;
6549
6550 case AARCH64_OPND_SYSREG_TLBI:
6551 inst.base.operands[i].sysins_op =
6552 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6553 sys_reg_ins:
6554 if (inst.base.operands[i].sysins_op == NULL)
6555 {
6556 set_fatal_syntax_error ( _("unknown or missing operation name"));
6557 goto failure;
6558 }
6559 break;
6560
6561 case AARCH64_OPND_BARRIER:
6562 case AARCH64_OPND_BARRIER_ISB:
6563 val = parse_barrier (&str);
6564 if (val != PARSE_FAIL
6565 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6566 {
6567 /* ISB only accepts options name 'sy'. */
6568 set_syntax_error
6569 (_("the specified option is not accepted in ISB"));
6570 /* Turn off backtrack as this optional operand is present. */
6571 backtrack_pos = 0;
6572 goto failure;
6573 }
6574 /* This is an extension to accept a 0..15 immediate. */
6575 if (val == PARSE_FAIL)
6576 po_imm_or_fail (0, 15);
6577 info->barrier = aarch64_barrier_options + val;
6578 break;
6579
6580 case AARCH64_OPND_PRFOP:
6581 val = parse_pldop (&str);
6582 /* This is an extension to accept a 0..31 immediate. */
6583 if (val == PARSE_FAIL)
6584 po_imm_or_fail (0, 31);
6585 inst.base.operands[i].prfop = aarch64_prfops + val;
6586 break;
6587
6588 case AARCH64_OPND_BARRIER_PSB:
6589 val = parse_barrier_psb (&str, &(info->hint_option));
6590 if (val == PARSE_FAIL)
6591 goto failure;
6592 break;
6593
6594 case AARCH64_OPND_BTI_TARGET:
6595 val = parse_bti_operand (&str, &(info->hint_option));
6596 if (val == PARSE_FAIL)
6597 goto failure;
6598 break;
6599
6600 default:
6601 as_fatal (_("unhandled operand code %d"), operands[i]);
6602 }
6603
6604 /* If we get here, this operand was successfully parsed. */
6605 inst.base.operands[i].present = 1;
6606 continue;
6607
6608 failure:
6609 /* The parse routine should already have set the error, but in case
6610 not, set a default one here. */
6611 if (! error_p ())
6612 set_default_error ();
6613
6614 if (! backtrack_pos)
6615 goto parse_operands_return;
6616
6617 {
6618 /* We reach here because this operand is marked as optional, and
6619 either no operand was supplied or the operand was supplied but it
6620 was syntactically incorrect. In the latter case we report an
6621 error. In the former case we perform a few more checks before
6622 dropping through to the code to insert the default operand. */
6623
6624 char *tmp = backtrack_pos;
6625 char endchar = END_OF_INSN;
6626
6627 if (i != (aarch64_num_of_operands (opcode) - 1))
6628 endchar = ',';
6629 skip_past_char (&tmp, ',');
6630
6631 if (*tmp != endchar)
6632 /* The user has supplied an operand in the wrong format. */
6633 goto parse_operands_return;
6634
6635 /* Make sure there is not a comma before the optional operand.
6636 For example the fifth operand of 'sys' is optional:
6637
6638 sys #0,c0,c0,#0, <--- wrong
6639 sys #0,c0,c0,#0 <--- correct. */
6640 if (comma_skipped_p && i && endchar == END_OF_INSN)
6641 {
6642 set_fatal_syntax_error
6643 (_("unexpected comma before the omitted optional operand"));
6644 goto parse_operands_return;
6645 }
6646 }
6647
6648 /* Reaching here means we are dealing with an optional operand that is
6649 omitted from the assembly line. */
6650 gas_assert (optional_operand_p (opcode, i));
6651 info->present = 0;
6652 process_omitted_operand (operands[i], opcode, i, info);
6653
6654 /* Try again, skipping the optional operand at backtrack_pos. */
6655 str = backtrack_pos;
6656 backtrack_pos = 0;
6657
6658 /* Clear any error record after the omitted optional operand has been
6659 successfully handled. */
6660 clear_error ();
6661 }
6662
6663 /* Check if we have parsed all the operands. */
6664 if (*str != '\0' && ! error_p ())
6665 {
6666 /* Set I to the index of the last present operand; this is
6667 for the purpose of diagnostics. */
6668 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6669 ;
6670 set_fatal_syntax_error
6671 (_("unexpected characters following instruction"));
6672 }
6673
6674 parse_operands_return:
6675
6676 if (error_p ())
6677 {
6678 DEBUG_TRACE ("parsing FAIL: %s - %s",
6679 operand_mismatch_kind_names[get_error_kind ()],
6680 get_error_message ());
6681 /* Record the operand error properly; this is useful when there
6682 are multiple instruction templates for a mnemonic name, so that
6683 later on, we can select the error that most closely describes
6684 the problem. */
6685 record_operand_error (opcode, i, get_error_kind (),
6686 get_error_message ());
6687 return FALSE;
6688 }
6689 else
6690 {
6691 DEBUG_TRACE ("parsing SUCCESS");
6692 return TRUE;
6693 }
6694 }
6695
/* It does some fix-up to provide some programmer friendly feature while
   keeping the libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register restricts the bit number to 0..31; reject anything
	     larger, then canonicalize the qualifier to X for encoding.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW loads a 32-bit literal regardless of the (64-bit)
	     destination qualifier.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  /* Only constant or symbolic expressions can go in the pool.  */
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6803
6804 /* Check for loads and stores that will cause unpredictable behavior. */
6805
6806 static void
6807 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6808 {
6809 aarch64_inst *base = &instr->base;
6810 const aarch64_opcode *opcode = base->opcode;
6811 const aarch64_opnd_info *opnds = base->operands;
6812 switch (opcode->iclass)
6813 {
6814 case ldst_pos:
6815 case ldst_imm9:
6816 case ldst_imm10:
6817 case ldst_unscaled:
6818 case ldst_unpriv:
6819 /* Loading/storing the base register is unpredictable if writeback. */
6820 if ((aarch64_get_operand_class (opnds[0].type)
6821 == AARCH64_OPND_CLASS_INT_REG)
6822 && opnds[0].reg.regno == opnds[1].addr.base_regno
6823 && opnds[1].addr.base_regno != REG_SP
6824 /* Exempt STG/STZG/ST2G/STZ2G. */
6825 && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
6826 && opnds[1].addr.writeback)
6827 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6828 break;
6829
6830 case ldstpair_off:
6831 case ldstnapair_offs:
6832 case ldstpair_indexed:
6833 /* Loading/storing the base register is unpredictable if writeback. */
6834 if ((aarch64_get_operand_class (opnds[0].type)
6835 == AARCH64_OPND_CLASS_INT_REG)
6836 && (opnds[0].reg.regno == opnds[2].addr.base_regno
6837 || opnds[1].reg.regno == opnds[2].addr.base_regno)
6838 && opnds[2].addr.base_regno != REG_SP
6839 /* Exempt STGP. */
6840 && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
6841 && opnds[2].addr.writeback)
6842 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6843 /* Load operations must load different registers. */
6844 if ((opcode->opcode & (1 << 22))
6845 && opnds[0].reg.regno == opnds[1].reg.regno)
6846 as_warn (_("unpredictable load of register pair -- `%s'"), str);
6847 break;
6848
6849 case ldstexcl:
6850 /* It is unpredictable if the destination and status registers are the
6851 same. */
6852 if ((aarch64_get_operand_class (opnds[0].type)
6853 == AARCH64_OPND_CLASS_INT_REG)
6854 && (aarch64_get_operand_class (opnds[1].type)
6855 == AARCH64_OPND_CLASS_INT_REG)
6856 && (opnds[0].reg.regno == opnds[1].reg.regno
6857 || opnds[0].reg.regno == opnds[2].reg.regno))
6858 as_warn (_("unpredictable: identical transfer and status registers"
6859 " --`%s'"),
6860 str);
6861
6862 break;
6863
6864 default:
6865 break;
6866 }
6867 }
6868
6869 static void
6870 force_automatic_sequence_close (void)
6871 {
6872 if (now_instr_sequence.instr)
6873 {
6874 as_warn (_("previous `%s' sequence has not been closed"),
6875 now_instr_sequence.instr->opcode->name);
6876 init_insn_sequence (NULL, &now_instr_sequence);
6877 }
6878 }
6879
6880 /* A wrapper function to interface with libopcodes on encoding and
6881 record the error message if there is any.
6882
6883 Return TRUE on success; otherwise return FALSE. */
6884
6885 static bfd_boolean
6886 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6887 aarch64_insn *code)
6888 {
6889 aarch64_operand_error error_info;
6890 memset (&error_info, '\0', sizeof (error_info));
6891 error_info.kind = AARCH64_OPDE_NIL;
6892 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
6893 && !error_info.non_fatal)
6894 return TRUE;
6895
6896 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6897 record_operand_error_info (opcode, &error_info);
6898 return error_info.non_fatal;
6899 }
6900
#ifdef DEBUG_AARCH64
/* Debug helper: list the operands of OPCODE, preferring each operand's
   name and falling back to its description when the name is empty.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  int i;

  for (i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *name = aarch64_get_operand_name (opcode->operands[i]);

      if (name[0] == '\0')
	name = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, name);
    }
}
#endif /* DEBUG_AARCH64 */
6916
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* reset_aarch64_instruction clears the condition code parsed by
     opcode_lookup, so preserve it across the reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, TRUE);
	  return;
	}

      /* This template did not match; reset the instruction state (again
	 preserving the condition) and try the next entry sharing the
	 mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, FALSE);
}
7056
/* Various frobbings of labels and their addresses.  */

/* Called at the start of every source line; forget any label seen on the
   previous line so md_assemble only aligns labels on its own line.  */
void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
7064
/* Remember SYM as the most recent label (md_assemble may re-anchor it to
   the instruction frag) and emit DWARF line info for it.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
7072
/* Called when a section is changed/closed; any open constrained
   instruction sequence must not span sections.  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
7079
7080 int
7081 aarch64_data_in_code (void)
7082 {
7083 if (!strncmp (input_line_pointer + 1, "data:", 5))
7084 {
7085 *input_line_pointer = '/';
7086 input_line_pointer += 5;
7087 *input_line_pointer = 0;
7088 return 1;
7089 }
7090
7091 return 0;
7092 }
7093
/* Strip a trailing "/data" marker from NAME in place (the marker is added
   by the data-in-code handling above).  Names of five characters or fewer
   are left untouched.  Returns NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
7104 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF builds a canonical name entry; REGDEF_ALIAS builds a secondary
   name for a register that already has a canonical spelling (the final
   field flags whether the name is canonical).  REGNUM pastes a numeric
   suffix onto prefix P, and the REGSET* macros expand to runs of
   consecutively numbered registers.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  /* Register number 31 is context-dependent, so x31/w31 are deliberately
     not defined above; only the explicit sp/zr spellings exist.  */
  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7174
/* Single-letter flag values used to build the NZCV operand table below:
   an upper-case letter means the corresponding condition flag is set.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the four flag values into the 4-bit NZCV immediate (N is the
   most significant bit).  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All sixteen spellings of the NZCV flags operand; the case of each
   letter selects the corresponding bit.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7212 \f
7213 /* MD interface: bits in the object file. */
7214
7215 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7216 for use in the a.out file, and stores them in the array pointed to by buf.
7217 This knows about the endian-ness of the target machine and does
7218 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7219 2 (short) and 4 (long) Floating numbers are put out as a series of
7220 LITTLENUMS (shorts, here at least). */
7221
7222 void
7223 md_number_to_chars (char *buf, valueT val, int n)
7224 {
7225 if (target_big_endian)
7226 number_to_chars_bigendian (buf, val, n);
7227 else
7228 number_to_chars_littleendian (buf, val, n);
7229 }
7230
7231 /* MD interface: Sections. */
7232
/* Estimate the size of a frag before relaxing.  Assume everything fits in
   4 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* AArch64 instructions are a fixed 4 bytes; record that as the
     variable part of the frag as well as returning it as the estimate.  */
  fragp->fr_var = 4;
  return 4;
}
7242
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No target-specific rounding is needed; return SIZE unchanged.  */
  return size;
}
7250
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding this frag must cover, and where it starts.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach a 4-byte instruction boundary; these become
     the zero-filled fixed part of the frag.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating part of the frag is a single NOP.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
7308
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets; the mapping-symbol bookkeeping
     performed in the ELF variant does not apply here.  */
}
7321
#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state implied by the kind of frag being
     initialised.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
7354 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* On function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
#endif /* OBJ_ELF */
7363
7364 /* Convert REGNAME to a DWARF-2 register number. */
7365
7366 int
7367 tc_aarch64_regname_to_dw2regnum (char *regname)
7368 {
7369 const reg_entry *reg = parse_reg (&regname);
7370 if (reg == NULL)
7371 return -1;
7372
7373 switch (reg->type)
7374 {
7375 case REG_TYPE_SP_32:
7376 case REG_TYPE_SP_64:
7377 case REG_TYPE_R_32:
7378 case REG_TYPE_R_64:
7379 return reg->number;
7380
7381 case REG_TYPE_FP_B:
7382 case REG_TYPE_FP_H:
7383 case REG_TYPE_FP_S:
7384 case REG_TYPE_FP_D:
7385 case REG_TYPE_FP_Q:
7386 return reg->number + 64;
7387
7388 default:
7389 break;
7390 }
7391 return -1;
7392 }
7393
/* Implement DWARF2_ADDR_SIZE.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses even though the ISA is 64-bit.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7405
7406 /* MD interface: Symbol and relocation handling. */
7407
7408 /* Return the address within the segment that a PC-relative fixup is
7409 relative to. For AArch64 PC-relative fixups applied to instructions
7410 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7411
7412 long
7413 md_pcrel_from_section (fixS * fixP, segT seg)
7414 {
7415 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7416
7417 /* If this is pc-relative and we are going to emit a relocation
7418 then we just want to put out any pipeline compensation that the linker
7419 will need. Otherwise we want to use the calculated base. */
7420 if (fixP->fx_pcrel
7421 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7422 || aarch64_force_relocation (fixP)))
7423 base = 0;
7424
7425 /* AArch64 should be consistent for all pc-relative relocations. */
7426 return base + AARCH64_PCREL_OFFSET;
7427 }
7428
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character pre-check before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      /* Create the GOT symbol lazily on first reference.  */
      if (!GOT_symbol)
	{
	  /* A user-defined symbol of the reserved name would clash with
	     the one being created here.  */
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
7454
7455 /* Return non-zero if the indicated VALUE has overflowed the maximum
7456 range expressible by a unsigned number with the indicated number of
7457 BITS. */
7458
7459 static bfd_boolean
7460 unsigned_overflow (valueT value, unsigned bits)
7461 {
7462 valueT lim;
7463 if (bits >= sizeof (valueT) * 8)
7464 return FALSE;
7465 lim = (valueT) 1 << bits;
7466 return (value >= lim);
7467 }
7468
7469
7470 /* Return non-zero if the indicated VALUE has overflowed the maximum
7471 range expressible by an signed number with the indicated number of
7472 BITS. */
7473
7474 static bfd_boolean
7475 signed_overflow (offsetT value, unsigned bits)
7476 {
7477 offsetT lim;
7478 if (bits >= sizeof (offsetT) * 8)
7479 return FALSE;
7480 lim = (offsetT) 1 << (bits - 1);
7481 return (value < -lim || value >= lim);
7482 }
7483
7484 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7485 unsigned immediate offset load/store instruction, try to encode it as
7486 an unscaled, 9-bit, signed immediate offset load/store instruction.
7487 Return TRUE if it is successful; otherwise return FALSE.
7488
7489 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7490 in response to the standard LDR/STR mnemonics when the immediate offset is
7491 unambiguous, i.e. when it is negative or unaligned. */
7492
7493 static bfd_boolean
7494 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
7495 {
7496 int idx;
7497 enum aarch64_op new_op;
7498 const aarch64_opcode *new_opcode;
7499
7500 gas_assert (instr->opcode->iclass == ldst_pos);
7501
7502 switch (instr->opcode->op)
7503 {
7504 case OP_LDRB_POS:new_op = OP_LDURB; break;
7505 case OP_STRB_POS: new_op = OP_STURB; break;
7506 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
7507 case OP_LDRH_POS: new_op = OP_LDURH; break;
7508 case OP_STRH_POS: new_op = OP_STURH; break;
7509 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
7510 case OP_LDR_POS: new_op = OP_LDUR; break;
7511 case OP_STR_POS: new_op = OP_STUR; break;
7512 case OP_LDRF_POS: new_op = OP_LDURV; break;
7513 case OP_STRF_POS: new_op = OP_STURV; break;
7514 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
7515 case OP_PRFM_POS: new_op = OP_PRFUM; break;
7516 default: new_op = OP_NIL; break;
7517 }
7518
7519 if (new_op == OP_NIL)
7520 return FALSE;
7521
7522 new_opcode = aarch64_get_opcode (new_op);
7523 gas_assert (new_opcode != NULL);
7524
7525 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
7526 instr->opcode->op, new_opcode->op);
7527
7528 aarch64_replace_opcode (instr, new_opcode);
7529
7530 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
7531 qualifier matching may fail because the out-of-date qualifier will
7532 prevent the operand being updated with a new and correct qualifier. */
7533 idx = aarch64_operand_index (instr->opcode->operands,
7534 AARCH64_OPND_ADDR_SIMM9);
7535 gas_assert (idx == 1);
7536 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
7537
7538 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
7539
7540 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
7541 insn_sequence))
7542 return FALSE;
7543
7544 return TRUE;
7545 }
7546
7547 /* Called by fix_insn to fix a MOV immediate alias instruction.
7548
7549 Operand for a generic move immediate instruction, which is an alias
7550 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
7551 a 32-bit/64-bit immediate value into general register. An assembler error
7552 shall result if the immediate cannot be created by a single one of these
7553 instructions. If there is a choice, then to ensure reversability an
7554 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
7555
7556 static void
7557 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
7558 {
7559 const aarch64_opcode *opcode;
7560
7561 /* Need to check if the destination is SP/ZR. The check has to be done
7562 before any aarch64_replace_opcode. */
7563 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
7564 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
7565
7566 instr->operands[1].imm.value = value;
7567 instr->operands[1].skip = 0;
7568
7569 if (try_mov_wide_p)
7570 {
7571 /* Try the MOVZ alias. */
7572 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
7573 aarch64_replace_opcode (instr, opcode);
7574 if (aarch64_opcode_encode (instr->opcode, instr,
7575 &instr->value, NULL, NULL, insn_sequence))
7576 {
7577 put_aarch64_insn (buf, instr->value);
7578 return;
7579 }
7580 /* Try the MOVK alias. */
7581 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
7582 aarch64_replace_opcode (instr, opcode);
7583 if (aarch64_opcode_encode (instr->opcode, instr,
7584 &instr->value, NULL, NULL, insn_sequence))
7585 {
7586 put_aarch64_insn (buf, instr->value);
7587 return;
7588 }
7589 }
7590
7591 if (try_mov_bitmask_p)
7592 {
7593 /* Try the ORR alias. */
7594 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
7595 aarch64_replace_opcode (instr, opcode);
7596 if (aarch64_opcode_encode (instr->opcode, instr,
7597 &instr->value, NULL, NULL, insn_sequence))
7598 {
7599 put_aarch64_insn (buf, instr->value);
7600 return;
7601 }
7602 }
7603
7604 as_bad_where (fixP->fx_file, fixP->fx_line,
7605 _("immediate cannot be moved by a single instruction"));
7606 }
7607
7608 /* An instruction operand which is immediate related may have symbol used
7609 in the assembly, e.g.
7610
7611 mov w0, u32
7612 .set u32, 0x00ffff00
7613
7614 At the time when the assembly instruction is parsed, a referenced symbol,
7615 like 'u32' in the above example may not have been seen; a fixS is created
7616 in such a case and is handled here after symbols have been resolved.
7617 Instruction is fixed up with VALUE using the information in *FIXP plus
7618 extra information in FLAGS.
7619
7620 This function is called by md_apply_fix to fix up instructions that need
7621 a fix-up described above but does not involve any linker-time relocation. */
7622
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  /* Dispatch on the operand kind recorded when the fixS was created; each
     arm patches the resolved VALUE back into the instruction word at BUF.  */
  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* 16-bit immediate of an exception-generating instruction
	 (e.g. SVC #imm).  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2  221111111111
		  1  098|7654|3 2  109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction via the
	 opcode table so the logical-immediate encoder validates VALUE.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* The scaled-offset form failed; fall back on the unscaled
	     (LDUR/STUR style) alternative when one exists.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      /* Every operand kind that can reach here must be handled above.  */
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7785
7786 /* Apply a fixup (fixP) to segment data, once it has been determined
7787 by our caller that we have all the info we need to fix it up.
7788
7789 Parameter valP is the pointer to the value of the bits. */
7790
void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    /* Plain data relocations: patch the bytes in place when the reloc is
       fully resolved or the target uses REL-style relocations.  */
    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
         in fact we generate an error because we do not have a reloc
         for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* LDR (literal): 19-bit word-aligned pc-relative offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* ADR: 21-bit signed byte offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* B.cond: 19-bit word-aligned offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* TBZ/TBNZ: 14-bit word-aligned offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* B/BL: 26-bit word-aligned offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVW family: SCALE selects which 16-bit group of the value is
       inserted into the MOVZ/MOVN/MOVK immediate field.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale. */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn. */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo reloc: pick the LD32/LD64 variant depending on the ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Pseudo reloc: pick the LD32/LD64 variant depending on the ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* All remaining TLS relocations: mark the symbol thread-local and
       leave the relocation for the linker.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated struct aarch64_inst.
     N.B. currently there are a very limited number of fix-up types that
     actually use this field, so the impact on performance should be
     minimal.  */
  if (fixP->tc_fix_data.inst != NULL)
    free (fixP->tc_fix_data.inst);

  return;
}
8173
8174 /* Translate internal representation of relocation info to BFD target
8175 format. */
8176
8177 arelent *
8178 tc_gen_reloc (asection * section, fixS * fixp)
8179 {
8180 arelent *reloc;
8181 bfd_reloc_code_real_type code;
8182
8183 reloc = XNEW (arelent);
8184
8185 reloc->sym_ptr_ptr = XNEW (asymbol *);
8186 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8187 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8188
8189 if (fixp->fx_pcrel)
8190 {
8191 if (section->use_rela_p)
8192 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8193 else
8194 fixp->fx_offset = reloc->address;
8195 }
8196 reloc->addend = fixp->fx_offset;
8197
8198 code = fixp->fx_r_type;
8199 switch (code)
8200 {
8201 case BFD_RELOC_16:
8202 if (fixp->fx_pcrel)
8203 code = BFD_RELOC_16_PCREL;
8204 break;
8205
8206 case BFD_RELOC_32:
8207 if (fixp->fx_pcrel)
8208 code = BFD_RELOC_32_PCREL;
8209 break;
8210
8211 case BFD_RELOC_64:
8212 if (fixp->fx_pcrel)
8213 code = BFD_RELOC_64_PCREL;
8214 break;
8215
8216 default:
8217 break;
8218 }
8219
8220 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8221 if (reloc->howto == NULL)
8222 {
8223 as_bad_where (fixp->fx_file, fixp->fx_line,
8224 _
8225 ("cannot represent %s relocation in this object file format"),
8226 bfd_get_reloc_code_name (code));
8227 return NULL;
8228 }
8229
8230 return reloc;
8231 }
8232
8233 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8234
8235 void
8236 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8237 {
8238 bfd_reloc_code_real_type type;
8239 int pcrel = 0;
8240
8241 /* Pick a reloc.
8242 FIXME: @@ Should look at CPU word size. */
8243 switch (size)
8244 {
8245 case 1:
8246 type = BFD_RELOC_8;
8247 break;
8248 case 2:
8249 type = BFD_RELOC_16;
8250 break;
8251 case 4:
8252 type = BFD_RELOC_32;
8253 break;
8254 case 8:
8255 type = BFD_RELOC_64;
8256 break;
8257 default:
8258 as_bad (_("cannot do %u-byte relocation"), size);
8259 type = BFD_RELOC_UNUSED;
8260 break;
8261 }
8262
8263 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8264 }
8265
/* Decide whether a fixup must be kept as a relocation in the object file
   even when it could be resolved at assembly time.  Returns non-zero to
   force the relocation, zero to allow local resolution; anything not
   listed falls through to generic_force_reloc.  */
int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  return generic_force_reloc (fixp);
}
8361
8362 #ifdef OBJ_ELF
8363
8364 /* Implement md_after_parse_args. This is the earliest time we need to decide
8365 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8366
8367 void
8368 aarch64_after_parse_args (void)
8369 {
8370 if (aarch64_abi != AARCH64_ABI_NONE)
8371 return;
8372
8373 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8374 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8375 aarch64_abi = AARCH64_ABI_ILP32;
8376 else
8377 aarch64_abi = AARCH64_ABI_LP64;
8378 }
8379
8380 const char *
8381 elf64_aarch64_target_format (void)
8382 {
8383 #ifdef TE_CLOUDABI
8384 /* FIXME: What to do for ilp32_p ? */
8385 if (target_big_endian)
8386 return "elf64-bigaarch64-cloudabi";
8387 else
8388 return "elf64-littleaarch64-cloudabi";
8389 #else
8390 if (target_big_endian)
8391 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8392 else
8393 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8394 #endif
8395 }
8396
/* Implement TC's symbol frobbing hook by deferring entirely to the
   generic ELF handling.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8402 #endif
8403
8404 /* MD interface: Finalization. */
8405
8406 /* A good place to do this, although this was probably not intended
8407 for this kind of use. We need to dump the literal pool before
8408 references are made to a null symbol pointer. */
8409
8410 void
8411 aarch64_cleanup (void)
8412 {
8413 literal_pool *pool;
8414
8415 for (pool = list_of_pools; pool; pool = pool->next)
8416 {
8417 /* Put it at the end of the relevant section. */
8418 subseg_set (pool->section, pool->sub_section);
8419 s_ltorg (0);
8420 }
8421 }
8422
8423 #ifdef OBJ_ELF
8424 /* Remove any excess mapping symbols generated for alignment frags in
8425 SEC. We may have created a mapping symbol before a zero byte
8426 alignment; remove it if there's a mapping symbol after the
8427 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
         this point.  But if this was variable-sized to start with,
         there will be a fixed-size frag after it.  So don't handle
         next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary to NEXT; scan forward over any
	 empty frags to decide whether SYM is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
	         one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
	         it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8487 #endif
8488
8489 /* Adjust the symbol table. */
8490
/* Implement md_adjust_symtab: final symbol-table cleanup before writing
   the object file.  */
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8501
8502 static void
8503 checked_hash_insert (struct hash_control *table, const char *key, void *value)
8504 {
8505 const char *hash_err;
8506
8507 hash_err = hash_insert (table, key, value);
8508 if (hash_err)
8509 printf ("Internal Error: Can't hash %s\n", key);
8510 }
8511
8512 static void
8513 fill_instruction_hash_table (void)
8514 {
8515 aarch64_opcode *opcode = aarch64_opcode_table;
8516
8517 while (opcode->name != NULL)
8518 {
8519 templates *templ, *new_templ;
8520 templ = hash_find (aarch64_ops_hsh, opcode->name);
8521
8522 new_templ = XNEW (templates);
8523 new_templ->opcode = opcode;
8524 new_templ->next = NULL;
8525
8526 if (!templ)
8527 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8528 else
8529 {
8530 new_templ->next = templ->next;
8531 templ->next = new_templ;
8532 }
8533 ++opcode;
8534 }
8535 }
8536
/* Copy at most NUM characters of the NUL-terminated string SRC to DST,
   converting each to upper case, and NUL-terminate DST.  DST must have
   room for NUM + 1 characters.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t i;

  /* Use size_t for the counter: NUM is a size_t, and an unsigned int
     counter would compare narrower-than-bound (wrong for huge NUM).  */
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
8545
8546 /* Assume STR point to a lower-case string, allocate, convert and return
8547 the corresponding upper-case string. */
8548 static inline const char*
8549 get_upper_str (const char *str)
8550 {
8551 char *ret;
8552 size_t len = strlen (str);
8553 ret = XNEWVEC (char, len + 1);
8554 convert_to_upper (ret, str, len);
8555 return ret;
8556 }
8557
8558 /* MD interface: Initialization. */
8559
8560 void
8561 md_begin (void)
8562 {
8563 unsigned mach;
8564 unsigned int i;
8565
8566 if ((aarch64_ops_hsh = hash_new ()) == NULL
8567 || (aarch64_cond_hsh = hash_new ()) == NULL
8568 || (aarch64_shift_hsh = hash_new ()) == NULL
8569 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8570 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8571 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8572 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8573 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8574 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8575 || (aarch64_sys_regs_sr_hsh = hash_new ()) == NULL
8576 || (aarch64_reg_hsh = hash_new ()) == NULL
8577 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8578 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8579 || (aarch64_pldop_hsh = hash_new ()) == NULL
8580 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8581 as_fatal (_("virtual memory exhausted"));
8582
8583 fill_instruction_hash_table ();
8584
8585 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8586 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8587 (void *) (aarch64_sys_regs + i));
8588
8589 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8590 checked_hash_insert (aarch64_pstatefield_hsh,
8591 aarch64_pstatefields[i].name,
8592 (void *) (aarch64_pstatefields + i));
8593
8594 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8595 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8596 aarch64_sys_regs_ic[i].name,
8597 (void *) (aarch64_sys_regs_ic + i));
8598
8599 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8600 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8601 aarch64_sys_regs_dc[i].name,
8602 (void *) (aarch64_sys_regs_dc + i));
8603
8604 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8605 checked_hash_insert (aarch64_sys_regs_at_hsh,
8606 aarch64_sys_regs_at[i].name,
8607 (void *) (aarch64_sys_regs_at + i));
8608
8609 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8610 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8611 aarch64_sys_regs_tlbi[i].name,
8612 (void *) (aarch64_sys_regs_tlbi + i));
8613
8614 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
8615 checked_hash_insert (aarch64_sys_regs_sr_hsh,
8616 aarch64_sys_regs_sr[i].name,
8617 (void *) (aarch64_sys_regs_sr + i));
8618
8619 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8620 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8621 (void *) (reg_names + i));
8622
8623 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8624 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8625 (void *) (nzcv_names + i));
8626
8627 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8628 {
8629 const char *name = aarch64_operand_modifiers[i].name;
8630 checked_hash_insert (aarch64_shift_hsh, name,
8631 (void *) (aarch64_operand_modifiers + i));
8632 /* Also hash the name in the upper case. */
8633 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8634 (void *) (aarch64_operand_modifiers + i));
8635 }
8636
8637 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8638 {
8639 unsigned int j;
8640 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8641 the same condition code. */
8642 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8643 {
8644 const char *name = aarch64_conds[i].names[j];
8645 if (name == NULL)
8646 break;
8647 checked_hash_insert (aarch64_cond_hsh, name,
8648 (void *) (aarch64_conds + i));
8649 /* Also hash the name in the upper case. */
8650 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8651 (void *) (aarch64_conds + i));
8652 }
8653 }
8654
8655 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8656 {
8657 const char *name = aarch64_barrier_options[i].name;
8658 /* Skip xx00 - the unallocated values of option. */
8659 if ((i & 0x3) == 0)
8660 continue;
8661 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8662 (void *) (aarch64_barrier_options + i));
8663 /* Also hash the name in the upper case. */
8664 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8665 (void *) (aarch64_barrier_options + i));
8666 }
8667
8668 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8669 {
8670 const char* name = aarch64_prfops[i].name;
8671 /* Skip the unallocated hint encodings. */
8672 if (name == NULL)
8673 continue;
8674 checked_hash_insert (aarch64_pldop_hsh, name,
8675 (void *) (aarch64_prfops + i));
8676 /* Also hash the name in the upper case. */
8677 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8678 (void *) (aarch64_prfops + i));
8679 }
8680
8681 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8682 {
8683 const char* name = aarch64_hint_options[i].name;
8684
8685 checked_hash_insert (aarch64_hint_opt_hsh, name,
8686 (void *) (aarch64_hint_options + i));
8687 /* Also hash the name in the upper case. */
8688 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8689 (void *) (aarch64_hint_options + i));
8690 }
8691
8692 /* Set the cpu variant based on the command-line options. */
8693 if (!mcpu_cpu_opt)
8694 mcpu_cpu_opt = march_cpu_opt;
8695
8696 if (!mcpu_cpu_opt)
8697 mcpu_cpu_opt = &cpu_default;
8698
8699 cpu_variant = *mcpu_cpu_opt;
8700
8701 /* Record the CPU type. */
8702 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8703
8704 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8705 }
8706
8707 /* Command line processing. */
8708
/* Every AArch64-specific short option is spelled -m<...>; getopt passes
   the text after "m" as the argument.  */
const char *md_shortopts = "m:";

/* Define -EB/-EL according to which endiannesses this build supports.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8733
/* Simple -m<option> flags that set an integer variable.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Table of the simple -m options, matched in full by md_parse_option.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8756
/* An entry in the -mcpu= table: a CPU name and the feature set it
   implies.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_PROFILE),
				  "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				"Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_PROFILE),
				  "Neoverse N1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
  "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
8835
/* An entry in the -march= table: an architecture name and its feature
   set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {NULL, AARCH64_ARCH_NONE}
};
8854
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  /* Feature bits the extension itself provides.  */
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* Table of "+ext" modifiers accepted after -mcpu=/-march= and by the
   .arch_extension directive.  The REQUIRE column drives the transitive
   enable/disable closures computed below.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO
					 | AARCH64_FEATURE_AES
					 | AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2
					 | AARCH64_FEATURE_SHA3, 0),
			AARCH64_ARCH_NONE},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"bitperm",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
8938
/* A -m option that carries a sub-argument (e.g. -mcpu=NAME), matched by
   prefix and decoded by FUNC.  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
8946
8947 /* Transitive closure of features depending on set. */
8948 static aarch64_feature_set
8949 aarch64_feature_disable_set (aarch64_feature_set set)
8950 {
8951 const struct aarch64_option_cpu_value_table *opt;
8952 aarch64_feature_set prev = 0;
8953
8954 while (prev != set) {
8955 prev = set;
8956 for (opt = aarch64_features; opt->name != NULL; opt++)
8957 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8958 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8959 }
8960 return set;
8961 }
8962
8963 /* Transitive closure of dependencies of set. */
8964 static aarch64_feature_set
8965 aarch64_feature_enable_set (aarch64_feature_set set)
8966 {
8967 const struct aarch64_option_cpu_value_table *opt;
8968 aarch64_feature_set prev = 0;
8969
8970 while (prev != set) {
8971 prev = set;
8972 for (opt = aarch64_features; opt->name != NULL; opt++)
8973 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8974 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8975 }
8976 return set;
8977 }
8978
8979 static int
8980 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8981 bfd_boolean ext_only)
8982 {
8983 /* We insist on extensions being added before being removed. We achieve
8984 this by using the ADDING_VALUE variable to indicate whether we are
8985 adding an extension (1) or removing it (0) and only allowing it to
8986 change in the order -1 -> 1 -> 0. */
8987 int adding_value = -1;
8988 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8989
8990 /* Copy the feature set, so that we can modify it. */
8991 *ext_set = **opt_p;
8992 *opt_p = ext_set;
8993
8994 while (str != NULL && *str != 0)
8995 {
8996 const struct aarch64_option_cpu_value_table *opt;
8997 const char *ext = NULL;
8998 int optlen;
8999
9000 if (!ext_only)
9001 {
9002 if (*str != '+')
9003 {
9004 as_bad (_("invalid architectural extension"));
9005 return 0;
9006 }
9007
9008 ext = strchr (++str, '+');
9009 }
9010
9011 if (ext != NULL)
9012 optlen = ext - str;
9013 else
9014 optlen = strlen (str);
9015
9016 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
9017 {
9018 if (adding_value != 0)
9019 adding_value = 0;
9020 optlen -= 2;
9021 str += 2;
9022 }
9023 else if (optlen > 0)
9024 {
9025 if (adding_value == -1)
9026 adding_value = 1;
9027 else if (adding_value != 1)
9028 {
9029 as_bad (_("must specify extensions to add before specifying "
9030 "those to remove"));
9031 return FALSE;
9032 }
9033 }
9034
9035 if (optlen == 0)
9036 {
9037 as_bad (_("missing architectural extension"));
9038 return 0;
9039 }
9040
9041 gas_assert (adding_value != -1);
9042
9043 for (opt = aarch64_features; opt->name != NULL; opt++)
9044 if (strncmp (opt->name, str, optlen) == 0)
9045 {
9046 aarch64_feature_set set;
9047
9048 /* Add or remove the extension. */
9049 if (adding_value)
9050 {
9051 set = aarch64_feature_enable_set (opt->value);
9052 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9053 }
9054 else
9055 {
9056 set = aarch64_feature_disable_set (opt->value);
9057 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9058 }
9059 break;
9060 }
9061
9062 if (opt->name == NULL)
9063 {
9064 as_bad (_("unknown architectural extension `%s'"), str);
9065 return 0;
9066 }
9067
9068 str = ext;
9069 };
9070
9071 return 1;
9072 }
9073
9074 static int
9075 aarch64_parse_cpu (const char *str)
9076 {
9077 const struct aarch64_cpu_option_table *opt;
9078 const char *ext = strchr (str, '+');
9079 size_t optlen;
9080
9081 if (ext != NULL)
9082 optlen = ext - str;
9083 else
9084 optlen = strlen (str);
9085
9086 if (optlen == 0)
9087 {
9088 as_bad (_("missing cpu name `%s'"), str);
9089 return 0;
9090 }
9091
9092 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9093 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9094 {
9095 mcpu_cpu_opt = &opt->value;
9096 if (ext != NULL)
9097 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
9098
9099 return 1;
9100 }
9101
9102 as_bad (_("unknown cpu `%s'"), str);
9103 return 0;
9104 }
9105
9106 static int
9107 aarch64_parse_arch (const char *str)
9108 {
9109 const struct aarch64_arch_option_table *opt;
9110 const char *ext = strchr (str, '+');
9111 size_t optlen;
9112
9113 if (ext != NULL)
9114 optlen = ext - str;
9115 else
9116 optlen = strlen (str);
9117
9118 if (optlen == 0)
9119 {
9120 as_bad (_("missing architecture name `%s'"), str);
9121 return 0;
9122 }
9123
9124 for (opt = aarch64_archs; opt->name != NULL; opt++)
9125 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9126 {
9127 march_cpu_opt = &opt->value;
9128 if (ext != NULL)
9129 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
9130
9131 return 1;
9132 }
9133
9134 as_bad (_("unknown architecture `%s'\n"), str);
9135 return 0;
9136 }
9137
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* Arguments accepted by -mabi= (ELF only).  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64",  AARCH64_ABI_LP64},
};
9149
9150 static int
9151 aarch64_parse_abi (const char *str)
9152 {
9153 unsigned int i;
9154
9155 if (str[0] == '\0')
9156 {
9157 as_bad (_("missing abi name `%s'"), str);
9158 return 0;
9159 }
9160
9161 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9162 if (strcmp (str, aarch64_abis[i].name) == 0)
9163 {
9164 aarch64_abi = aarch64_abis[i].value;
9165 return 1;
9166 }
9167
9168 as_bad (_("unknown abi `%s'\n"), str);
9169 return 0;
9170 }
9171
/* Table of the -m<name>=<arg> options; matched by prefix in
   md_parse_option and decoded by the per-option parser.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
9183
/* Handle one command-line option.  C is the option character ('m' for
   all AArch64-specific options), ARG the remaining text or NULL.
   Returns 1 if the option was consumed, 0 if unrecognized.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple flag options; the whole option text
	 (C followed by ARG) must match exactly.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sup-option parser.  ARG lacks the leading option
		 character C, hence the "- 1" when skipping the matched
		 prefix to reach the sub-argument.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
9251
/* Print the AArch64-specific portion of the --help output to FP.  */
void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  /* Simple flag options that carry help text.  */
  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  /* Options taking a sub-argument (-mcpu=, -march=, ...).  */
  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif
}
9278
9279 /* Parse a .cpu directive. */
9280
9281 static void
9282 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9283 {
9284 const struct aarch64_cpu_option_table *opt;
9285 char saved_char;
9286 char *name;
9287 char *ext;
9288 size_t optlen;
9289
9290 name = input_line_pointer;
9291 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9292 input_line_pointer++;
9293 saved_char = *input_line_pointer;
9294 *input_line_pointer = 0;
9295
9296 ext = strchr (name, '+');
9297
9298 if (ext != NULL)
9299 optlen = ext - name;
9300 else
9301 optlen = strlen (name);
9302
9303 /* Skip the first "all" entry. */
9304 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9305 if (strlen (opt->name) == optlen
9306 && strncmp (name, opt->name, optlen) == 0)
9307 {
9308 mcpu_cpu_opt = &opt->value;
9309 if (ext != NULL)
9310 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9311 return;
9312
9313 cpu_variant = *mcpu_cpu_opt;
9314
9315 *input_line_pointer = saved_char;
9316 demand_empty_rest_of_line ();
9317 return;
9318 }
9319 as_bad (_("unknown cpu `%s'"), name);
9320 *input_line_pointer = saved_char;
9321 ignore_rest_of_line ();
9322 }
9323
9324
9325 /* Parse a .arch directive. */
9326
9327 static void
9328 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9329 {
9330 const struct aarch64_arch_option_table *opt;
9331 char saved_char;
9332 char *name;
9333 char *ext;
9334 size_t optlen;
9335
9336 name = input_line_pointer;
9337 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9338 input_line_pointer++;
9339 saved_char = *input_line_pointer;
9340 *input_line_pointer = 0;
9341
9342 ext = strchr (name, '+');
9343
9344 if (ext != NULL)
9345 optlen = ext - name;
9346 else
9347 optlen = strlen (name);
9348
9349 /* Skip the first "all" entry. */
9350 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9351 if (strlen (opt->name) == optlen
9352 && strncmp (name, opt->name, optlen) == 0)
9353 {
9354 mcpu_cpu_opt = &opt->value;
9355 if (ext != NULL)
9356 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9357 return;
9358
9359 cpu_variant = *mcpu_cpu_opt;
9360
9361 *input_line_pointer = saved_char;
9362 demand_empty_rest_of_line ();
9363 return;
9364 }
9365
9366 as_bad (_("unknown architecture `%s'\n"), name);
9367 *input_line_pointer = saved_char;
9368 ignore_rest_of_line ();
9369 }
9370
9371 /* Parse a .arch_extension directive. */
9372
9373 static void
9374 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9375 {
9376 char saved_char;
9377 char *ext = input_line_pointer;;
9378
9379 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9380 input_line_pointer++;
9381 saved_char = *input_line_pointer;
9382 *input_line_pointer = 0;
9383
9384 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9385 return;
9386
9387 cpu_variant = *mcpu_cpu_opt;
9388
9389 *input_line_pointer = saved_char;
9390 demand_empty_rest_of_line ();
9391 }
9392
/* Copy symbol information.  Propagate the AArch64-specific symbol flags
   from SRC to DEST (used when one symbol is defined in terms of
   another).  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
This page took 0.317329 seconds and 3 git commands to generate.