[BINUTILS, AARCH64, 6/8] Add Tag getting instruction in Memory Tagging Extension
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2018 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
106
107 struct vector_type_el
108 {
109 enum vector_el_type type;
110 unsigned char defined;
111 unsigned width;
112 int64_t index;
113 };
114
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
116
117 struct reloc
118 {
119 bfd_reloc_code_real_type type;
120 expressionS exp;
121 int pc_rel;
122 enum aarch64_opnd opnd;
123 uint32_t flags;
124 unsigned need_libopcodes_p : 1;
125 };
126
127 struct aarch64_instruction
128 {
129 /* libopcodes structure for instruction intermediate representation. */
130 aarch64_inst base;
131 /* Record assembly errors found during the parsing. */
132 struct
133 {
134 enum aarch64_operand_error_kind kind;
135 const char *error;
136 } parsing_error;
137 /* The condition that appears in the assembly line. */
138 int cond;
139 /* Relocation information (including the GAS internal fixup). */
140 struct reloc reloc;
141 /* Need to generate an immediate in the literal pool. */
142 unsigned gen_lit_pool : 1;
143 };
144
145 typedef struct aarch64_instruction aarch64_instruction;
146
147 static aarch64_instruction inst;
148
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
183 static inline bfd_boolean
184 error_p (void)
185 {
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188
189 static inline const char *
190 get_error_message (void)
191 {
192 return inst.parsing_error.error;
193 }
194
195 static inline enum aarch64_operand_error_kind
196 get_error_kind (void)
197 {
198 return inst.parsing_error.kind;
199 }
200
201 static inline void
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
203 {
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
206 }
207
/* Record a recoverable error; parsing of the current operand may
   continue after such an error is raised.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
213
214 /* Use the DESC field of the corresponding aarch64_operand entry to compose
215 the error message. */
static inline void
set_default_error (void)
{
  /* A NULL message makes the error-reporting code fall back on the
     DESC field of the corresponding aarch64_operand entry.  */
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
221
/* Record a syntax error with the given message, unconditionally
   overwriting any previously recorded error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
/* Record a fatal syntax error; no further instruction template will
   be tried for the current assembly line.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
240 \f
241 /* Number of littlenums required to hold an extended precision number. */
242 #define MAX_LITTLENUMS 6
243
244 /* Return value for certain parsers when the parsing fails; those parsers
245 return the information of the parsed result, e.g. register number, on
246 success. */
247 #define PARSE_FAIL -1
248
249 /* This is an invalid condition code that means no conditional field is
250 present. */
251 #define COND_ALWAYS 0x10
252
253 typedef struct
254 {
255 const char *template;
256 unsigned long value;
257 } asm_barrier_opt;
258
259 typedef struct
260 {
261 const char *template;
262 uint32_t value;
263 } asm_nzcv;
264
265 struct reloc_entry
266 {
267 char *name;
268 bfd_reloc_code_real_type reloc;
269 };
270
271 /* Macros to define the register types and masks for the purpose
272 of parsing. */
273
274 #undef AARCH64_REG_TYPES
275 #define AARCH64_REG_TYPES \
276 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
277 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
278 BASIC_REG_TYPE(SP_32) /* wsp */ \
279 BASIC_REG_TYPE(SP_64) /* sp */ \
280 BASIC_REG_TYPE(Z_32) /* wzr */ \
281 BASIC_REG_TYPE(Z_64) /* xzr */ \
282 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
283 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
284 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
285 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
286 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
287 BASIC_REG_TYPE(VN) /* v[0-31] */ \
288 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
289 BASIC_REG_TYPE(PN) /* p[0-15] */ \
290 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
291 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
292 /* Typecheck: same, plus SVE registers. */ \
293 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
294 | REG_TYPE(ZN)) \
295 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
296 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
298 /* Typecheck: same, plus SVE registers. */ \
299 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
300 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
301 | REG_TYPE(ZN)) \
302 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
303 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
305 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
306 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
307 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
308 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
309 /* Typecheck: any [BHSDQ]P FP. */ \
310 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
311 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
312 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
313 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
314 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
315 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
316 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
317 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
318 be used for SVE instructions, since Zn and Pn are valid symbols \
319 in other contexts. */ \
320 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
321 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
322 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
323 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
324 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
325 | REG_TYPE(ZN) | REG_TYPE(PN)) \
326 /* Any integer register; used for error messages only. */ \
327 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
328 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
329 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
330 /* Pseudo type to mark the end of the enumerator sequence. */ \
331 BASIC_REG_TYPE(MAX)
332
333 #undef BASIC_REG_TYPE
334 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
335 #undef MULTI_REG_TYPE
336 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
337
338 /* Register type enumerators. */
339 typedef enum aarch64_reg_type_
340 {
341 /* A list of REG_TYPE_*. */
342 AARCH64_REG_TYPES
343 } aarch64_reg_type;
344
345 #undef BASIC_REG_TYPE
346 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
347 #undef REG_TYPE
348 #define REG_TYPE(T) (1 << REG_TYPE_##T)
349 #undef MULTI_REG_TYPE
350 #define MULTI_REG_TYPE(T,V) V,
351
352 /* Structure for a hash table entry for a register. */
353 typedef struct
354 {
355 const char *name;
356 unsigned char number;
357 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
358 unsigned char builtin;
359 } reg_entry;
360
361 /* Values indexed by aarch64_reg_type to assist the type checking. */
362 static const unsigned reg_type_masks[] =
363 {
364 AARCH64_REG_TYPES
365 };
366
367 #undef BASIC_REG_TYPE
368 #undef REG_TYPE
369 #undef MULTI_REG_TYPE
370 #undef AARCH64_REG_TYPES
371
372 /* Diagnostics used when we don't get a register of the expected type.
373 Note: this has to synchronized with aarch64_reg_type definitions
374 above. */
static const char *
get_reg_expected_msg (aarch64_reg_type reg_type)
{
  const char *msg;

  /* Map each (possibly multi-) register type to a human-readable
     diagnostic; strings are wrapped in N_() for later translation.  */
  switch (reg_type)
    {
    case REG_TYPE_R_32:
      msg = N_("integer 32-bit register expected");
      break;
    case REG_TYPE_R_64:
      msg = N_("integer 64-bit register expected");
      break;
    case REG_TYPE_R_N:
      msg = N_("integer register expected");
      break;
    case REG_TYPE_R64_SP:
      msg = N_("64-bit integer or SP register expected");
      break;
    case REG_TYPE_SVE_BASE:
      msg = N_("base register expected");
      break;
    case REG_TYPE_R_Z:
      msg = N_("integer or zero register expected");
      break;
    case REG_TYPE_SVE_OFFSET:
      msg = N_("offset register expected");
      break;
    case REG_TYPE_R_SP:
      msg = N_("integer or SP register expected");
      break;
    case REG_TYPE_R_Z_SP:
      msg = N_("integer, zero or SP register expected");
      break;
    case REG_TYPE_FP_B:
      msg = N_("8-bit SIMD scalar register expected");
      break;
    case REG_TYPE_FP_H:
      msg = N_("16-bit SIMD scalar or floating-point half precision "
	       "register expected");
      break;
    case REG_TYPE_FP_S:
      msg = N_("32-bit SIMD scalar or floating-point single precision "
	       "register expected");
      break;
    case REG_TYPE_FP_D:
      msg = N_("64-bit SIMD scalar or floating-point double precision "
	       "register expected");
      break;
    case REG_TYPE_FP_Q:
      msg = N_("128-bit SIMD scalar or floating-point quad precision "
	       "register expected");
      break;
    case REG_TYPE_R_Z_BHSDQ_V:
    case REG_TYPE_R_Z_SP_BHSDQ_VZP:
      msg = N_("register expected");
      break;
    case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
      msg = N_("SIMD scalar or floating-point register expected");
      break;
    case REG_TYPE_VN:		/* any V reg  */
      msg = N_("vector register expected");
      break;
    case REG_TYPE_ZN:
      msg = N_("SVE vector register expected");
      break;
    case REG_TYPE_PN:
      msg = N_("SVE predicate register expected");
      break;
    default:
      /* Reaching here indicates a programming error, not a user error:
	 every expected type should have a dedicated message above.  */
      as_fatal (_("invalid register type %d"), reg_type);
    }
  return msg;
}
449
450 /* Some well known registers that we refer to directly elsewhere. */
451 #define REG_SP 31
452
453 /* Instructions take 4 bytes in the object file. */
454 #define INSN_SIZE 4
455
456 static struct hash_control *aarch64_ops_hsh;
457 static struct hash_control *aarch64_cond_hsh;
458 static struct hash_control *aarch64_shift_hsh;
459 static struct hash_control *aarch64_sys_regs_hsh;
460 static struct hash_control *aarch64_pstatefield_hsh;
461 static struct hash_control *aarch64_sys_regs_ic_hsh;
462 static struct hash_control *aarch64_sys_regs_dc_hsh;
463 static struct hash_control *aarch64_sys_regs_at_hsh;
464 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
465 static struct hash_control *aarch64_sys_regs_sr_hsh;
466 static struct hash_control *aarch64_reg_hsh;
467 static struct hash_control *aarch64_barrier_opt_hsh;
468 static struct hash_control *aarch64_nzcv_hsh;
469 static struct hash_control *aarch64_pldop_hsh;
470 static struct hash_control *aarch64_hint_opt_hsh;
471
472 /* Stuff needed to resolve the label ambiguity
473 As:
474 ...
475 label: <insn>
476 may differ from:
477 ...
478 label:
479 <insn> */
480
481 static symbolS *last_label_seen;
482
483 /* Literal pool structure. Held on a per-section
484 and per-sub-section basis. */
485
486 #define MAX_LITERAL_POOL_SIZE 1024
487 typedef struct literal_expression
488 {
489 expressionS exp;
490 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
491 LITTLENUM_TYPE * bignum;
492 } literal_expression;
493
494 typedef struct literal_pool
495 {
496 literal_expression literals[MAX_LITERAL_POOL_SIZE];
497 unsigned int next_free_entry;
498 unsigned int id;
499 symbolS *symbol;
500 segT section;
501 subsegT sub_section;
502 int size;
503 struct literal_pool *next;
504 } literal_pool;
505
506 /* Pointer to a linked list of literal pools. */
507 static literal_pool *list_of_pools = NULL;
508 \f
509 /* Pure syntax. */
510
511 /* This array holds the chars that always start a comment. If the
512 pre-processor is disabled, these aren't very useful. */
513 const char comment_chars[] = "";
514
515 /* This array holds the chars that only start a comment at the beginning of
516 a line. If the line seems to have the form '# 123 filename'
517 .line and .file directives will appear in the pre-processed output. */
518 /* Note that input_file.c hand checks for '#' at the beginning of the
519 first line of the input file. This is because the compiler outputs
520 #NO_APP at the beginning of its output. */
521 /* Also note that comments like this one will always work. */
522 const char line_comment_chars[] = "#";
523
524 const char line_separator_chars[] = ";";
525
526 /* Chars that can be used to separate mant
527 from exp in floating point numbers. */
528 const char EXP_CHARS[] = "eE";
529
530 /* Chars that mean this number is a floating point constant. */
531 /* As in 0f12.456 */
532 /* or 0d1.2345e12 */
533
534 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
535
536 /* Prefix character that indicates the start of an immediate value. */
537 #define is_immediate_prefix(C) ((C) == '#')
538
539 /* Separator character handling. */
540
541 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
542
543 static inline bfd_boolean
544 skip_past_char (char **str, char c)
545 {
546 if (**str == c)
547 {
548 (*str)++;
549 return TRUE;
550 }
551 else
552 return FALSE;
553 }
554
555 #define skip_past_comma(str) skip_past_char (str, ',')
556
557 /* Arithmetic expressions (possibly involving symbols). */
558
559 static bfd_boolean in_my_get_expression_p = FALSE;
560
561 /* Third argument to my_get_expression. */
562 #define GE_NO_PREFIX 0
563 #define GE_OPT_PREFIX 1
564
565 /* Return TRUE if the string pointed by *STR is successfully parsed
566 as an valid expression; *EP will be filled with the information of
567 such an expression. Otherwise return FALSE. */
568
static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  /* Handle an optional leading '#' according to PREFIX_MODE.  */
  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Point the generic expression parser at *STR; while
     in_my_get_expression_p is set, md_operand flags bad expressions
     by setting X_op to O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#'-prefixed operand can only ever be an expression, so a
	 parse failure there is fatal for this template.  */
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  /* Success: advance the caller's pointer and restore the global.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
631
632 /* Turn a string in input_line_pointer into a floating point constant
633 of type TYPE, and store the appropriate bytes in *LITP. The number
634 of LITTLENUMS emitted is stored in *SIZEP. An error message is
635 returned, or NULL on OK. */
636
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Delegate entirely to the generic IEEE helper, honouring the
     target's configured endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
642
643 /* We handle all bad expressions here, so that we can report the faulty
644 instruction in the error message. */
645 void
646 md_operand (expressionS * exp)
647 {
648 if (in_my_get_expression_p)
649 exp->X_op = O_illegal;
650 }
651
652 /* Immediate values. */
653
654 /* Errors may be set multiple times during parsing or bit encoding
655 (particularly in the Neon bits), but usually the earliest error which is set
656 will be the most meaningful. Avoid overwriting it with later (cascading)
657 errors by calling this function. */
658
/* Record ERROR as a syntax error unless an earlier (and therefore
   usually more meaningful) error has already been recorded.  */
static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
665
666 /* Similar to first_error, but this function accepts formatted error
667 message. */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message is expected to fit: these are short
	 diagnostics, never arbitrary user text.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
690
691 /* Register parsing. */
692
693 /* Generic register parser which is called by other specialized
694 register parsers.
695 CCP points to what should be the beginning of a register name.
696 If it is indeed a valid register name, advance CCP over it and
697 return the reg_entry structure; otherwise return NULL.
698 It does not issue diagnostics. */
699
static reg_entry *
parse_reg (char **ccp)
{
  char *start = *ccp;
  char *p;
  reg_entry *reg;

#ifdef REGISTER_PREFIX
  /* Targets defining REGISTER_PREFIX require it before every register
     name.  */
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif

  p = start;
  /* A register name must begin with a letter that is also a valid
     symbol-name beginner.  */
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  /* Scan to the end of the candidate name: letters, digits and
     underscores.  */
  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the name up (without copying it) in the register table.  */
  reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  /* Known register: advance the caller's pointer past the name.  */
  *ccp = p;
  return reg;
}
729
730 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
731 return FALSE. */
732 static bfd_boolean
733 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
734 {
735 return (reg_type_masks[type] & (1 << reg->type)) != 0;
736 }
737
738 /* Try to parse a base or offset register. Allow SVE base and offset
739 registers if REG_TYPE includes SVE registers. Return the register
740 entry on success, setting *QUALIFIER to the register qualifier.
741 Return null otherwise.
742
743 Note that this function does not issue any diagnostics. */
744
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit general registers, WSP and WZR.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit general registers, SP and XZR.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* SVE vector registers are only accepted when REG_TYPE allows
	 them, and must carry an explicit ".s" or ".d" size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the two-character size suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
795
796 /* Try to parse a base or offset register. Return the register entry
797 on success, setting *QUALIFIER to the register qualifier. Return null
798 otherwise.
799
800 Note that this function does not issue any diagnostics. */
801
static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept any 32/64-bit integer register, {W}SP or [WX]ZR.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
807
808 /* Parse the qualifier of a vector register or vector element of type
809 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
810 succeeds; otherwise return FALSE.
811
812 Accept only one occurrence of:
813 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
814 b h s d q */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE Z/P registers never take an element count, and a bare "b",
     "h", ... (no leading digits) means width 0, i.e. unspecified.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* "q" is only accepted for SVE registers or in the "1q" form.  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* An explicit width must describe a full 64- or 128-bit vector, or
     one of the special short forms 2h and 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
893
894 /* *STR contains an SVE zero/merge predication suffix. Parse it into
895 *PARSED_TYPE and point *STR at the end of the suffix. */
896
897 static bfd_boolean
898 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
899 {
900 char *ptr = *str;
901
902 /* Skip '/'. */
903 gas_assert (*ptr == '/');
904 ptr++;
905 switch (TOLOWER (*ptr))
906 {
907 case 'z':
908 parsed_type->type = NT_zero;
909 break;
910 case 'm':
911 parsed_type->type = NT_merge;
912 break;
913 default:
914 if (*ptr != '\0' && *ptr != ',')
915 first_error_fmt (_("unexpected character `%c' in predication type"),
916 *ptr);
917 else
918 first_error (_("missing predication type"));
919 return FALSE;
920 }
921 parsed_type->width = 0;
922 *str = ptr + 1;
923 return TRUE;
924 }
925
926 /* Parse a register of the type TYPE.
927
928 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
929 name or the parsed register is not of TYPE.
930
931 Otherwise return the register number, and optionally fill in the actual
932 type of the register in *RTYPE when multiple alternatives were given, and
933 return the register shape and element index information in *TYPEINFO.
934
935 IN_REG_LIST should be set with TRUE if the caller is parsing a register
936 list. */
937
static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start from an "untyped, unindexed" description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* From here on, work with the concrete type of the register found.  */
  type = reg->type;

  /* Advanced SIMD, SVE vector and SVE predicate registers may carry a
     ".<T>" type suffix; SVE predicates may instead carry a "/[zm]"
     predication qualifier.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* NOTE(review): return value deliberately ignored; a failed parse
	 leaves exp.X_op != O_constant, which the check below catches.  */
      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1061
1062 /* Parse register.
1063
1064 Return the register number on success; return PARSE_FAIL otherwise.
1065
1066 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1067 the register (e.g. NEON double or quad reg when either has been requested).
1068
1069 If this is a NEON vector register with additional type information, fill
1070 in the struct pointed to by VECTYPE (if non-NULL).
1071
1072 This parser does not handle register list. */
1073
1074 static int
1075 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1076 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1077 {
1078 struct vector_type_el atype;
1079 char *str = *ccp;
1080 int reg = parse_typed_reg (&str, type, rtype, &atype,
1081 /*in_reg_list= */ FALSE);
1082
1083 if (reg == PARSE_FAIL)
1084 return PARSE_FAIL;
1085
1086 if (vectype)
1087 *vectype = atype;
1088
1089 *ccp = str;
1090
1091 return reg;
1092 }
1093
1094 static inline bfd_boolean
1095 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1096 {
1097 return
1098 e1.type == e2.type
1099 && e1.defined == e2.defined
1100 && e1.width == e2.width && e1.index == e2.index;
1101 }
1102
1103 /* This function parses a list of vector registers of type TYPE.
1104 On success, it returns the parsed register list information in the
1105 following encoded format:
1106
1107 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1108 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1109
1110 The information of the register shape and/or index is returned in
1111 *VECTYPE.
1112
1113 It returns PARSE_FAIL if the register list is invalid.
1114
1115 The list contains one to four registers.
1116 Each register can be one of:
1117 <Vt>.<T>[<index>]
1118 <Vt>.<T>
1119 All <T> should be identical.
1120 All <index> should be identical.
1121 There are restrictions on <Vt> numbers which are checked later
1122 (by reg_list_valid_p). */
1123
static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;			/* Number of registers accepted so far.  */
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;		/* Current regno and start of a '-' range.  */
  int in_range;			/* Non-zero while parsing the end of a range.  */
  int ret_val;			/* Encoded result (see header comment).  */
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  /* The list must be brace-enclosed: { ... }.  */
  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;	/* Previous register starts the range.  */
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      /* An index on any element means every element must carry one;
	 the shared index is parsed after the closing brace below.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* Ranges must be ascending, e.g. { v0.4s - v3.4s }.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* All elements must share the first element's type/shape.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      /* Accumulate each register (a range expands to several) at 5 bits
	 per register, in list order.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue on ',' or on '-' (range); the comma operand of || runs
     first, so in_range is only set when no comma was found.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  /* Parse the trailing shared element index, e.g. { v0.s, v1.s }[2].  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  /* A list holds one to four registers.  */
  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low 2 bits: count - 1; 5 bits per register number above that.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1263
1264 /* Directives: register aliases. */
1265
/* Record STR as an alias for register NUMBER of type TYPE in the
   register hash table.  Return the new entry, or NULL if STR already
   names a register; in that case a warning is issued unless the
   redefinition maps to the very same register.  */

static reg_entry *
insert_reg_alias (char *str, int number, aarch64_reg_type type)
{
  reg_entry *new;
  const char *name;

  if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
    {
      if (new->builtin)
	as_warn (_("ignoring attempt to redefine built-in register '%s'"),
		 str);

      /* Only warn about a redefinition if it's not defined as the
	 same register.  */
      else if (new->number != number || new->type != type)
	as_warn (_("ignoring redefinition of register alias '%s'"), str);

      return NULL;
    }

  name = xstrdup (str);
  new = XNEW (reg_entry);

  new->name = name;
  new->number = number;
  new->type = type;
  new->builtin = FALSE;

  /* NAME is owned by the hash table entry from here on; s_unreq frees
     it when the alias is deleted.  */
  if (hash_insert (aarch64_reg_hsh, name, (void *) new))
    abort ();

  return new;
}
1299
1300 /* Look for the .req directive. This is of the form:
1301
1302 new_register_name .req existing_register_name
1303
1304 If we find one, or if it looks sufficiently like one that we want to
1305 handle any error here, return TRUE. Otherwise return FALSE. */
1306
static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* The line looked like a .req, so report it handled.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the upper-case variant when it differs from the alias
	 as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise for the lower-case variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1379
1380 /* Should never be called, as .req goes between the alias and the
1381 register name, not at the beginning of the line. */
1382 static void
1383 s_req (int a ATTRIBUTE_UNUSED)
1384 {
1385 as_bad (_("invalid syntax for .req directive"));
1386 }
1387
1388 /* The .unreq directive deletes an alias which was previously defined
1389 by .req. For example:
1390
1391 my_alias .req r11
1392 .unreq my_alias */
1393
1394 static void
1395 s_unreq (int a ATTRIBUTE_UNUSED)
1396 {
1397 char *name;
1398 char saved_char;
1399
1400 name = input_line_pointer;
1401
1402 while (*input_line_pointer != 0
1403 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1404 ++input_line_pointer;
1405
1406 saved_char = *input_line_pointer;
1407 *input_line_pointer = 0;
1408
1409 if (!*name)
1410 as_bad (_("invalid syntax for .unreq directive"));
1411 else
1412 {
1413 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1414
1415 if (!reg)
1416 as_bad (_("unknown register alias '%s'"), name);
1417 else if (reg->builtin)
1418 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1419 name);
1420 else
1421 {
1422 char *p;
1423 char *nbuf;
1424
1425 hash_delete (aarch64_reg_hsh, name, FALSE);
1426 free ((char *) reg->name);
1427 free (reg);
1428
1429 /* Also locate the all upper case and all lower case versions.
1430 Do not complain if we cannot find one or the other as it
1431 was probably deleted above. */
1432
1433 nbuf = strdup (name);
1434 for (p = nbuf; *p; p++)
1435 *p = TOUPPER (*p);
1436 reg = hash_find (aarch64_reg_hsh, nbuf);
1437 if (reg)
1438 {
1439 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1440 free ((char *) reg->name);
1441 free (reg);
1442 }
1443
1444 for (p = nbuf; *p; p++)
1445 *p = TOLOWER (*p);
1446 reg = hash_find (aarch64_reg_hsh, nbuf);
1447 if (reg)
1448 {
1449 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1450 free ((char *) reg->name);
1451 free (reg);
1452 }
1453
1454 free (nbuf);
1455 }
1456 }
1457
1458 *input_line_pointer = saved_char;
1459 demand_empty_rest_of_line ();
1460 }
1461
1462 /* Directives: Instruction set selection. */
1463
1464 #ifdef OBJ_ELF
1465 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1466 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1467 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1468 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1469
1470 /* Create a new mapping symbol for the transition to STATE. */
1471
static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Select the AAELF64 mapping symbol name for STATE: $d marks data,
     $x marks A64 instructions.  Both are untyped (BSF_NO_FLAGS).  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Discard the superseded symbol at offset 0.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag are created in address order.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1525
1526 /* We must sometimes convert a region marked as code to data during
1527 code alignment, if an odd number of bytes have to be padded. The
1528 code mapping symbol is pushed to an aligned address. */
1529
/* Insert a $d mapping symbol at offset VALUE in FRAG for BYTES bytes of
   padding, followed by a STATE mapping symbol at the realigned offset
   VALUE + BYTES.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* If it was also the first symbol in the frag, clear that slot
	 too before removing it from the symbol chain.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* $d covers the padding; STATE resumes at the aligned address.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1553
1554 static void mapping_state_2 (enum mstate state, int max_chars);
1555
1556 /* Set the mapping state to STATE. Only call this when about to
1557 emit some STATE bytes to the file. */
1558
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* Bytes emitted before the first instruction are data; mark them
	 with a $d at the section start.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Emit the mapping symbol for STATE at the current location.  */
  mapping_state_2 (state, 0);
}
1597
1598 /* Same as mapping_state, but MAX_CHARS bytes have already been
1599 allocated. Put the mapping symbol that far back. */
1600
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Mapping symbols only make sense in normal (BFD-backed) sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* Record the new state and place the symbol MAX_CHARS bytes before
     the current frag position (i.e. at the start of the data that has
     already been reserved).  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
1617 #else
1618 #define mapping_state(x) /* nothing */
1619 #define mapping_state_2(x, y) /* nothing */
1620 #endif
1621
1622 /* Directives: sectioning and alignment. */
1623
1624 static void
1625 s_bss (int ignore ATTRIBUTE_UNUSED)
1626 {
1627 /* We don't support putting frags in the BSS segment, we fake it by
1628 marking in_bss, then looking at s_skip for clues. */
1629 subseg_set (bss_section, 0);
1630 demand_empty_rest_of_line ();
1631 mapping_state (MAP_DATA);
1632 }
1633
1634 static void
1635 s_even (int ignore ATTRIBUTE_UNUSED)
1636 {
1637 /* Never make frag if expect extra pass. */
1638 if (!need_pass_2)
1639 frag_align (1, 0, 0);
1640
1641 record_alignment (now_seg, 1);
1642
1643 demand_empty_rest_of_line ();
1644 }
1645
1646 /* Directives: Literal pools. */
1647
1648 static literal_pool *
1649 find_literal_pool (int size)
1650 {
1651 literal_pool *pool;
1652
1653 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1654 {
1655 if (pool->section == now_seg
1656 && pool->sub_section == now_subseg && pool->size == size)
1657 break;
1658 }
1659
1660 return pool;
1661 }
1662
1663 static literal_pool *
1664 find_or_make_literal_pool (int size)
1665 {
1666 /* Next literal pool ID number. */
1667 static unsigned int latest_pool_num = 1;
1668 literal_pool *pool;
1669
1670 pool = find_literal_pool (size);
1671
1672 if (pool == NULL)
1673 {
1674 /* Create a new pool. */
1675 pool = XNEW (literal_pool);
1676 if (!pool)
1677 return NULL;
1678
1679 /* Currently we always put the literal pool in the current text
1680 section. If we were generating "small" model code where we
1681 knew that all code and initialised data was within 1MB then
1682 we could output literals to mergeable, read-only data
1683 sections. */
1684
1685 pool->next_free_entry = 0;
1686 pool->section = now_seg;
1687 pool->sub_section = now_subseg;
1688 pool->size = size;
1689 pool->next = list_of_pools;
1690 pool->symbol = NULL;
1691
1692 /* Add it to the list. */
1693 list_of_pools = pool;
1694 }
1695
1696 /* New pools, and emptied pools, will have a NULL symbol. */
1697 if (pool->symbol == NULL)
1698 {
1699 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1700 (valueT) 0, &zero_address_frag);
1701 pool->id = latest_pool_num++;
1702 }
1703
1704 /* Done. */
1705 return pool;
1706 }
1707
1708 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1709 Return TRUE on success, otherwise return FALSE. */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool, so identical
     constants/symbols share one pool slot.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constant literals match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic literals match on symbol + offset.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite EXP in place to reference the pool slot: the pool symbol
     plus the entry's byte offset.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1767
1768 /* Can't use symbol_new here, so have to create a symbol and then at
1769 a later date assign it a value. That's what these functions do. */
1770
static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns stable
     storage.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  /* Give the object format and the target a chance to decorate the
     new symbol.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1818
1819
/* Handle the .ltorg/.pool directive: dump every non-empty literal pool
   (4-byte and 8-byte, and the 16-byte slot) for the current section at
   the current location, then mark the pools empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Pools exist per entry size: 1 << 2 .. 1 << 4 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* Literal data follows; switch the mapping state.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 byte keeps the name out of the user's namespace.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Pin the pool's placeholder symbol to the current address so
	 earlier references resolve here.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  /* The saved bignum copy is no longer needed once emitted.  */
	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1878
1879 #ifdef OBJ_ELF
1880 /* Forward declarations for functions below, in the MD interface
1881 section. */
1882 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1883 static struct reloc_table_entry * find_reloc_table_entry (char **);
1884
1885 /* Directives: Data. */
1886 /* N.B. the support for relocation suffix in this directive needs to be
1887 implemented properly. */
1888
/* Implement .word/.long/.xword/.dword: emit NBYTES-wide data items for
   each comma-separated expression.  Relocation suffixes of the form
   ":reloc:" are recognized but not yet supported (see N.B. above).  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* What follows is data, not instructions.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:reloc:" / ":reloc:" suffix after a
	     symbolic expression.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1940
1941 #endif /* OBJ_ELF */
1942
1943 /* Output a 32-bit word, but mark as an instruction. */
1944
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  /* The emitted word is an instruction; mark it with $x.  */
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* emit_expr writes in target order, but instruction encodings are
	 always little-endian; pre-swap so big-endian data output yields
	 a correctly-ordered instruction word.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1995
1996 #ifdef OBJ_ELF
1997 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
1998
1999 static void
2000 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2001 {
2002 expressionS exp;
2003
2004 expression (&exp);
2005 frag_grow (4);
2006 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2007 BFD_RELOC_AARCH64_TLSDESC_ADD);
2008
2009 demand_empty_rest_of_line ();
2010 }
2011
2012 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2013
2014 static void
2015 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2016 {
2017 expressionS exp;
2018
2019 /* Since we're just labelling the code, there's no need to define a
2020 mapping symbol. */
2021 expression (&exp);
2022 /* Make sure there is enough room in this frag for the following
2023 blr. This trick only works if the blr follows immediately after
2024 the .tlsdesc directive. */
2025 frag_grow (4);
2026 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2027 BFD_RELOC_AARCH64_TLSDESC_CALL);
2028
2029 demand_empty_rest_of_line ();
2030 }
2031
2032 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2033
2034 static void
2035 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2036 {
2037 expressionS exp;
2038
2039 expression (&exp);
2040 frag_grow (4);
2041 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2042 BFD_RELOC_AARCH64_TLSDESC_LDR);
2043
2044 demand_empty_rest_of_line ();
2045 }
2046 #endif /* OBJ_ELF */
2047
2048 static void s_aarch64_arch (int);
2049 static void s_aarch64_cpu (int);
2050 static void s_aarch64_arch_extension (int);
2051
2052 /* This table describes all the machine specific pseudo-ops the assembler
2053 has to support. The fields are:
2054 pseudo-op name without dot
2055 function to call to execute this pseudo-op
2056 Integer arg to pass to the function. */
2057
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* .pool is an alias of .ltorg; both flush the literal pools.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The integer argument is the data size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};
2081 \f
2082
2083 /* Check whether STR points to a register name followed by a comma or the
2084 end of line; REG_TYPE indicates which register types are checked
2085 against. Return TRUE if STR is such a register name; otherwise return
2086 FALSE. The function does not intend to produce any diagnostics, but since
2087 the register parser aarch64_reg_parse, which is called by this function,
2088 does produce diagnostics, we call clear_error to clear any diagnostics
2089 that may be generated by aarch64_reg_parse.
2090 Also, the function returns FALSE directly if there is any user error
2091 present at the function entry. This prevents the existing diagnostics
2092 state from being spoiled.
2093 The function currently serves parse_constant_immediate and
2094 parse_big_immediate only. */
2095 static bfd_boolean
2096 reg_name_p (char *str, aarch64_reg_type reg_type)
2097 {
2098 int reg;
2099
2100 /* Prevent the diagnostics state from being spoiled. */
2101 if (error_p ())
2102 return FALSE;
2103
2104 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2105
2106 /* Clear the parsing error that may be set by the reg parser. */
2107 clear_error ();
2108
2109 if (reg == PARSE_FAIL)
2110 return FALSE;
2111
2112 skip_whitespace (str);
2113 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2114 return TRUE;
2115
2116 return FALSE;
2117 }
2118
2119 /* Parser functions used exclusively in instruction operands. */
2120
2121 /* Parse an immediate expression which may not be constant.
2122
2123 To prevent the expression parser from pushing a register name
2124 into the symbol table as an undefined symbol, firstly a check is
2125 done to find out whether STR is a register of type REG_TYPE followed
2126 by a comma or the end of line. Return FALSE if STR is such a string. */
2127
2128 static bfd_boolean
2129 parse_immediate_expression (char **str, expressionS *exp,
2130 aarch64_reg_type reg_type)
2131 {
2132 if (reg_name_p (*str, reg_type))
2133 {
2134 set_recoverable_error (_("immediate operand required"));
2135 return FALSE;
2136 }
2137
2138 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2139
2140 if (exp->X_op == O_absent)
2141 {
2142 set_fatal_syntax_error (_("missing immediate expression"));
2143 return FALSE;
2144 }
2145
2146 return TRUE;
2147 }
2148
2149 /* Constant immediate-value read function for use in insn parsing.
2150 STR points to the beginning of the immediate (with the optional
2151 leading #); *VAL receives the value. REG_TYPE says which register
2152 names should be treated as registers rather than as symbolic immediates.
2153
2154 Return TRUE on success; otherwise return FALSE. */
2155
2156 static bfd_boolean
2157 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2158 {
2159 expressionS exp;
2160
2161 if (! parse_immediate_expression (str, &exp, reg_type))
2162 return FALSE;
2163
2164 if (exp.X_op != O_constant)
2165 {
2166 set_syntax_error (_("constant expression required"));
2167 return FALSE;
2168 }
2169
2170 *val = exp.X_add_number;
2171 return TRUE;
2172 }
2173
/* Pack the sign, low exponent and fraction bits of the IEEE single
   IMM into the 8-bit AArch64 floating-point immediate encoding.  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t exp_and_frac = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;		/* b[31] -> b[7].  */

  return sign | exp_and_frac;
}
2180
2181 /* Return TRUE if the single-precision floating-point value encoded in IMM
2182 can be expressed in the AArch64 8-bit signed floating-point format with
2183 3-bit exponent and normalized 4 bits of precision; in other words, the
2184 floating-point value must be expressable as
2185 (+/-) n / 16 * power (2, r)
2186 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2187
2188 static bfd_boolean
2189 aarch64_imm_float_p (uint32_t imm)
2190 {
2191 /* If a single-precision floating-point value has the following bit
2192 pattern, it can be expressed in the AArch64 8-bit floating-point
2193 format:
2194
2195 3 32222222 2221111111111
2196 1 09876543 21098765432109876543210
2197 n Eeeeeexx xxxx0000000000000000000
2198
2199 where n, e and each x are either 0 or 1 independently, with
2200 E == ~ e. */
2201
2202 uint32_t pattern;
2203
2204 /* Prepare the pattern for 'Eeeeee'. */
2205 if (((imm >> 30) & 0x1) == 0)
2206 pattern = 0x3e000000;
2207 else
2208 pattern = 0x40000000;
2209
2210 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2211 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2212 }
2213
2214 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2215 as an IEEE float without any loss of precision. Store the value in
2216 *FPWORD if so. */
2217
static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       ----------------------------->     nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s (otherwise the fraction would lose
     precision in the narrower float format).  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'.  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~: the three bits after E must all equal ~E, i.e. the
     exponent must be within float range.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111; that pattern would map to a float
     exponent of all ones (infinity/NaN encoding).  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  /* Repack into single-precision layout.  */
  *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));			/* 3 S bits.  */
  return TRUE;
}
2261
2262 /* Return true if we should treat OPERAND as a double-precision
2263 floating-point operand rather than a single-precision one. */
2264 static bfd_boolean
2265 double_precision_operand_p (const aarch64_opnd_info *operand)
2266 {
2267 /* Check for unsuffixed SVE registers, which are allowed
2268 for LDR and STR but not in instructions that require an
2269 immediate. We get better error messages if we arbitrarily
2270 pick one size, parse the immediate normally, and then
2271 report the match failure in the normal way. */
2272 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2273 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2274 }
2275
2276 /* Parse a floating-point immediate. Return TRUE on success and return the
2277 value in *IMMED in the format of IEEE754 single-precision encoding.
2278 *CCP points to the start of the string; DP_P is TRUE when the immediate
2279 is expected to be in double-precision (N.B. this only matters when
2280 hexadecimal representation is involved). REG_TYPE says which register
2281 names should be treated as registers rather than as symbolic immediates.
2282
2283 This routine accepts any IEEE float; it is up to the callers to reject
2284 invalid ones. */
2285
static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  /* An optional '#' may precede the immediate.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Fold the 64-bit pattern down to a single-precision word;
	     fails if precision would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision pattern must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else if (reg_name_p (str, reg_type))
   {
      /* A bare register name here is a recoverable error so other
	 operand forms may still be tried by the caller.  */
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal form: let atof_ieee produce the single-precision
	 encoding in LITTLENUM chunks.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2351
2352 /* Less-generic immediate-value read function with the possibility of loading
2353 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2354 instructions.
2355
2356 To prevent the expression parser from pushing a register name into the
2357 symbol table as an undefined symbol, a check is firstly done to find
2358 out whether STR is a register of type REG_TYPE followed by a comma or
2359 the end of line. Return FALSE if STR is such a register. */
2360
2361 static bfd_boolean
2362 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2363 {
2364 char *ptr = *str;
2365
2366 if (reg_name_p (ptr, reg_type))
2367 {
2368 set_syntax_error (_("immediate operand required"));
2369 return FALSE;
2370 }
2371
2372 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2373
2374 if (inst.reloc.exp.X_op == O_constant)
2375 *imm = inst.reloc.exp.X_add_number;
2376
2377 *str = ptr;
2378
2379 return TRUE;
2380 }
2381
2382 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2383 if NEED_LIBOPCODES is non-zero, the fixup will need
2384 assistance from the libopcodes. */
2385
2386 static inline void
2387 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2388 const aarch64_opnd_info *operand,
2389 int need_libopcodes_p)
2390 {
2391 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2392 reloc->opnd = operand->type;
2393 if (need_libopcodes_p)
2394 reloc->need_libopcodes_p = 1;
2395 };
2396
2397 /* Return TRUE if the instruction needs to be fixed up later internally by
2398 the GAS; otherwise return FALSE. */
2399
static inline bfd_boolean
aarch64_gas_internal_fixup_p (void)
{
  /* The internal-fixup type is recorded on the global INST by
     aarch64_set_gas_internal_fixup.  */
  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
2405
2406 /* Assign the immediate value to the relevant field in *OPERAND if
2407 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2408 needs an internal fixup in a later stage.
2409 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2410 IMM.VALUE that may get assigned with the constant. */
2411 static inline void
2412 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2413 aarch64_opnd_info *operand,
2414 int addr_off_p,
2415 int need_libopcodes_p,
2416 int skip_p)
2417 {
2418 if (reloc->exp.X_op == O_constant)
2419 {
2420 if (addr_off_p)
2421 operand->addr.offset.imm = reloc->exp.X_add_number;
2422 else
2423 operand->imm.value = reloc->exp.X_add_number;
2424 reloc->type = BFD_RELOC_UNUSED;
2425 }
2426 else
2427 {
2428 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2429 /* Tell libopcodes to ignore this operand or not. This is helpful
2430 when one of the operands needs to be fixed up later but we need
2431 libopcodes to check the other operands. */
2432 operand->skip = skip_p;
2433 }
2434 }
2435
2436 /* Relocation modifiers. Each entry in the table contains the textual
2437 name for the relocation which may be placed before a symbol used as
2438 a load/store offset, or add immediate. It must be surrounded by a
2439 leading and trailing colon, for example:
2440
2441 ldr x0, [x1, #:rello:varsym]
2442 add x0, x1, #:rello:varsym */
2443
struct reloc_table_entry
{
  const char *name;		/* Modifier text, without the colons.  */
  int pc_rel;			/* Non-zero if the relocation is PC-relative.  */
  bfd_reloc_code_real_type adr_type;		/* Reloc for ADR, or 0.  */
  bfd_reloc_code_real_type adrp_type;		/* Reloc for ADRP, or 0.  */
  bfd_reloc_code_real_type movw_type;		/* Reloc for MOVZ/MOVN/MOVK, or 0.  */
  bfd_reloc_code_real_type add_type;		/* Reloc for ADD immediate, or 0.  */
  bfd_reloc_code_real_type ldst_type;		/* Reloc for load/store offset, or 0.  */
  bfd_reloc_code_real_type ld_literal_type;	/* Reloc for LDR (literal), or 0.  */
};
2455
2456 static struct reloc_table_entry reloc_table[] = {
2457 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2458 {"lo12", 0,
2459 0, /* adr_type */
2460 0,
2461 0,
2462 BFD_RELOC_AARCH64_ADD_LO12,
2463 BFD_RELOC_AARCH64_LDST_LO12,
2464 0},
2465
2466 /* Higher 21 bits of pc-relative page offset: ADRP */
2467 {"pg_hi21", 1,
2468 0, /* adr_type */
2469 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2470 0,
2471 0,
2472 0,
2473 0},
2474
2475 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2476 {"pg_hi21_nc", 1,
2477 0, /* adr_type */
2478 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2479 0,
2480 0,
2481 0,
2482 0},
2483
2484 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2485 {"abs_g0", 0,
2486 0, /* adr_type */
2487 0,
2488 BFD_RELOC_AARCH64_MOVW_G0,
2489 0,
2490 0,
2491 0},
2492
2493 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2494 {"abs_g0_s", 0,
2495 0, /* adr_type */
2496 0,
2497 BFD_RELOC_AARCH64_MOVW_G0_S,
2498 0,
2499 0,
2500 0},
2501
2502 /* Less significant bits 0-15 of address/value: MOVK, no check */
2503 {"abs_g0_nc", 0,
2504 0, /* adr_type */
2505 0,
2506 BFD_RELOC_AARCH64_MOVW_G0_NC,
2507 0,
2508 0,
2509 0},
2510
2511 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2512 {"abs_g1", 0,
2513 0, /* adr_type */
2514 0,
2515 BFD_RELOC_AARCH64_MOVW_G1,
2516 0,
2517 0,
2518 0},
2519
2520 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2521 {"abs_g1_s", 0,
2522 0, /* adr_type */
2523 0,
2524 BFD_RELOC_AARCH64_MOVW_G1_S,
2525 0,
2526 0,
2527 0},
2528
2529 /* Less significant bits 16-31 of address/value: MOVK, no check */
2530 {"abs_g1_nc", 0,
2531 0, /* adr_type */
2532 0,
2533 BFD_RELOC_AARCH64_MOVW_G1_NC,
2534 0,
2535 0,
2536 0},
2537
2538 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2539 {"abs_g2", 0,
2540 0, /* adr_type */
2541 0,
2542 BFD_RELOC_AARCH64_MOVW_G2,
2543 0,
2544 0,
2545 0},
2546
2547 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2548 {"abs_g2_s", 0,
2549 0, /* adr_type */
2550 0,
2551 BFD_RELOC_AARCH64_MOVW_G2_S,
2552 0,
2553 0,
2554 0},
2555
2556 /* Less significant bits 32-47 of address/value: MOVK, no check */
2557 {"abs_g2_nc", 0,
2558 0, /* adr_type */
2559 0,
2560 BFD_RELOC_AARCH64_MOVW_G2_NC,
2561 0,
2562 0,
2563 0},
2564
2565 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2566 {"abs_g3", 0,
2567 0, /* adr_type */
2568 0,
2569 BFD_RELOC_AARCH64_MOVW_G3,
2570 0,
2571 0,
2572 0},
2573
2574 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2575 {"prel_g0", 1,
2576 0, /* adr_type */
2577 0,
2578 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2579 0,
2580 0,
2581 0},
2582
2583 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2584 {"prel_g0_nc", 1,
2585 0, /* adr_type */
2586 0,
2587 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2588 0,
2589 0,
2590 0},
2591
2592 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2593 {"prel_g1", 1,
2594 0, /* adr_type */
2595 0,
2596 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2597 0,
2598 0,
2599 0},
2600
2601 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2602 {"prel_g1_nc", 1,
2603 0, /* adr_type */
2604 0,
2605 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2606 0,
2607 0,
2608 0},
2609
2610 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2611 {"prel_g2", 1,
2612 0, /* adr_type */
2613 0,
2614 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2615 0,
2616 0,
2617 0},
2618
2619 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2620 {"prel_g2_nc", 1,
2621 0, /* adr_type */
2622 0,
2623 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2624 0,
2625 0,
2626 0},
2627
2628 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2629 {"prel_g3", 1,
2630 0, /* adr_type */
2631 0,
2632 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2633 0,
2634 0,
2635 0},
2636
2637 /* Get to the page containing GOT entry for a symbol. */
2638 {"got", 1,
2639 0, /* adr_type */
2640 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2641 0,
2642 0,
2643 0,
2644 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2645
2646 /* 12 bit offset into the page containing GOT entry for that symbol. */
2647 {"got_lo12", 0,
2648 0, /* adr_type */
2649 0,
2650 0,
2651 0,
2652 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2653 0},
2654
2655 /* 0-15 bits of address/value: MOVk, no check. */
2656 {"gotoff_g0_nc", 0,
2657 0, /* adr_type */
2658 0,
2659 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2660 0,
2661 0,
2662 0},
2663
2664 /* Most significant bits 16-31 of address/value: MOVZ. */
2665 {"gotoff_g1", 0,
2666 0, /* adr_type */
2667 0,
2668 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2669 0,
2670 0,
2671 0},
2672
2673 /* 15 bit offset into the page containing GOT entry for that symbol. */
2674 {"gotoff_lo15", 0,
2675 0, /* adr_type */
2676 0,
2677 0,
2678 0,
2679 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2680 0},
2681
2682 /* Get to the page containing GOT TLS entry for a symbol */
2683 {"gottprel_g0_nc", 0,
2684 0, /* adr_type */
2685 0,
2686 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2687 0,
2688 0,
2689 0},
2690
2691 /* Get to the page containing GOT TLS entry for a symbol */
2692 {"gottprel_g1", 0,
2693 0, /* adr_type */
2694 0,
2695 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2696 0,
2697 0,
2698 0},
2699
2700 /* Get to the page containing GOT TLS entry for a symbol */
2701 {"tlsgd", 0,
2702 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2703 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2704 0,
2705 0,
2706 0,
2707 0},
2708
2709 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2710 {"tlsgd_lo12", 0,
2711 0, /* adr_type */
2712 0,
2713 0,
2714 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2715 0,
2716 0},
2717
2718 /* Lower 16 bits address/value: MOVk. */
2719 {"tlsgd_g0_nc", 0,
2720 0, /* adr_type */
2721 0,
2722 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2723 0,
2724 0,
2725 0},
2726
2727 /* Most significant bits 16-31 of address/value: MOVZ. */
2728 {"tlsgd_g1", 0,
2729 0, /* adr_type */
2730 0,
2731 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2732 0,
2733 0,
2734 0},
2735
2736 /* Get to the page containing GOT TLS entry for a symbol */
2737 {"tlsdesc", 0,
2738 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2739 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2740 0,
2741 0,
2742 0,
2743 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2744
2745 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2746 {"tlsdesc_lo12", 0,
2747 0, /* adr_type */
2748 0,
2749 0,
2750 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2751 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2752 0},
2753
2754 /* Get to the page containing GOT TLS entry for a symbol.
2755 The same as GD, we allocate two consecutive GOT slots
2756 for module index and module offset, the only difference
2757 with GD is the module offset should be initialized to
2758 zero without any outstanding runtime relocation. */
2759 {"tlsldm", 0,
2760 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2761 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2762 0,
2763 0,
2764 0,
2765 0},
2766
2767 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2768 {"tlsldm_lo12_nc", 0,
2769 0, /* adr_type */
2770 0,
2771 0,
2772 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2773 0,
2774 0},
2775
2776 /* 12 bit offset into the module TLS base address. */
2777 {"dtprel_lo12", 0,
2778 0, /* adr_type */
2779 0,
2780 0,
2781 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2782 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2783 0},
2784
2785 /* Same as dtprel_lo12, no overflow check. */
2786 {"dtprel_lo12_nc", 0,
2787 0, /* adr_type */
2788 0,
2789 0,
2790 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2791 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2792 0},
2793
2794 /* bits[23:12] of offset to the module TLS base address. */
2795 {"dtprel_hi12", 0,
2796 0, /* adr_type */
2797 0,
2798 0,
2799 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2800 0,
2801 0},
2802
2803 /* bits[15:0] of offset to the module TLS base address. */
2804 {"dtprel_g0", 0,
2805 0, /* adr_type */
2806 0,
2807 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2808 0,
2809 0,
2810 0},
2811
2812 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2813 {"dtprel_g0_nc", 0,
2814 0, /* adr_type */
2815 0,
2816 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2817 0,
2818 0,
2819 0},
2820
2821 /* bits[31:16] of offset to the module TLS base address. */
2822 {"dtprel_g1", 0,
2823 0, /* adr_type */
2824 0,
2825 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2826 0,
2827 0,
2828 0},
2829
2830 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2831 {"dtprel_g1_nc", 0,
2832 0, /* adr_type */
2833 0,
2834 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2835 0,
2836 0,
2837 0},
2838
2839 /* bits[47:32] of offset to the module TLS base address. */
2840 {"dtprel_g2", 0,
2841 0, /* adr_type */
2842 0,
2843 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2844 0,
2845 0,
2846 0},
2847
2848 /* Lower 16 bit offset into GOT entry for a symbol */
2849 {"tlsdesc_off_g0_nc", 0,
2850 0, /* adr_type */
2851 0,
2852 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2853 0,
2854 0,
2855 0},
2856
2857 /* Higher 16 bit offset into GOT entry for a symbol */
2858 {"tlsdesc_off_g1", 0,
2859 0, /* adr_type */
2860 0,
2861 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2862 0,
2863 0,
2864 0},
2865
2866 /* Get to the page containing GOT TLS entry for a symbol */
2867 {"gottprel", 0,
2868 0, /* adr_type */
2869 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2870 0,
2871 0,
2872 0,
2873 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2874
2875 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2876 {"gottprel_lo12", 0,
2877 0, /* adr_type */
2878 0,
2879 0,
2880 0,
2881 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2882 0},
2883
2884 /* Get tp offset for a symbol. */
2885 {"tprel", 0,
2886 0, /* adr_type */
2887 0,
2888 0,
2889 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2890 0,
2891 0},
2892
2893 /* Get tp offset for a symbol. */
2894 {"tprel_lo12", 0,
2895 0, /* adr_type */
2896 0,
2897 0,
2898 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2899 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2900 0},
2901
2902 /* Get tp offset for a symbol. */
2903 {"tprel_hi12", 0,
2904 0, /* adr_type */
2905 0,
2906 0,
2907 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2908 0,
2909 0},
2910
2911 /* Get tp offset for a symbol. */
2912 {"tprel_lo12_nc", 0,
2913 0, /* adr_type */
2914 0,
2915 0,
2916 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2917 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2918 0},
2919
2920 /* Most significant bits 32-47 of address/value: MOVZ. */
2921 {"tprel_g2", 0,
2922 0, /* adr_type */
2923 0,
2924 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2925 0,
2926 0,
2927 0},
2928
2929 /* Most significant bits 16-31 of address/value: MOVZ. */
2930 {"tprel_g1", 0,
2931 0, /* adr_type */
2932 0,
2933 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2934 0,
2935 0,
2936 0},
2937
2938 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2939 {"tprel_g1_nc", 0,
2940 0, /* adr_type */
2941 0,
2942 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2943 0,
2944 0,
2945 0},
2946
2947 /* Most significant bits 0-15 of address/value: MOVZ. */
2948 {"tprel_g0", 0,
2949 0, /* adr_type */
2950 0,
2951 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2952 0,
2953 0,
2954 0},
2955
2956 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2957 {"tprel_g0_nc", 0,
2958 0, /* adr_type */
2959 0,
2960 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2961 0,
2962 0,
2963 0},
2964
2965 /* 15bit offset from got entry to base address of GOT table. */
2966 {"gotpage_lo15", 0,
2967 0,
2968 0,
2969 0,
2970 0,
2971 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2972 0},
2973
2974 /* 14bit offset from got entry to base address of GOT table. */
2975 {"gotpage_lo14", 0,
2976 0,
2977 0,
2978 0,
2979 0,
2980 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2981 0},
2982 };
2983
2984 /* Given the address of a pointer pointing to the textual name of a
2985 relocation as may appear in assembler source, attempt to find its
2986 details in reloc_table. The pointer will be updated to the character
2987 after the trailing colon. On failure, NULL will be returned;
2988 otherwise return the reloc_table_entry. */
2989
2990 static struct reloc_table_entry *
2991 find_reloc_table_entry (char **str)
2992 {
2993 unsigned int i;
2994 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2995 {
2996 int length = strlen (reloc_table[i].name);
2997
2998 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2999 && (*str)[length] == ':')
3000 {
3001 *str += (length + 1);
3002 return &reloc_table[i];
3003 }
3004 }
3005
3006 return NULL;
3007 }
3008
/* Mode argument to parse_shift and parse_shifter_operand.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3023
3024 /* Parse a <shift> operator on an AArch64 data processing instruction.
3025 Return TRUE on success; otherwise return FALSE. */
static bfd_boolean
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan the leading alphabetic run; this is the shift mnemonic.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return FALSE;
    }

  /* Look the mnemonic up in the shift-operator hash table.  */
  shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return FALSE;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only valid where the caller asked for the LSL|MSL form.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return FALSE;
    }

  /* MUL is only valid in the two MUL modes.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return FALSE;
    }

  /* Reject operator kinds that MODE does not permit.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return FALSE;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return FALSE;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  MUL VL and a register-offset closing ']' take
     no amount; everything else accepts an optional '#' prefix and then
     an expression.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* Only extend operators may omit the amount; a bare '#' followed
	 by nothing is still an error.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return FALSE;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return FALSE;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return FALSE;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return TRUE;
}
3196
3197 /* Parse a <shifter_operand> for a data processing instruction:
3198
3199 #<immediate>
3200 #<immediate>, LSL #imm
3201
3202 Validation of immediate operands is deferred to md_apply_fix.
3203
3204 Return TRUE on success; otherwise return FALSE. */
3205
static bfd_boolean
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the two immediate-accepting modes reach the expression
     parser.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return FALSE;

  p = *str;

  /* Accept an immediate expression.  */
  if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
    return FALSE;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return FALSE;

  /* Don't accept any shifter for logical immediate values.  NOTE(review):
     skip_past_comma advances P even when parse_shift then fails, so a
     trailing ",<garbage>" relies on the caller rejecting the leftover
     text — confirm against callers.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return FALSE;
    }

  *str = p;
  return TRUE;
}
3237
3238 /* Parse a <shifter_operand> for a data processing instruction:
3239
3240 <Rm>
3241 <Rm>, <shift>
3242 #<immediate>
3243 #<immediate>, LSL #imm
3244
3245 where <shift> is handled by parse_shift above, and the last two
3246 cases are handled by the function above.
3247
3248 Validation of immediate operands is deferred to md_apply_fix.
3249
3250 Return TRUE on success; otherwise return FALSE. */
3251
3252 static bfd_boolean
3253 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3254 enum parse_shift_mode mode)
3255 {
3256 const reg_entry *reg;
3257 aarch64_opnd_qualifier_t qualifier;
3258 enum aarch64_operand_class opd_class
3259 = aarch64_get_operand_class (operand->type);
3260
3261 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3262 if (reg)
3263 {
3264 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3265 {
3266 set_syntax_error (_("unexpected register in the immediate operand"));
3267 return FALSE;
3268 }
3269
3270 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3271 {
3272 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3273 return FALSE;
3274 }
3275
3276 operand->reg.regno = reg->number;
3277 operand->qualifier = qualifier;
3278
3279 /* Accept optional shift operation on register. */
3280 if (! skip_past_comma (str))
3281 return TRUE;
3282
3283 if (! parse_shift (str, operand, mode))
3284 return FALSE;
3285
3286 return TRUE;
3287 }
3288 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3289 {
3290 set_syntax_error
3291 (_("integer register expected in the extended/shifted operand "
3292 "register"));
3293 return FALSE;
3294 }
3295
3296 /* We have a shifted immediate variable. */
3297 return parse_shifter_operand_imm (str, operand, mode);
3298 }
3299
3300 /* Return TRUE on success; return FALSE otherwise. */
3301
static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step past "#:" or ":".  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      /* This context uses the ADD-immediate reloc variant; the modifier
	 must provide one.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3360
3361 /* Parse all forms of an address expression. Information is written
3362 to *OPERAND and/or inst.reloc.
3363
3364 The A64 instruction set has the following addressing modes:
3365
3366 Offset
3367 [base] // in SIMD ld/st structure
3368 [base{,#0}] // in ld/st exclusive
3369 [base{,#imm}]
3370 [base,Xm{,LSL #imm}]
3371 [base,Xm,SXTX {#imm}]
3372 [base,Wm,(S|U)XTW {#imm}]
3373 Pre-indexed
3374 [base,#imm]!
3375 [base]! // in ld/stgv
3376 Post-indexed
3377 [base],#imm
3378 [base],Xm // in SIMD ld/st structure
3379 PC-relative (literal)
3380 label
3381 SVE:
3382 [base,#imm,MUL VL]
3383 [base,Zm.D{,LSL #imm}]
3384 [base,Zm.S,(S|U)XTW {#imm}]
3385 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3386 [Zn.S,#imm]
3387 [Zn.D,#imm]
3388 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3389 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3390 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3391
3392 (As a convenience, the notation "=immediate" is permitted in conjunction
3393 with the pc-relative literal load instructions to automatically place an
3394 immediate value or symbolic address in a nearby literal pool and generate
3395 a hidden label which references it.)
3396
3397 Upon a successful parsing, the address structure in *OPERAND will be
3398 filled in the following way:
3399
3400 .base_regno = <base>
3401 .offset.is_reg // 1 if the offset is a register
3402 .offset.imm = <imm>
3403 .offset.regno = <Rm>
3404
3405 For different addressing modes defined in the A64 ISA:
3406
3407 Offset
3408 .pcrel=0; .preind=1; .postind=0; .writeback=0
3409 Pre-indexed
3410 .pcrel=0; .preind=1; .postind=0; .writeback=1
3411 Post-indexed
3412 .pcrel=0; .preind=0; .postind=1; .writeback=1
3413 PC-relative (literal)
3414 .pcrel=1; .preind=1; .postind=0; .writeback=0
3415
3416 The shift/extension information, if any, will be stored in .shifter.
3417 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3418 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3419 corresponding register.
3420
3421 BASE_TYPE says which types of base register should be accepted and
3422 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3423 is the type of shifter that is allowed for immediate offsets,
3424 or SHIFTED_NONE if none.
3425
3426 In all other respects, it is the caller's responsibility to check
3427 for addressing modes not supported by the instruction, and to set
3428 inst.reloc.type. */
3429
static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[' - this is the PC-relative form: =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  /* ADR uses a different relocation from the PC-relative
	     literal loads.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return FALSE;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}] */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifters require a 64-bit offset whose size
		 matches the base register's.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return FALSE;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return FALSE;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return FALSE;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      /* Post-indexed offset must be a plain 64-bit GPR or an
	 immediate expression.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return FALSE;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; reject [Rn]! except for ld/stgv but accept [Rn]
     as a shorthand for [Rn,#0].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->type != AARCH64_OPND_ADDR_SIMPLE_2 && operand->addr.writeback)
	{
	  /* Reject [Rn]! */
	  set_syntax_error (_("missing offset in the pre-indexed address"));
	  return FALSE;
	}

      operand->addr.preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }

  *str = p;
  return TRUE;
}
3703
3704 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3705 on success. */
3706 static bfd_boolean
3707 parse_address (char **str, aarch64_opnd_info *operand)
3708 {
3709 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3710 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3711 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3712 }
3713
/* Parse an address in which SVE vector registers and MUL VL are allowed.
   The arguments have the same meaning as for parse_address_main.
   The base/offset register classes are the SVE-specific ones and the
   immediate shifter may be MUL VL.  Return TRUE on success.  */
static bfd_boolean
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
3726
3727 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3728 Return TRUE on success; otherwise return FALSE. */
3729 static bfd_boolean
3730 parse_half (char **str, int *internal_fixup_p)
3731 {
3732 char *p = *str;
3733
3734 skip_past_char (&p, '#');
3735
3736 gas_assert (internal_fixup_p);
3737 *internal_fixup_p = 0;
3738
3739 if (*p == ':')
3740 {
3741 struct reloc_table_entry *entry;
3742
3743 /* Try to parse a relocation. Anything else is an error. */
3744 ++p;
3745 if (!(entry = find_reloc_table_entry (&p)))
3746 {
3747 set_syntax_error (_("unknown relocation modifier"));
3748 return FALSE;
3749 }
3750
3751 if (entry->movw_type == 0)
3752 {
3753 set_syntax_error
3754 (_("this relocation modifier is not allowed on this instruction"));
3755 return FALSE;
3756 }
3757
3758 inst.reloc.type = entry->movw_type;
3759 }
3760 else
3761 *internal_fixup_p = 1;
3762
3763 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3764 return FALSE;
3765
3766 *str = p;
3767 return TRUE;
3768 }
3769
3770 /* Parse an operand for an ADRP instruction:
3771 ADRP <Xd>, <label>
3772 Return TRUE on success; otherwise return FALSE. */
3773
3774 static bfd_boolean
3775 parse_adrp (char **str)
3776 {
3777 char *p;
3778
3779 p = *str;
3780 if (*p == ':')
3781 {
3782 struct reloc_table_entry *entry;
3783
3784 /* Try to parse a relocation. Anything else is an error. */
3785 ++p;
3786 if (!(entry = find_reloc_table_entry (&p)))
3787 {
3788 set_syntax_error (_("unknown relocation modifier"));
3789 return FALSE;
3790 }
3791
3792 if (entry->adrp_type == 0)
3793 {
3794 set_syntax_error
3795 (_("this relocation modifier is not allowed on this instruction"));
3796 return FALSE;
3797 }
3798
3799 inst.reloc.type = entry->adrp_type;
3800 }
3801 else
3802 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3803
3804 inst.reloc.pc_rel = 1;
3805
3806 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3807 return FALSE;
3808
3809 *str = p;
3810 return TRUE;
3811 }
3812
3813 /* Miscellaneous. */
3814
3815 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3816 of SIZE tokens in which index I gives the token for field value I,
3817 or is null if field value I is invalid. REG_TYPE says which register
3818 names should be treated as registers rather than as symbolic immediates.
3819
3820 Return true on success, moving *STR past the operand and storing the
3821 field value in *VAL. */
3822
3823 static int
3824 parse_enum_string (char **str, int64_t *val, const char *const *array,
3825 size_t size, aarch64_reg_type reg_type)
3826 {
3827 expressionS exp;
3828 char *p, *q;
3829 size_t i;
3830
3831 /* Match C-like tokens. */
3832 p = q = *str;
3833 while (ISALNUM (*q))
3834 q++;
3835
3836 for (i = 0; i < size; ++i)
3837 if (array[i]
3838 && strncasecmp (array[i], p, q - p) == 0
3839 && array[i][q - p] == 0)
3840 {
3841 *val = i;
3842 *str = q;
3843 return TRUE;
3844 }
3845
3846 if (!parse_immediate_expression (&p, &exp, reg_type))
3847 return FALSE;
3848
3849 if (exp.X_op == O_constant
3850 && (uint64_t) exp.X_add_number < size)
3851 {
3852 *val = exp.X_add_number;
3853 *str = p;
3854 return TRUE;
3855 }
3856
3857 /* Use the default error for this operand. */
3858 return FALSE;
3859 }
3860
3861 /* Parse an option for a preload instruction. Returns the encoding for the
3862 option, or PARSE_FAIL. */
3863
3864 static int
3865 parse_pldop (char **str)
3866 {
3867 char *p, *q;
3868 const struct aarch64_name_value_pair *o;
3869
3870 p = q = *str;
3871 while (ISALNUM (*q))
3872 q++;
3873
3874 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3875 if (!o)
3876 return PARSE_FAIL;
3877
3878 *str = q;
3879 return o->value;
3880 }
3881
3882 /* Parse an option for a barrier instruction. Returns the encoding for the
3883 option, or PARSE_FAIL. */
3884
3885 static int
3886 parse_barrier (char **str)
3887 {
3888 char *p, *q;
3889 const asm_barrier_opt *o;
3890
3891 p = q = *str;
3892 while (ISALPHA (*q))
3893 q++;
3894
3895 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3896 if (!o)
3897 return PARSE_FAIL;
3898
3899 *str = q;
3900 return o->value;
3901 }
3902
3903 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3904 return 0 if successful. Otherwise return PARSE_FAIL. */
3905
3906 static int
3907 parse_barrier_psb (char **str,
3908 const struct aarch64_name_value_pair ** hint_opt)
3909 {
3910 char *p, *q;
3911 const struct aarch64_name_value_pair *o;
3912
3913 p = q = *str;
3914 while (ISALPHA (*q))
3915 q++;
3916
3917 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3918 if (!o)
3919 {
3920 set_fatal_syntax_error
3921 ( _("unknown or missing option to PSB"));
3922 return PARSE_FAIL;
3923 }
3924
3925 if (o->value != 0x11)
3926 {
3927 /* PSB only accepts option name 'CSYNC'. */
3928 set_syntax_error
3929 (_("the specified option is not accepted for PSB"));
3930 return PARSE_FAIL;
3931 }
3932
3933 *str = q;
3934 *hint_opt = o;
3935 return 0;
3936 }
3937
3938 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
3939 return 0 if successful. Otherwise return PARSE_FAIL. */
3940
3941 static int
3942 parse_bti_operand (char **str,
3943 const struct aarch64_name_value_pair ** hint_opt)
3944 {
3945 char *p, *q;
3946 const struct aarch64_name_value_pair *o;
3947
3948 p = q = *str;
3949 while (ISALPHA (*q))
3950 q++;
3951
3952 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3953 if (!o)
3954 {
3955 set_fatal_syntax_error
3956 ( _("unknown option to BTI"));
3957 return PARSE_FAIL;
3958 }
3959
3960 switch (o->value)
3961 {
3962 /* Valid BTI operands. */
3963 case HINT_OPD_C:
3964 case HINT_OPD_J:
3965 case HINT_OPD_JC:
3966 break;
3967
3968 default:
3969 set_syntax_error
3970 (_("unknown option to BTI"));
3971 return PARSE_FAIL;
3972 }
3973
3974 *str = q;
3975 *hint_opt = o;
3976 return 0;
3977 }
3978
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-NULL, the register's flag bits (or 0 for an
   implementation-defined name) are stored through it.
*/

static int
parse_sys_reg (char **str, struct hash_control *sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_reg *o;
  int value;

  /* Copy the name into BUF, lower-cased, so the hash lookup is
     case-insensitive.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';
  /* Assert that BUF be large enough.  */
  gas_assert (p - buf == q - *str);

  o = hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Range-check each field before packing them into the
	     15-bit encoding op0:op1:CRn:CRm:op2.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: diagnose uses not supported by the selected
	 processor, and warn about deprecated names.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4046
4047 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4048 for the option, or NULL. */
4049
4050 static const aarch64_sys_ins_reg *
4051 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
4052 {
4053 char *p, *q;
4054 char buf[32];
4055 const aarch64_sys_ins_reg *o;
4056
4057 p = buf;
4058 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4059 if (p < buf + 31)
4060 *p++ = TOLOWER (*q);
4061 *p = '\0';
4062
4063 o = hash_find (sys_ins_regs, buf);
4064 if (!o)
4065 return NULL;
4066
4067 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
4068 as_bad (_("selected processor does not support system register "
4069 "name '%s'"), buf);
4070
4071 *str = q;
4072 return o;
4073 }
4074 \f
/* Operand-parsing helper macros.  Each one consumes text from the
   caller's local STR and jumps to the caller's `failure' label on a
   parse error; several also rely on caller locals such as VAL, REG,
   RTYPE, QUALIFIER, INFO and IMM_REG_TYPE.  */

/* Require the literal character CHR next in the input.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into VAL (and its type into RTYPE),
   recording the default error on failure.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE, storing its
   number and qualifier into the current operand INFO.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL without any range check.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic token (or in-range constant) from ARRAY into VAL.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; branch to `failure' if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4126 \f
/* Encode the 12-bit immediate of an add/sub (immediate) instruction;
   the field occupies bits [21:10].  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  const unsigned imm12_lsb = 10;

  return imm << imm12_lsb;
}
4133
/* Encode the shift-amount field of an add/sub (immediate) instruction;
   the field occupies bits [23:22].  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  const unsigned shift_lsb = 22;

  return cnt << shift_lsb;
}
4140
4141
/* Encode the 21-bit immediate of an ADR instruction: the low two bits
   go to immlo (bits [30:29]) and the remaining 19 bits to immhi
   (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;
  uint32_t immhi = (imm >> 2) & 0x7ffff;

  return (immlo << 29) | (immhi << 5);
}
4149
/* Encode the 16-bit immediate of a move-wide instruction; the imm16
   field occupies bits [20:5].  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  const unsigned imm16_lsb = 5;

  return imm << imm16_lsb;
}
4156
/* Encode the 26-bit offset of an unconditional branch (B/BL); the
   offset occupies bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x03ffffff;
}
4163
/* Encode the 19-bit offset of a conditional branch or compare &
   branch; the offset occupies bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4170
/* Encode the 19-bit offset of a load-literal instruction; the offset
   occupies bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4177
/* Encode the 14-bit offset of a test & branch (TBZ/TBNZ); the offset
   occupies bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fff) << 5;
}
4184
/* Encode the 16-bit immediate of SVC/HVC/SMC; the imm16 field occupies
   bits [20:5].  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  const unsigned imm16_lsb = 5;

  return imm << imm16_lsb;
}
4191
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling the
   op bit (bit 30).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ ((uint32_t) 1 << 30);
}
4198
/* Reencode a MOVN/MOVZ-family opcode as MOVZ by setting the opc bit
   (bit 30).  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | ((uint32_t) 1 << 30);
}
4204
/* Reencode a MOVN/MOVZ-family opcode as MOVN by clearing the opc bit
   (bit 30).  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~((uint32_t) 1 << 30);
}
4210
4211 /* Overall per-instruction processing. */
4212
4213 /* We need to be able to fix up arbitrary expressions in some statements.
4214 This is so that we can handle symbols that are an arbitrary distance from
4215 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4216 which returns part of an address in a form which will be valid for
4217 a data instruction. We do this by pushing the expression into a symbol
4218 in the expr_section, and creating a fix for that. */
4219
4220 static fixS *
4221 fix_new_aarch64 (fragS * frag,
4222 int where,
4223 short int size, expressionS * exp, int pc_rel, int reloc)
4224 {
4225 fixS *new_fix;
4226
4227 switch (exp->X_op)
4228 {
4229 case O_constant:
4230 case O_symbol:
4231 case O_add:
4232 case O_subtract:
4233 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4234 break;
4235
4236 default:
4237 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4238 pc_rel, reloc);
4239 break;
4240 }
4241 return new_fix;
4242 }
4243 \f
/* Diagnostics on operands errors.  */

/* By default, output a verbose error message when operand parsing
   fails.  The verbose message can be disabled with -mno-verbose-error.  */
static int verbose_error_p = 1;
4249
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The array is
   indexed by enum aarch64_operand_error_kind, so the entries must
   stay in the same order as that enum.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4265
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.

   Severity is simply the numeric order of the enum values; the assert
   chain below verifies (at runtime, in assert-enabled builds) that the
   enum is declared in ascending severity order, so a plain comparison
   is valid.  */

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4286
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4315
4316 static void
4317 reset_aarch64_instruction (aarch64_instruction *instruction)
4318 {
4319 memset (instruction, '\0', sizeof (aarch64_instruction));
4320 instruction->reloc.type = BFD_RELOC_UNUSED;
4321 }
4322
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode template the error is for.  */
  aarch64_operand_error detail;		/* The recorded error itself.  */
  struct operand_error_record *next;	/* Next record in the linked list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of error records, one per opcode template tried.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
4342
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines to avoid
   re-allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4354
4355 /* Initialize the data structure that stores the operand mismatch
4356 information on assembling one line of the assembly code. */
4357 static void
4358 init_operand_error_report (void)
4359 {
4360 if (operand_error_report.head != NULL)
4361 {
4362 gas_assert (operand_error_report.tail != NULL);
4363 operand_error_report.tail->next = free_opnd_error_record_nodes;
4364 free_opnd_error_record_nodes = operand_error_report.head;
4365 operand_error_report.head = NULL;
4366 operand_error_report.tail = NULL;
4367 return;
4368 }
4369 gas_assert (operand_error_report.tail == NULL);
4370 }
4371
4372 /* Return TRUE if some operand error has been recorded during the
4373 parsing of the current assembly line using the opcode *OPCODE;
4374 otherwise return FALSE. */
4375 static inline bfd_boolean
4376 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4377 {
4378 operand_error_record *record = operand_error_report.head;
4379 return record && record->opcode == opcode;
4380 }
4381
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* RECORD initially aliases the head of the report; when the head is
     already the record for OPCODE, that is the one we may update.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Either a fresh record, or the existing one should be overwritten.  */
  record->detail = new_record->detail;
}
4433
4434 static inline void
4435 record_operand_error_info (const aarch64_opcode *opcode,
4436 aarch64_operand_error *error_info)
4437 {
4438 operand_error_record record;
4439 record.opcode = opcode;
4440 record.detail = *error_info;
4441 add_operand_error_record (&record);
4442 }
4443
4444 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4445 error message *ERROR, for operand IDX (count from 0). */
4446
4447 static void
4448 record_operand_error (const aarch64_opcode *opcode, int idx,
4449 enum aarch64_operand_error_kind kind,
4450 const char* error)
4451 {
4452 aarch64_operand_error info;
4453 memset(&info, 0, sizeof (info));
4454 info.index = idx;
4455 info.kind = kind;
4456 info.error = error;
4457 info.non_fatal = FALSE;
4458 record_operand_error_info (opcode, &info);
4459 }
4460
4461 static void
4462 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4463 enum aarch64_operand_error_kind kind,
4464 const char* error, const int *extra_data)
4465 {
4466 aarch64_operand_error info;
4467 info.index = idx;
4468 info.kind = kind;
4469 info.error = error;
4470 info.data[0] = extra_data[0];
4471 info.data[1] = extra_data[1];
4472 info.data[2] = extra_data[2];
4473 info.non_fatal = FALSE;
4474 record_operand_error_info (opcode, &info);
4475 }
4476
4477 static void
4478 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4479 const char* error, int lower_bound,
4480 int upper_bound)
4481 {
4482 int data[3] = {lower_bound, upper_bound, 0};
4483 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4484 error, data);
4485 }
4486
4487 /* Remove the operand error record for *OPCODE. */
4488 static void ATTRIBUTE_UNUSED
4489 remove_operand_error_record (const aarch64_opcode *opcode)
4490 {
4491 if (opcode_has_operand_error_p (opcode))
4492 {
4493 operand_error_record* record = operand_error_report.head;
4494 gas_assert (record != NULL && operand_error_report.tail != NULL);
4495 operand_error_report.head = record->next;
4496 record->next = free_opnd_error_record_nodes;
4497 free_opnd_error_record_nodes = record;
4498 if (operand_error_report.head == NULL)
4499 {
4500 gas_assert (operand_error_report.tail == record);
4501 operand_error_report.tail = NULL;
4502 }
4503 }
4504 }
4505
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
   "Best" is the sequence agreeing with the largest number of the
   instruction's operand qualifiers.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.
	 An all-NIL sequence marks the end of the meaningful entries.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers agree with this pattern.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
4555
4556 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4557 corresponding operands in *INSTR. */
4558
4559 static inline void
4560 assign_qualifier_sequence (aarch64_inst *instr,
4561 const aarch64_opnd_qualifier_t *qualifiers)
4562 {
4563 int i = 0;
4564 int num_opnds = aarch64_num_of_operands (instr->opcode);
4565 gas_assert (num_opnds);
4566 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4567 instr->operands[i].qualifier = *qualifiers;
4568 }
4569
/* Print operands for the diagnosis purpose.  The printable forms of
   OPNDS (belonging to OPCODE) are appended to BUF, comma-separated;
   BUF must already contain a NUL-terminated prefix with room left.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);
    }
}
4603
4604 /* Send to stderr a string as information. */
4605
static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file;
  va_list args;

  /* Prefix the message with the current file (and line, when known) so
     the note lines up with the error it accompanies.  */
  file = as_where (&line);
  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4627
4628 /* Output one operand error record. */
4629
static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  /* Index of the offending operand, or -1 when unknown.  */
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
			       : AARCH64_OPND_NIL);

  /* Non-fatal records are reported as warnings, the rest as errors.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is one, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst by re-parsing the operand text after the mnemonic.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* The encode is expected to fail again; we only need the IR.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes have far fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] carry the permitted lower/upper bounds.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] is the expected number of registers in the list.  */
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4804
4805 /* Process and output the error message about the operand mismatching.
4806
4807 When this function is called, the operand error information had
4808 been collected for an assembly line and there will be multiple
4809 errors in the case of multiple instruction templates; output the
4810 error message that most closely describes the problem.
4811
4812 The errors to be printed can be filtered on printing all errors
4813 or only non-fatal errors. This distinction has to be made because
4814 the error buffer may already be filled with fatal errors we don't want to
4815 print due to the different instruction templates. */
4816
static void
output_operand_error_report (char *str, bfd_boolean non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  When filtering for
     non-fatal errors only, a fatal record never contributes a kind.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  /* KIND may legitimately stay NIL when only fatal errors were collected
     but we were asked for non-fatal ones.  */
  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
4902 \f
4903 /* Write an AARCH64 instruction to buf - always little-endian. */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int byte;

  /* Store least-significant byte first (little-endian), regardless of
     host endianness.  */
  for (byte = 0; byte < 4; byte++)
    p[byte] = (insn >> (8 * byte)) & 0xff;
}
4913
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;

  /* Reassemble a 32-bit value from little-endian byte order.  */
  return (uint32_t) p[0]
	 | ((uint32_t) p[1] << 8)
	 | ((uint32_t) p[2] << 16)
	 | ((uint32_t) p[3] << 24);
}
4922
/* Emit the 4-byte instruction currently held in the global INST into the
   output frag, together with any pending relocation.  NEW_INST, when
   non-NULL, is attached to the fix-up so md_apply_fix can re-encode.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve space for one instruction in the current frag.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  /* Write the encoded instruction (always little-endian).  */
  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fix-ups carry the operand and flags so the reloc can
	     be resolved by this backend rather than emitted.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
4956
4957 /* Link together opcodes of the same name. */
4958
struct templates
{
  /* One opcode entry for this mnemonic.  */
  aarch64_opcode *opcode;
  /* Next opcode entry sharing the same mnemonic, or NULL.  */
  struct templates *next;
};

typedef struct templates templates;
4966
4967 static templates *
4968 lookup_mnemonic (const char *start, int len)
4969 {
4970 templates *templ = NULL;
4971
4972 templ = hash_find_n (aarch64_ops_hsh, start, len);
4973 return templ;
4974 }
4975
4976 /* Subroutine of md_assemble, responsible for looking up the primary
4977 opcode from the mnemonic the user wrote. STR points to the
4978 beginning of the mnemonic. */
4979
4980 static templates *
4981 opcode_lookup (char **str)
4982 {
4983 char *end, *base, *dot;
4984 const aarch64_cond *cond;
4985 char condname[16];
4986 int len;
4987
4988 /* Scan up to the end of the mnemonic, which must end in white space,
4989 '.', or end of string. */
4990 dot = 0;
4991 for (base = end = *str; is_part_of_name(*end); end++)
4992 if (*end == '.' && !dot)
4993 dot = end;
4994
4995 if (end == base || dot == base)
4996 return 0;
4997
4998 inst.cond = COND_ALWAYS;
4999
5000 /* Handle a possible condition. */
5001 if (dot)
5002 {
5003 cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5004 if (cond)
5005 {
5006 inst.cond = cond->value;
5007 *str = end;
5008 }
5009 else
5010 {
5011 *str = dot;
5012 return 0;
5013 }
5014 len = dot - base;
5015 }
5016 else
5017 {
5018 *str = end;
5019 len = end - base;
5020 }
5021
5022 if (inst.cond == COND_ALWAYS)
5023 {
5024 /* Look for unaffixed mnemonic. */
5025 return lookup_mnemonic (base, len);
5026 }
5027 else if (len <= 13)
5028 {
5029 /* append ".c" to mnemonic if conditional */
5030 memcpy (condname, base, len);
5031 memcpy (condname + len, ".c", 2);
5032 base = condname;
5033 len += 2;
5034 return lookup_mnemonic (base, len);
5035 }
5036
5037 return NULL;
5038 }
5039
5040 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5041 to a corresponding operand qualifier. */
5042
static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the width-derived offset is
     added to this below.  Relies on the qualifiers being declared in a
     regular order in the enum.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate qualifiers: /z (zeroing) and /m (merging).  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Vector element register.  Relies on QLF_S_B..QLF_S_Q being laid
	 out contiguously in the same order as NT_b..NT_q.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit vector arrangements are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5110
5111 /* Process an optional operand that is found omitted from the assembly line.
5112 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5113 instruction's opcode entry while IDX is the index of this omitted operand.
5114 */
5115
static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* The default encoding value recorded in the opcode table for the
     optional operand of this instruction.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  /* Only an operand marked optional may be omitted, and it must not have
     been parsed already.  */
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element (register-lane) operands.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate-style operands: the default is the immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* SVE pattern with multiplier: an omitted multiplier means "MUL #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate leaves no relocation pending.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Barrier/hint operands index their respective option tables.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      /* Other operand types have no omitted-operand handling.  */
      break;
    }
}
5207
5208 /* Process the relocation type for move wide instructions.
5209 Return TRUE on success; otherwise return FALSE. */
5210
static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* 32-bit destination register restricts which relocations are valid.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not take a signed/group relocation that could rewrite the
     whole register; reject those up front.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  /* Derive the implicit LSL amount from the relocation's group number
     (G0 -> 0, G1 -> 16, G2 -> 32, G3 -> 48).  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Groups 2 and 3 address bits above 31 and so only make sense for
	 a 64-bit destination.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5309
5310 /* A primitive log calculator. */
5311
/* A primitive log2 calculator for power-of-two byte sizes 1..16.
   SIZE values that are not a power of two, zero, or above 16 assert
   (and return (unsigned int) -1 when assertions are disabled).  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* ls[n-1] == log2 (n); (unsigned char) -1 marks non-powers-of-two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as SIZE > 16: with only the original
     `size > 16' check, SIZE == 0 made `ls[size - 1]' an out-of-bounds
     read (unsigned wrap-around to UINT_MAX).  */
  if (size < 1 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5325
5326 /* Determine and return the real reloc type code for an instruction
5327 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
5328
static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: the pseudo reloc kind (plain LO12, then the four TLS variants);
     columns: log2 of the transfer size (8/16/32/64/128 bits).  The TLS
     rows have no 128-bit relocation, hence BFD_RELOC_AARCH64_NONE.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  /* Only the five pseudo reloc kinds listed above may reach here.  */
  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand has no qualifier yet, infer it from the first
     operand's qualifier via the opcode's qualifier list.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* TLS variants top out at 64-bit transfers (see the NONE entries).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5404
5405 /* Check whether a register list REGINFO is valid. The registers must be
5406 numbered in increasing order (modulo 32), in increments of one or two.
5407
5408 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5409 increments of two.
5410
5411 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5412
5413 static bfd_boolean
5414 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5415 {
5416 uint32_t i, nb_regs, prev_regno, incr;
5417
5418 nb_regs = 1 + (reginfo & 0x3);
5419 reginfo >>= 2;
5420 prev_regno = reginfo & 0x1f;
5421 incr = accept_alternate ? 2 : 1;
5422
5423 for (i = 1; i < nb_regs; ++i)
5424 {
5425 uint32_t curr_regno;
5426 reginfo >>= 5;
5427 curr_regno = reginfo & 0x1f;
5428 if (curr_regno != ((prev_regno + incr) & 0x1f))
5429 return FALSE;
5430 prev_regno = curr_regno;
5431 }
5432
5433 return TRUE;
5434 }
5435
5436 /* Generic instruction operand parser. This does no encoding and no
5437 semantic validation; it merely squirrels values away in the inst
5438 structure. Returns TRUE or FALSE depending on whether the
5439 specified grammar matched. */
5440
5441 static bfd_boolean
5442 parse_operands (char *str, const aarch64_opcode *opcode)
5443 {
5444 int i;
5445 char *backtrack_pos = 0;
5446 const enum aarch64_opnd *operands = opcode->operands;
5447 aarch64_reg_type imm_reg_type;
5448
5449 clear_error ();
5450 skip_whitespace (str);
5451
5452 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5453 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5454 else
5455 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5456
5457 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5458 {
5459 int64_t val;
5460 const reg_entry *reg;
5461 int comma_skipped_p = 0;
5462 aarch64_reg_type rtype;
5463 struct vector_type_el vectype;
5464 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5465 aarch64_opnd_info *info = &inst.base.operands[i];
5466 aarch64_reg_type reg_type;
5467
5468 DEBUG_TRACE ("parse operand %d", i);
5469
5470 /* Assign the operand code. */
5471 info->type = operands[i];
5472
5473 if (optional_operand_p (opcode, i))
5474 {
5475 /* Remember where we are in case we need to backtrack. */
5476 gas_assert (!backtrack_pos);
5477 backtrack_pos = str;
5478 }
5479
5480 /* Expect comma between operands; the backtrack mechanism will take
5481 care of cases of omitted optional operand. */
5482 if (i > 0 && ! skip_past_char (&str, ','))
5483 {
5484 set_syntax_error (_("comma expected between operands"));
5485 goto failure;
5486 }
5487 else
5488 comma_skipped_p = 1;
5489
5490 switch (operands[i])
5491 {
5492 case AARCH64_OPND_Rd:
5493 case AARCH64_OPND_Rn:
5494 case AARCH64_OPND_Rm:
5495 case AARCH64_OPND_Rt:
5496 case AARCH64_OPND_Rt2:
5497 case AARCH64_OPND_Rs:
5498 case AARCH64_OPND_Ra:
5499 case AARCH64_OPND_Rt_SYS:
5500 case AARCH64_OPND_PAIRREG:
5501 case AARCH64_OPND_SVE_Rm:
5502 po_int_reg_or_fail (REG_TYPE_R_Z);
5503 break;
5504
5505 case AARCH64_OPND_Rd_SP:
5506 case AARCH64_OPND_Rn_SP:
5507 case AARCH64_OPND_SVE_Rn_SP:
5508 case AARCH64_OPND_Rm_SP:
5509 po_int_reg_or_fail (REG_TYPE_R_SP);
5510 break;
5511
5512 case AARCH64_OPND_Rm_EXT:
5513 case AARCH64_OPND_Rm_SFT:
5514 po_misc_or_fail (parse_shifter_operand
5515 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5516 ? SHIFTED_ARITH_IMM
5517 : SHIFTED_LOGIC_IMM)));
5518 if (!info->shifter.operator_present)
5519 {
5520 /* Default to LSL if not present. Libopcodes prefers shifter
5521 kind to be explicit. */
5522 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5523 info->shifter.kind = AARCH64_MOD_LSL;
5524 /* For Rm_EXT, libopcodes will carry out further check on whether
5525 or not stack pointer is used in the instruction (Recall that
5526 "the extend operator is not optional unless at least one of
5527 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5528 }
5529 break;
5530
5531 case AARCH64_OPND_Fd:
5532 case AARCH64_OPND_Fn:
5533 case AARCH64_OPND_Fm:
5534 case AARCH64_OPND_Fa:
5535 case AARCH64_OPND_Ft:
5536 case AARCH64_OPND_Ft2:
5537 case AARCH64_OPND_Sd:
5538 case AARCH64_OPND_Sn:
5539 case AARCH64_OPND_Sm:
5540 case AARCH64_OPND_SVE_VZn:
5541 case AARCH64_OPND_SVE_Vd:
5542 case AARCH64_OPND_SVE_Vm:
5543 case AARCH64_OPND_SVE_Vn:
5544 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5545 if (val == PARSE_FAIL)
5546 {
5547 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5548 goto failure;
5549 }
5550 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5551
5552 info->reg.regno = val;
5553 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5554 break;
5555
5556 case AARCH64_OPND_SVE_Pd:
5557 case AARCH64_OPND_SVE_Pg3:
5558 case AARCH64_OPND_SVE_Pg4_5:
5559 case AARCH64_OPND_SVE_Pg4_10:
5560 case AARCH64_OPND_SVE_Pg4_16:
5561 case AARCH64_OPND_SVE_Pm:
5562 case AARCH64_OPND_SVE_Pn:
5563 case AARCH64_OPND_SVE_Pt:
5564 reg_type = REG_TYPE_PN;
5565 goto vector_reg;
5566
5567 case AARCH64_OPND_SVE_Za_5:
5568 case AARCH64_OPND_SVE_Za_16:
5569 case AARCH64_OPND_SVE_Zd:
5570 case AARCH64_OPND_SVE_Zm_5:
5571 case AARCH64_OPND_SVE_Zm_16:
5572 case AARCH64_OPND_SVE_Zn:
5573 case AARCH64_OPND_SVE_Zt:
5574 reg_type = REG_TYPE_ZN;
5575 goto vector_reg;
5576
5577 case AARCH64_OPND_Va:
5578 case AARCH64_OPND_Vd:
5579 case AARCH64_OPND_Vn:
5580 case AARCH64_OPND_Vm:
5581 reg_type = REG_TYPE_VN;
5582 vector_reg:
5583 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5584 if (val == PARSE_FAIL)
5585 {
5586 first_error (_(get_reg_expected_msg (reg_type)));
5587 goto failure;
5588 }
5589 if (vectype.defined & NTA_HASINDEX)
5590 goto failure;
5591
5592 info->reg.regno = val;
5593 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5594 && vectype.type == NT_invtype)
5595 /* Unqualified Pn and Zn registers are allowed in certain
5596 contexts. Rely on F_STRICT qualifier checking to catch
5597 invalid uses. */
5598 info->qualifier = AARCH64_OPND_QLF_NIL;
5599 else
5600 {
5601 info->qualifier = vectype_to_qualifier (&vectype);
5602 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5603 goto failure;
5604 }
5605 break;
5606
5607 case AARCH64_OPND_VdD1:
5608 case AARCH64_OPND_VnD1:
5609 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5610 if (val == PARSE_FAIL)
5611 {
5612 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5613 goto failure;
5614 }
5615 if (vectype.type != NT_d || vectype.index != 1)
5616 {
5617 set_fatal_syntax_error
5618 (_("the top half of a 128-bit FP/SIMD register is expected"));
5619 goto failure;
5620 }
5621 info->reg.regno = val;
5622 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5623 here; it is correct for the purpose of encoding/decoding since
5624 only the register number is explicitly encoded in the related
5625 instructions, although this appears a bit hacky. */
5626 info->qualifier = AARCH64_OPND_QLF_S_D;
5627 break;
5628
5629 case AARCH64_OPND_SVE_Zm3_INDEX:
5630 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5631 case AARCH64_OPND_SVE_Zm4_INDEX:
5632 case AARCH64_OPND_SVE_Zn_INDEX:
5633 reg_type = REG_TYPE_ZN;
5634 goto vector_reg_index;
5635
5636 case AARCH64_OPND_Ed:
5637 case AARCH64_OPND_En:
5638 case AARCH64_OPND_Em:
5639 case AARCH64_OPND_Em16:
5640 case AARCH64_OPND_SM3_IMM2:
5641 reg_type = REG_TYPE_VN;
5642 vector_reg_index:
5643 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5644 if (val == PARSE_FAIL)
5645 {
5646 first_error (_(get_reg_expected_msg (reg_type)));
5647 goto failure;
5648 }
5649 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5650 goto failure;
5651
5652 info->reglane.regno = val;
5653 info->reglane.index = vectype.index;
5654 info->qualifier = vectype_to_qualifier (&vectype);
5655 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5656 goto failure;
5657 break;
5658
5659 case AARCH64_OPND_SVE_ZnxN:
5660 case AARCH64_OPND_SVE_ZtxN:
5661 reg_type = REG_TYPE_ZN;
5662 goto vector_reg_list;
5663
5664 case AARCH64_OPND_LVn:
5665 case AARCH64_OPND_LVt:
5666 case AARCH64_OPND_LVt_AL:
5667 case AARCH64_OPND_LEt:
5668 reg_type = REG_TYPE_VN;
5669 vector_reg_list:
5670 if (reg_type == REG_TYPE_ZN
5671 && get_opcode_dependent_value (opcode) == 1
5672 && *str != '{')
5673 {
5674 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5675 if (val == PARSE_FAIL)
5676 {
5677 first_error (_(get_reg_expected_msg (reg_type)));
5678 goto failure;
5679 }
5680 info->reglist.first_regno = val;
5681 info->reglist.num_regs = 1;
5682 }
5683 else
5684 {
5685 val = parse_vector_reg_list (&str, reg_type, &vectype);
5686 if (val == PARSE_FAIL)
5687 goto failure;
5688 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5689 {
5690 set_fatal_syntax_error (_("invalid register list"));
5691 goto failure;
5692 }
5693 info->reglist.first_regno = (val >> 2) & 0x1f;
5694 info->reglist.num_regs = (val & 0x3) + 1;
5695 }
5696 if (operands[i] == AARCH64_OPND_LEt)
5697 {
5698 if (!(vectype.defined & NTA_HASINDEX))
5699 goto failure;
5700 info->reglist.has_index = 1;
5701 info->reglist.index = vectype.index;
5702 }
5703 else
5704 {
5705 if (vectype.defined & NTA_HASINDEX)
5706 goto failure;
5707 if (!(vectype.defined & NTA_HASTYPE))
5708 {
5709 if (reg_type == REG_TYPE_ZN)
5710 set_fatal_syntax_error (_("missing type suffix"));
5711 goto failure;
5712 }
5713 }
5714 info->qualifier = vectype_to_qualifier (&vectype);
5715 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5716 goto failure;
5717 break;
5718
5719 case AARCH64_OPND_CRn:
5720 case AARCH64_OPND_CRm:
5721 {
5722 char prefix = *(str++);
5723 if (prefix != 'c' && prefix != 'C')
5724 goto failure;
5725
5726 po_imm_nc_or_fail ();
5727 if (val > 15)
5728 {
5729 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5730 goto failure;
5731 }
5732 info->qualifier = AARCH64_OPND_QLF_CR;
5733 info->imm.value = val;
5734 break;
5735 }
5736
5737 case AARCH64_OPND_SHLL_IMM:
5738 case AARCH64_OPND_IMM_VLSR:
5739 po_imm_or_fail (1, 64);
5740 info->imm.value = val;
5741 break;
5742
5743 case AARCH64_OPND_CCMP_IMM:
5744 case AARCH64_OPND_SIMM5:
5745 case AARCH64_OPND_FBITS:
5746 case AARCH64_OPND_UIMM4:
5747 case AARCH64_OPND_UIMM4_ADDG:
5748 case AARCH64_OPND_UIMM10:
5749 case AARCH64_OPND_UIMM3_OP1:
5750 case AARCH64_OPND_UIMM3_OP2:
5751 case AARCH64_OPND_IMM_VLSL:
5752 case AARCH64_OPND_IMM:
5753 case AARCH64_OPND_IMM_2:
5754 case AARCH64_OPND_WIDTH:
5755 case AARCH64_OPND_SVE_INV_LIMM:
5756 case AARCH64_OPND_SVE_LIMM:
5757 case AARCH64_OPND_SVE_LIMM_MOV:
5758 case AARCH64_OPND_SVE_SHLIMM_PRED:
5759 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5760 case AARCH64_OPND_SVE_SHRIMM_PRED:
5761 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5762 case AARCH64_OPND_SVE_SIMM5:
5763 case AARCH64_OPND_SVE_SIMM5B:
5764 case AARCH64_OPND_SVE_SIMM6:
5765 case AARCH64_OPND_SVE_SIMM8:
5766 case AARCH64_OPND_SVE_UIMM3:
5767 case AARCH64_OPND_SVE_UIMM7:
5768 case AARCH64_OPND_SVE_UIMM8:
5769 case AARCH64_OPND_SVE_UIMM8_53:
5770 case AARCH64_OPND_IMM_ROT1:
5771 case AARCH64_OPND_IMM_ROT2:
5772 case AARCH64_OPND_IMM_ROT3:
5773 case AARCH64_OPND_SVE_IMM_ROT1:
5774 case AARCH64_OPND_SVE_IMM_ROT2:
5775 po_imm_nc_or_fail ();
5776 info->imm.value = val;
5777 break;
5778
5779 case AARCH64_OPND_SVE_AIMM:
5780 case AARCH64_OPND_SVE_ASIMM:
5781 po_imm_nc_or_fail ();
5782 info->imm.value = val;
5783 skip_whitespace (str);
5784 if (skip_past_comma (&str))
5785 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5786 else
5787 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5788 break;
5789
5790 case AARCH64_OPND_SVE_PATTERN:
5791 po_enum_or_fail (aarch64_sve_pattern_array);
5792 info->imm.value = val;
5793 break;
5794
5795 case AARCH64_OPND_SVE_PATTERN_SCALED:
5796 po_enum_or_fail (aarch64_sve_pattern_array);
5797 info->imm.value = val;
5798 if (skip_past_comma (&str)
5799 && !parse_shift (&str, info, SHIFTED_MUL))
5800 goto failure;
5801 if (!info->shifter.operator_present)
5802 {
5803 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5804 info->shifter.kind = AARCH64_MOD_MUL;
5805 info->shifter.amount = 1;
5806 }
5807 break;
5808
5809 case AARCH64_OPND_SVE_PRFOP:
5810 po_enum_or_fail (aarch64_sve_prfop_array);
5811 info->imm.value = val;
5812 break;
5813
5814 case AARCH64_OPND_UIMM7:
5815 po_imm_or_fail (0, 127);
5816 info->imm.value = val;
5817 break;
5818
5819 case AARCH64_OPND_IDX:
5820 case AARCH64_OPND_MASK:
5821 case AARCH64_OPND_BIT_NUM:
5822 case AARCH64_OPND_IMMR:
5823 case AARCH64_OPND_IMMS:
5824 po_imm_or_fail (0, 63);
5825 info->imm.value = val;
5826 break;
5827
5828 case AARCH64_OPND_IMM0:
5829 po_imm_nc_or_fail ();
5830 if (val != 0)
5831 {
5832 set_fatal_syntax_error (_("immediate zero expected"));
5833 goto failure;
5834 }
5835 info->imm.value = 0;
5836 break;
5837
5838 case AARCH64_OPND_FPIMM0:
5839 {
5840 int qfloat;
5841 bfd_boolean res1 = FALSE, res2 = FALSE;
5842 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5843 it is probably not worth the effort to support it. */
5844 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5845 imm_reg_type))
5846 && (error_p ()
5847 || !(res2 = parse_constant_immediate (&str, &val,
5848 imm_reg_type))))
5849 goto failure;
5850 if ((res1 && qfloat == 0) || (res2 && val == 0))
5851 {
5852 info->imm.value = 0;
5853 info->imm.is_fp = 1;
5854 break;
5855 }
5856 set_fatal_syntax_error (_("immediate zero expected"));
5857 goto failure;
5858 }
5859
5860 case AARCH64_OPND_IMM_MOV:
5861 {
5862 char *saved = str;
5863 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5864 reg_name_p (str, REG_TYPE_VN))
5865 goto failure;
5866 str = saved;
5867 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5868 GE_OPT_PREFIX, 1));
5869 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5870 later. fix_mov_imm_insn will try to determine a machine
5871 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5872 message if the immediate cannot be moved by a single
5873 instruction. */
5874 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5875 inst.base.operands[i].skip = 1;
5876 }
5877 break;
5878
5879 case AARCH64_OPND_SIMD_IMM:
5880 case AARCH64_OPND_SIMD_IMM_SFT:
5881 if (! parse_big_immediate (&str, &val, imm_reg_type))
5882 goto failure;
5883 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5884 /* addr_off_p */ 0,
5885 /* need_libopcodes_p */ 1,
5886 /* skip_p */ 1);
5887 /* Parse shift.
5888 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5889 shift, we don't check it here; we leave the checking to
5890 the libopcodes (operand_general_constraint_met_p). By
5891 doing this, we achieve better diagnostics. */
5892 if (skip_past_comma (&str)
5893 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5894 goto failure;
5895 if (!info->shifter.operator_present
5896 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5897 {
5898 /* Default to LSL if not present. Libopcodes prefers shifter
5899 kind to be explicit. */
5900 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5901 info->shifter.kind = AARCH64_MOD_LSL;
5902 }
5903 break;
5904
5905 case AARCH64_OPND_FPIMM:
5906 case AARCH64_OPND_SIMD_FPIMM:
5907 case AARCH64_OPND_SVE_FPIMM8:
5908 {
5909 int qfloat;
5910 bfd_boolean dp_p;
5911
5912 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5913 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5914 || !aarch64_imm_float_p (qfloat))
5915 {
5916 if (!error_p ())
5917 set_fatal_syntax_error (_("invalid floating-point"
5918 " constant"));
5919 goto failure;
5920 }
5921 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5922 inst.base.operands[i].imm.is_fp = 1;
5923 }
5924 break;
5925
5926 case AARCH64_OPND_SVE_I1_HALF_ONE:
5927 case AARCH64_OPND_SVE_I1_HALF_TWO:
5928 case AARCH64_OPND_SVE_I1_ZERO_ONE:
5929 {
5930 int qfloat;
5931 bfd_boolean dp_p;
5932
5933 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5934 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5935 {
5936 if (!error_p ())
5937 set_fatal_syntax_error (_("invalid floating-point"
5938 " constant"));
5939 goto failure;
5940 }
5941 inst.base.operands[i].imm.value = qfloat;
5942 inst.base.operands[i].imm.is_fp = 1;
5943 }
5944 break;
5945
5946 case AARCH64_OPND_LIMM:
5947 po_misc_or_fail (parse_shifter_operand (&str, info,
5948 SHIFTED_LOGIC_IMM));
5949 if (info->shifter.operator_present)
5950 {
5951 set_fatal_syntax_error
5952 (_("shift not allowed for bitmask immediate"));
5953 goto failure;
5954 }
5955 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5956 /* addr_off_p */ 0,
5957 /* need_libopcodes_p */ 1,
5958 /* skip_p */ 1);
5959 break;
5960
5961 case AARCH64_OPND_AIMM:
5962 if (opcode->op == OP_ADD)
5963 /* ADD may have relocation types. */
5964 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5965 SHIFTED_ARITH_IMM));
5966 else
5967 po_misc_or_fail (parse_shifter_operand (&str, info,
5968 SHIFTED_ARITH_IMM));
5969 switch (inst.reloc.type)
5970 {
5971 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5972 info->shifter.amount = 12;
5973 break;
5974 case BFD_RELOC_UNUSED:
5975 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5976 if (info->shifter.kind != AARCH64_MOD_NONE)
5977 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5978 inst.reloc.pc_rel = 0;
5979 break;
5980 default:
5981 break;
5982 }
5983 info->imm.value = 0;
5984 if (!info->shifter.operator_present)
5985 {
5986 /* Default to LSL if not present. Libopcodes prefers shifter
5987 kind to be explicit. */
5988 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5989 info->shifter.kind = AARCH64_MOD_LSL;
5990 }
5991 break;
5992
5993 case AARCH64_OPND_HALF:
5994 {
5995 /* #<imm16> or relocation. */
5996 int internal_fixup_p;
5997 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5998 if (internal_fixup_p)
5999 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6000 skip_whitespace (str);
6001 if (skip_past_comma (&str))
6002 {
6003 /* {, LSL #<shift>} */
6004 if (! aarch64_gas_internal_fixup_p ())
6005 {
6006 set_fatal_syntax_error (_("can't mix relocation modifier "
6007 "with explicit shift"));
6008 goto failure;
6009 }
6010 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6011 }
6012 else
6013 inst.base.operands[i].shifter.amount = 0;
6014 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6015 inst.base.operands[i].imm.value = 0;
6016 if (! process_movw_reloc_info ())
6017 goto failure;
6018 }
6019 break;
6020
6021 case AARCH64_OPND_EXCEPTION:
6022 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6023 imm_reg_type));
6024 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6025 /* addr_off_p */ 0,
6026 /* need_libopcodes_p */ 0,
6027 /* skip_p */ 1);
6028 break;
6029
6030 case AARCH64_OPND_NZCV:
6031 {
6032 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
6033 if (nzcv != NULL)
6034 {
6035 str += 4;
6036 info->imm.value = nzcv->value;
6037 break;
6038 }
6039 po_imm_or_fail (0, 15);
6040 info->imm.value = val;
6041 }
6042 break;
6043
6044 case AARCH64_OPND_COND:
6045 case AARCH64_OPND_COND1:
6046 {
6047 char *start = str;
6048 do
6049 str++;
6050 while (ISALPHA (*str));
6051 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
6052 if (info->cond == NULL)
6053 {
6054 set_syntax_error (_("invalid condition"));
6055 goto failure;
6056 }
6057 else if (operands[i] == AARCH64_OPND_COND1
6058 && (info->cond->value & 0xe) == 0xe)
6059 {
6060 /* Do not allow AL or NV. */
6061 set_default_error ();
6062 goto failure;
6063 }
6064 }
6065 break;
6066
6067 case AARCH64_OPND_ADDR_ADRP:
6068 po_misc_or_fail (parse_adrp (&str));
6069 /* Clear the value as operand needs to be relocated. */
6070 info->imm.value = 0;
6071 break;
6072
6073 case AARCH64_OPND_ADDR_PCREL14:
6074 case AARCH64_OPND_ADDR_PCREL19:
6075 case AARCH64_OPND_ADDR_PCREL21:
6076 case AARCH64_OPND_ADDR_PCREL26:
6077 po_misc_or_fail (parse_address (&str, info));
6078 if (!info->addr.pcrel)
6079 {
6080 set_syntax_error (_("invalid pc-relative address"));
6081 goto failure;
6082 }
6083 if (inst.gen_lit_pool
6084 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6085 {
6086 /* Only permit "=value" in the literal load instructions.
6087 The literal will be generated by programmer_friendly_fixup. */
6088 set_syntax_error (_("invalid use of \"=immediate\""));
6089 goto failure;
6090 }
6091 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6092 {
6093 set_syntax_error (_("unrecognized relocation suffix"));
6094 goto failure;
6095 }
6096 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6097 {
6098 info->imm.value = inst.reloc.exp.X_add_number;
6099 inst.reloc.type = BFD_RELOC_UNUSED;
6100 }
6101 else
6102 {
6103 info->imm.value = 0;
6104 if (inst.reloc.type == BFD_RELOC_UNUSED)
6105 switch (opcode->iclass)
6106 {
6107 case compbranch:
6108 case condbranch:
6109 /* e.g. CBZ or B.COND */
6110 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6111 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6112 break;
6113 case testbranch:
6114 /* e.g. TBZ */
6115 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6116 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6117 break;
6118 case branch_imm:
6119 /* e.g. B or BL */
6120 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6121 inst.reloc.type =
6122 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6123 : BFD_RELOC_AARCH64_JUMP26;
6124 break;
6125 case loadlit:
6126 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6127 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6128 break;
6129 case pcreladdr:
6130 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6131 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6132 break;
6133 default:
6134 gas_assert (0);
6135 abort ();
6136 }
6137 inst.reloc.pc_rel = 1;
6138 }
6139 break;
6140
6141 case AARCH64_OPND_ADDR_SIMPLE:
6142 case AARCH64_OPND_ADDR_SIMPLE_2:
6143 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6144 {
6145 /* [<Xn|SP>{, #<simm>}] */
6146 char *start = str;
6147 /* First use the normal address-parsing routines, to get
6148 the usual syntax errors. */
6149 po_misc_or_fail (parse_address (&str, info));
6150 if (info->addr.pcrel || info->addr.offset.is_reg
6151 || !info->addr.preind || info->addr.postind
6152 || (info->addr.writeback
6153 && operands[i] != AARCH64_OPND_ADDR_SIMPLE_2))
6154 {
6155 set_syntax_error (_("invalid addressing mode"));
6156 goto failure;
6157 }
6158
6159 /* Then retry, matching the specific syntax of these addresses. */
6160 str = start;
6161 po_char_or_fail ('[');
6162 po_reg_or_fail (REG_TYPE_R64_SP);
6163 /* Accept optional ", #0". */
6164 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6165 && skip_past_char (&str, ','))
6166 {
6167 skip_past_char (&str, '#');
6168 if (! skip_past_char (&str, '0'))
6169 {
6170 set_fatal_syntax_error
6171 (_("the optional immediate offset can only be 0"));
6172 goto failure;
6173 }
6174 }
6175 po_char_or_fail (']');
6176 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE_2)
6177 po_char_or_fail ('!');
6178 break;
6179 }
6180
6181 case AARCH64_OPND_ADDR_REGOFF:
6182 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6183 po_misc_or_fail (parse_address (&str, info));
6184 regoff_addr:
6185 if (info->addr.pcrel || !info->addr.offset.is_reg
6186 || !info->addr.preind || info->addr.postind
6187 || info->addr.writeback)
6188 {
6189 set_syntax_error (_("invalid addressing mode"));
6190 goto failure;
6191 }
6192 if (!info->shifter.operator_present)
6193 {
6194 /* Default to LSL if not present. Libopcodes prefers shifter
6195 kind to be explicit. */
6196 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6197 info->shifter.kind = AARCH64_MOD_LSL;
6198 }
6199 /* Qualifier to be deduced by libopcodes. */
6200 break;
6201
6202 case AARCH64_OPND_ADDR_SIMM7:
6203 po_misc_or_fail (parse_address (&str, info));
6204 if (info->addr.pcrel || info->addr.offset.is_reg
6205 || (!info->addr.preind && !info->addr.postind))
6206 {
6207 set_syntax_error (_("invalid addressing mode"));
6208 goto failure;
6209 }
6210 if (inst.reloc.type != BFD_RELOC_UNUSED)
6211 {
6212 set_syntax_error (_("relocation not allowed"));
6213 goto failure;
6214 }
6215 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6216 /* addr_off_p */ 1,
6217 /* need_libopcodes_p */ 1,
6218 /* skip_p */ 0);
6219 break;
6220
6221 case AARCH64_OPND_ADDR_SIMM9:
6222 case AARCH64_OPND_ADDR_SIMM9_2:
6223 case AARCH64_OPND_ADDR_SIMM11:
6224 case AARCH64_OPND_ADDR_SIMM13:
6225 po_misc_or_fail (parse_address (&str, info));
6226 if (info->addr.pcrel || info->addr.offset.is_reg
6227 || (!info->addr.preind && !info->addr.postind)
6228 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6229 && info->addr.writeback))
6230 {
6231 set_syntax_error (_("invalid addressing mode"));
6232 goto failure;
6233 }
6234 if (inst.reloc.type != BFD_RELOC_UNUSED)
6235 {
6236 set_syntax_error (_("relocation not allowed"));
6237 goto failure;
6238 }
6239 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6240 /* addr_off_p */ 1,
6241 /* need_libopcodes_p */ 1,
6242 /* skip_p */ 0);
6243 break;
6244
6245 case AARCH64_OPND_ADDR_SIMM10:
6246 case AARCH64_OPND_ADDR_OFFSET:
6247 po_misc_or_fail (parse_address (&str, info));
6248 if (info->addr.pcrel || info->addr.offset.is_reg
6249 || !info->addr.preind || info->addr.postind)
6250 {
6251 set_syntax_error (_("invalid addressing mode"));
6252 goto failure;
6253 }
6254 if (inst.reloc.type != BFD_RELOC_UNUSED)
6255 {
6256 set_syntax_error (_("relocation not allowed"));
6257 goto failure;
6258 }
6259 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6260 /* addr_off_p */ 1,
6261 /* need_libopcodes_p */ 1,
6262 /* skip_p */ 0);
6263 break;
6264
6265 case AARCH64_OPND_ADDR_UIMM12:
6266 po_misc_or_fail (parse_address (&str, info));
6267 if (info->addr.pcrel || info->addr.offset.is_reg
6268 || !info->addr.preind || info->addr.writeback)
6269 {
6270 set_syntax_error (_("invalid addressing mode"));
6271 goto failure;
6272 }
6273 if (inst.reloc.type == BFD_RELOC_UNUSED)
6274 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6275 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6276 || (inst.reloc.type
6277 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6278 || (inst.reloc.type
6279 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6280 || (inst.reloc.type
6281 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6282 || (inst.reloc.type
6283 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6284 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6285 /* Leave qualifier to be determined by libopcodes. */
6286 break;
6287
6288 case AARCH64_OPND_SIMD_ADDR_POST:
6289 /* [<Xn|SP>], <Xm|#<amount>> */
6290 po_misc_or_fail (parse_address (&str, info));
6291 if (!info->addr.postind || !info->addr.writeback)
6292 {
6293 set_syntax_error (_("invalid addressing mode"));
6294 goto failure;
6295 }
6296 if (!info->addr.offset.is_reg)
6297 {
6298 if (inst.reloc.exp.X_op == O_constant)
6299 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6300 else
6301 {
6302 set_fatal_syntax_error
6303 (_("writeback value must be an immediate constant"));
6304 goto failure;
6305 }
6306 }
6307 /* No qualifier. */
6308 break;
6309
6310 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6311 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6312 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6313 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6314 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6315 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6316 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6317 case AARCH64_OPND_SVE_ADDR_RI_U6:
6318 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6319 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6320 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6321 /* [X<n>{, #imm, MUL VL}]
6322 [X<n>{, #imm}]
6323 but recognizing SVE registers. */
6324 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6325 &offset_qualifier));
6326 if (base_qualifier != AARCH64_OPND_QLF_X)
6327 {
6328 set_syntax_error (_("invalid addressing mode"));
6329 goto failure;
6330 }
6331 sve_regimm:
6332 if (info->addr.pcrel || info->addr.offset.is_reg
6333 || !info->addr.preind || info->addr.writeback)
6334 {
6335 set_syntax_error (_("invalid addressing mode"));
6336 goto failure;
6337 }
6338 if (inst.reloc.type != BFD_RELOC_UNUSED
6339 || inst.reloc.exp.X_op != O_constant)
6340 {
6341 /* Make sure this has priority over
6342 "invalid addressing mode". */
6343 set_fatal_syntax_error (_("constant offset required"));
6344 goto failure;
6345 }
6346 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6347 break;
6348
6349 case AARCH64_OPND_SVE_ADDR_R:
6350 /* [<Xn|SP>{, <R><m>}]
6351 but recognizing SVE registers. */
6352 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6353 &offset_qualifier));
6354 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6355 {
6356 offset_qualifier = AARCH64_OPND_QLF_X;
6357 info->addr.offset.is_reg = 1;
6358 info->addr.offset.regno = 31;
6359 }
6360 else if (base_qualifier != AARCH64_OPND_QLF_X
6361 || offset_qualifier != AARCH64_OPND_QLF_X)
6362 {
6363 set_syntax_error (_("invalid addressing mode"));
6364 goto failure;
6365 }
6366 goto regoff_addr;
6367
6368 case AARCH64_OPND_SVE_ADDR_RR:
6369 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6370 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6371 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6372 case AARCH64_OPND_SVE_ADDR_RX:
6373 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6374 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6375 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6376 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6377 but recognizing SVE registers. */
6378 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6379 &offset_qualifier));
6380 if (base_qualifier != AARCH64_OPND_QLF_X
6381 || offset_qualifier != AARCH64_OPND_QLF_X)
6382 {
6383 set_syntax_error (_("invalid addressing mode"));
6384 goto failure;
6385 }
6386 goto regoff_addr;
6387
6388 case AARCH64_OPND_SVE_ADDR_RZ:
6389 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6390 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6391 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6392 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6393 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6394 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6395 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6396 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6397 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6398 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6399 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6400 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6401 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6402 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6403 &offset_qualifier));
6404 if (base_qualifier != AARCH64_OPND_QLF_X
6405 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6406 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6407 {
6408 set_syntax_error (_("invalid addressing mode"));
6409 goto failure;
6410 }
6411 info->qualifier = offset_qualifier;
6412 goto regoff_addr;
6413
6414 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6415 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6416 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6417 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6418 /* [Z<n>.<T>{, #imm}] */
6419 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6420 &offset_qualifier));
6421 if (base_qualifier != AARCH64_OPND_QLF_S_S
6422 && base_qualifier != AARCH64_OPND_QLF_S_D)
6423 {
6424 set_syntax_error (_("invalid addressing mode"));
6425 goto failure;
6426 }
6427 info->qualifier = base_qualifier;
6428 goto sve_regimm;
6429
6430 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6431 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6432 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6433 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6434 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6435
6436 We don't reject:
6437
6438 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6439
6440 here since we get better error messages by leaving it to
6441 the qualifier checking routines. */
6442 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6443 &offset_qualifier));
6444 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6445 && base_qualifier != AARCH64_OPND_QLF_S_D)
6446 || offset_qualifier != base_qualifier)
6447 {
6448 set_syntax_error (_("invalid addressing mode"));
6449 goto failure;
6450 }
6451 info->qualifier = base_qualifier;
6452 goto regoff_addr;
6453
6454 case AARCH64_OPND_SYSREG:
6455 {
6456 uint32_t sysreg_flags;
6457 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6458 &sysreg_flags)) == PARSE_FAIL)
6459 {
6460 set_syntax_error (_("unknown or missing system register name"));
6461 goto failure;
6462 }
6463 inst.base.operands[i].sysreg.value = val;
6464 inst.base.operands[i].sysreg.flags = sysreg_flags;
6465 break;
6466 }
6467
6468 case AARCH64_OPND_PSTATEFIELD:
6469 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6470 == PARSE_FAIL)
6471 {
6472 set_syntax_error (_("unknown or missing PSTATE field name"));
6473 goto failure;
6474 }
6475 inst.base.operands[i].pstatefield = val;
6476 break;
6477
6478 case AARCH64_OPND_SYSREG_IC:
6479 inst.base.operands[i].sysins_op =
6480 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6481 goto sys_reg_ins;
6482
6483 case AARCH64_OPND_SYSREG_DC:
6484 inst.base.operands[i].sysins_op =
6485 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6486 goto sys_reg_ins;
6487
6488 case AARCH64_OPND_SYSREG_AT:
6489 inst.base.operands[i].sysins_op =
6490 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6491 goto sys_reg_ins;
6492
6493 case AARCH64_OPND_SYSREG_SR:
6494 inst.base.operands[i].sysins_op =
6495 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6496 goto sys_reg_ins;
6497
6498 case AARCH64_OPND_SYSREG_TLBI:
6499 inst.base.operands[i].sysins_op =
6500 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6501 sys_reg_ins:
6502 if (inst.base.operands[i].sysins_op == NULL)
6503 {
6504 set_fatal_syntax_error ( _("unknown or missing operation name"));
6505 goto failure;
6506 }
6507 break;
6508
6509 case AARCH64_OPND_BARRIER:
6510 case AARCH64_OPND_BARRIER_ISB:
6511 val = parse_barrier (&str);
6512 if (val != PARSE_FAIL
6513 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6514 {
6515 /* ISB only accepts options name 'sy'. */
6516 set_syntax_error
6517 (_("the specified option is not accepted in ISB"));
6518 /* Turn off backtrack as this optional operand is present. */
6519 backtrack_pos = 0;
6520 goto failure;
6521 }
6522 /* This is an extension to accept a 0..15 immediate. */
6523 if (val == PARSE_FAIL)
6524 po_imm_or_fail (0, 15);
6525 info->barrier = aarch64_barrier_options + val;
6526 break;
6527
6528 case AARCH64_OPND_PRFOP:
6529 val = parse_pldop (&str);
6530 /* This is an extension to accept a 0..31 immediate. */
6531 if (val == PARSE_FAIL)
6532 po_imm_or_fail (0, 31);
6533 inst.base.operands[i].prfop = aarch64_prfops + val;
6534 break;
6535
6536 case AARCH64_OPND_BARRIER_PSB:
6537 val = parse_barrier_psb (&str, &(info->hint_option));
6538 if (val == PARSE_FAIL)
6539 goto failure;
6540 break;
6541
6542 case AARCH64_OPND_BTI_TARGET:
6543 val = parse_bti_operand (&str, &(info->hint_option));
6544 if (val == PARSE_FAIL)
6545 goto failure;
6546 break;
6547
6548 default:
6549 as_fatal (_("unhandled operand code %d"), operands[i]);
6550 }
6551
6552 /* If we get here, this operand was successfully parsed. */
6553 inst.base.operands[i].present = 1;
6554 continue;
6555
6556 failure:
6557 /* The parse routine should already have set the error, but in case
6558 not, set a default one here. */
6559 if (! error_p ())
6560 set_default_error ();
6561
6562 if (! backtrack_pos)
6563 goto parse_operands_return;
6564
6565 {
6566 /* We reach here because this operand is marked as optional, and
6567 either no operand was supplied or the operand was supplied but it
6568 was syntactically incorrect. In the latter case we report an
6569 error. In the former case we perform a few more checks before
6570 dropping through to the code to insert the default operand. */
6571
6572 char *tmp = backtrack_pos;
6573 char endchar = END_OF_INSN;
6574
6575 if (i != (aarch64_num_of_operands (opcode) - 1))
6576 endchar = ',';
6577 skip_past_char (&tmp, ',');
6578
6579 if (*tmp != endchar)
6580 /* The user has supplied an operand in the wrong format. */
6581 goto parse_operands_return;
6582
6583 /* Make sure there is not a comma before the optional operand.
6584 For example the fifth operand of 'sys' is optional:
6585
6586 sys #0,c0,c0,#0, <--- wrong
6587 sys #0,c0,c0,#0 <--- correct. */
6588 if (comma_skipped_p && i && endchar == END_OF_INSN)
6589 {
6590 set_fatal_syntax_error
6591 (_("unexpected comma before the omitted optional operand"));
6592 goto parse_operands_return;
6593 }
6594 }
6595
6596 /* Reaching here means we are dealing with an optional operand that is
6597 omitted from the assembly line. */
6598 gas_assert (optional_operand_p (opcode, i));
6599 info->present = 0;
6600 process_omitted_operand (operands[i], opcode, i, info);
6601
6602 /* Try again, skipping the optional operand at backtrack_pos. */
6603 str = backtrack_pos;
6604 backtrack_pos = 0;
6605
6606 /* Clear any error record after the omitted optional operand has been
6607 successfully handled. */
6608 clear_error ();
6609 }
6610
6611 /* Check if we have parsed all the operands. */
6612 if (*str != '\0' && ! error_p ())
6613 {
6614 /* Set I to the index of the last present operand; this is
6615 for the purpose of diagnostics. */
6616 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6617 ;
6618 set_fatal_syntax_error
6619 (_("unexpected characters following instruction"));
6620 }
6621
6622 parse_operands_return:
6623
6624 if (error_p ())
6625 {
6626 DEBUG_TRACE ("parsing FAIL: %s - %s",
6627 operand_mismatch_kind_names[get_error_kind ()],
6628 get_error_message ());
6629 /* Record the operand error properly; this is useful when there
6630 are multiple instruction templates for a mnemonic name, so that
6631 later on, we can select the error that most closely describes
6632 the problem. */
6633 record_operand_error (opcode, i, get_error_kind (),
6634 get_error_message ());
6635 return FALSE;
6636 }
6637 else
6638 {
6639 DEBUG_TRACE ("parsing SUCCESS");
6640 return TRUE;
6641 }
6642 }
6643
6644 /* It does some fix-up to provide some programmer friendly feature while
6645 keeping the libopcodes happy, i.e. libopcodes only accepts
6646 the preferred architectural syntax.
6647 Return FALSE if there is any failure; otherwise return TRUE. */
6648
static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  /* Dispatch on the instruction class; each case rewrites operand
     qualifiers (or fills the literal pool) so that the relaxed syntax
     accepted here matches what libopcodes will encode.  */
  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* With a W register the bit number must be 0-31.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* Accept the Wn spelling by re-qualifying the register as Xn;
	     the bit number alone implies the width (see above).  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW reads 32 bits from the pool regardless of the
	     destination register width.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	/* The extended register is always the 2nd or 3rd operand of this
	   instruction class.  */
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6751
6752 /* Check for loads and stores that will cause unpredictable behavior. */
6753
static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.
	 Operand 0 is the transfer register, operand 1 the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstgv_indexed:
      /* Load operations must load different registers.  Bit 22 of the
	 encoding distinguishes the load forms of this class.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].addr.base_regno)
	as_warn (_("unpredictable load of register -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 Operands 0 and 1 are the register pair, operand 2 the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      /* It is unpredictable if the destination and status registers are the
	 same.  Operand 0 is the status register, operands 1/2 the transfer
	 registers.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[1].reg.regno
	      || opnds[0].reg.regno == opnds[2].reg.regno))
	as_warn (_("unpredictable: identical transfer and status registers"
		   " --`%s'"),
		 str);

      break;

    default:
      break;
    }
}
6821
6822 static void
6823 force_automatic_sequence_close (void)
6824 {
6825 if (now_instr_sequence.instr)
6826 {
6827 as_warn (_("previous `%s' sequence has not been closed"),
6828 now_instr_sequence.instr->opcode->name);
6829 init_insn_sequence (NULL, &now_instr_sequence);
6830 }
6831 }
6832
6833 /* A wrapper function to interface with libopcodes on encoding and
6834 record the error message if there is any.
6835
6836 Return TRUE on success; otherwise return FALSE. */
6837
6838 static bfd_boolean
6839 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6840 aarch64_insn *code)
6841 {
6842 aarch64_operand_error error_info;
6843 memset (&error_info, '\0', sizeof (error_info));
6844 error_info.kind = AARCH64_OPDE_NIL;
6845 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
6846 && !error_info.non_fatal)
6847 return TRUE;
6848
6849 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6850 record_operand_error_info (opcode, &error_info);
6851 return error_info.non_fatal;
6852 }
6853
6854 #ifdef DEBUG_AARCH64
6855 static inline void
6856 dump_opcode_operands (const aarch64_opcode *opcode)
6857 {
6858 int i = 0;
6859 while (opcode->operands[i] != AARCH64_OPND_NIL)
6860 {
6861 aarch64_verbose ("\t\t opnd%d: %s", i,
6862 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
6863 ? aarch64_get_operand_name (opcode->operands[i])
6864 : aarch64_get_operand_desc (opcode->operands[i]));
6865 ++i;
6866 }
6867 }
6868 #endif /* DEBUG_AARCH64 */
6869
6870 /* This is the guts of the machine-dependent assembler. STR points to a
6871 machine dependent instruction. This function is supposed to emit
6872 the frags/bytes it assembles to. */
6873
void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Look up the mnemonic; on success P is advanced past it so that it
     points at the operand list.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Resetting the global instruction record clears the condition parsed
     from the mnemonic suffix, so preserve it across the reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* A template wins as soon as parsing, fix-up and encoding all
	 succeed; otherwise fall through to try the next template.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, TRUE);
	  return;
	}

      /* This template failed; reset the instruction state before trying
	 the next candidate, again keeping the parsed condition.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, FALSE);
}
7009
7010 /* Various frobbings of labels and their addresses. */
7011
7012 void
7013 aarch64_start_line_hook (void)
7014 {
7015 last_label_seen = NULL;
7016 }
7017
7018 void
7019 aarch64_frob_label (symbolS * sym)
7020 {
7021 last_label_seen = sym;
7022
7023 dwarf2_emit_label (sym);
7024 }
7025
7026 void
7027 aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
7028 {
7029 /* Check to see if we have a block to close. */
7030 force_automatic_sequence_close ();
7031 }
7032
7033 int
7034 aarch64_data_in_code (void)
7035 {
7036 if (!strncmp (input_line_pointer + 1, "data:", 5))
7037 {
7038 *input_line_pointer = '/';
7039 input_line_pointer += 5;
7040 *input_line_pointer = 0;
7041 return 1;
7042 }
7043
7044 return 0;
7045 }
7046
/* Strip a trailing "/data" marker from NAME in place, if present.
   Returns NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  /* Use size_t for the strlen result: the old `int' local silently
     narrowed and mixed signedness in the comparison below.  */
  size_t len = strlen (name);

  if (len > 5 && streq (name + len - 5, "/data"))
    name[len - 5] = '\0';

  return name;
}
7057 \f
7058 /* Table of all register names defined by default. The user can
7059 define additional names with .req. Note that all register names
7060 should appear in both upper and lowercase variants. Some registers
7061 also have mixed-case names. */
7062
/* REGDEF defines a register name entry; REGDEF_ALIAS marks the entry as
   an alternative name for the register (note the FALSE in the final
   field).  REGNUM/REGSET16/REGSET31/REGSET expand to numbered runs of
   entries sharing a one-letter prefix.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  Note that register 31 is handled separately
     below: it is SP/WSP or XZR/WZR depending on context.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  Only p0-p15 exist.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7127
/* Names for the NZCV condition-flag immediate.  An uppercase letter in
   the name sets the corresponding flag bit, a lowercase letter clears
   it; B() packs the four flags into the 4-bit value with N as bit 3
   down to V as bit 0.  All 16 combinations are listed.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7165 \f
7166 /* MD interface: bits in the object file. */
7167
7168 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7169 for use in the a.out file, and stores them in the array pointed to by buf.
7170 This knows about the endian-ness of the target machine and does
7171 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7172 2 (short) and 4 (long) Floating numbers are put out as a series of
7173 LITTLENUMS (shorts, here at least). */
7174
7175 void
7176 md_number_to_chars (char *buf, valueT val, int n)
7177 {
7178 if (target_big_endian)
7179 number_to_chars_bigendian (buf, val, n);
7180 else
7181 number_to_chars_littleendian (buf, val, n);
7182 }
7183
7184 /* MD interface: Sections. */
7185
7186 /* Estimate the size of a frag before relaxing. Assume everything fits in
7187 4 bytes. */
7188
7189 int
7190 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
7191 {
7192 fragp->fr_var = 4;
7193 return 4;
7194 }
7195
7196 /* Round up a section size to the appropriate boundary. */
7197
7198 valueT
7199 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
7200 {
7201 return size;
7202 }
7203
7204 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7205 of an rs_align_code fragment.
7206
7207 Here we fill the frag with the appropriate info for padding the
7208 output stream. The resulting frag will consist of a fixed (fr_fix)
7209 and of a repeating (fr_var) part.
7210
7211 The fixed content is always emitted before the repeating content and
7212 these two parts are used as follows in constructing the output:
7213 - the fixed part will be used to align to a valid instruction word
7214 boundary, in case that we start at a misaligned address; as no
7215 executable instruction can live at the misaligned location, we
7216 simply fill with zeros;
7217 - the variable part will be used to cover the remaining padding and
7218 we fill using the AArch64 NOP instruction.
7219
7220 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7221 enough storage space for up to 3 bytes for padding the back to a valid
7222 instruction alignment and exactly 4 bytes to store the NOP pattern. */
7223
7224 void
7225 aarch64_handle_align (fragS * fragP)
7226 {
7227 /* NOP = d503201f */
7228 /* AArch64 instructions are always little-endian. */
7229 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
7230
7231 int bytes, fix, noop_size;
7232 char *p;
7233
7234 if (fragP->fr_type != rs_align_code)
7235 return;
7236
7237 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
7238 p = fragP->fr_literal + fragP->fr_fix;
7239
7240 #ifdef OBJ_ELF
7241 gas_assert (fragP->tc_frag_data.recorded);
7242 #endif
7243
7244 noop_size = sizeof (aarch64_noop);
7245
7246 fix = bytes & (noop_size - 1);
7247 if (fix)
7248 {
7249 #ifdef OBJ_ELF
7250 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
7251 #endif
7252 memset (p, 0, fix);
7253 p += fix;
7254 fragP->fr_fix += fix;
7255 }
7256
7257 if (noop_size)
7258 memcpy (p, aarch64_noop, noop_size);
7259 fragP->fr_var = noop_size;
7260 }
7261
7262 /* Perform target specific initialisation of a frag.
7263 Note - despite the name this initialisation is not done when the frag
7264 is created, but only when its type is assigned. A frag can be created
7265 and used a long time before its type is set, so beware of assuming that
7266 this initialisation is performed first. */
7267
#ifndef OBJ_ELF
/* Non-ELF targets need no per-frag initialisation.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
    return;

  /* Pick the mapping state implied by the kind of frag.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}

/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
#endif /* OBJ_ELF */
7316
7317 /* Convert REGNAME to a DWARF-2 register number. */
7318
7319 int
7320 tc_aarch64_regname_to_dw2regnum (char *regname)
7321 {
7322 const reg_entry *reg = parse_reg (&regname);
7323 if (reg == NULL)
7324 return -1;
7325
7326 switch (reg->type)
7327 {
7328 case REG_TYPE_SP_32:
7329 case REG_TYPE_SP_64:
7330 case REG_TYPE_R_32:
7331 case REG_TYPE_R_64:
7332 return reg->number;
7333
7334 case REG_TYPE_FP_B:
7335 case REG_TYPE_FP_H:
7336 case REG_TYPE_FP_S:
7337 case REG_TYPE_FP_D:
7338 case REG_TYPE_FP_Q:
7339 return reg->number + 64;
7340
7341 default:
7342 break;
7343 }
7344 return -1;
7345 }
7346
7347 /* Implement DWARF2_ADDR_SIZE. */
7348
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 ELF uses 32-bit addresses.  */
  if (ilp32_p)
    return 4;
#endif
  /* Otherwise derive the address size from the output BFD.  */
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7358
7359 /* MD interface: Symbol and relocation handling. */
7360
7361 /* Return the address within the segment that a PC-relative fixup is
7362 relative to. For AArch64 PC-relative fixups applied to instructions
7363 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7364
7365 long
7366 md_pcrel_from_section (fixS * fixP, segT seg)
7367 {
7368 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7369
7370 /* If this is pc-relative and we are going to emit a relocation
7371 then we just want to put out any pipeline compensation that the linker
7372 will need. Otherwise we want to use the calculated base. */
7373 if (fixP->fx_pcrel
7374 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7375 || aarch64_force_relocation (fixP)))
7376 base = 0;
7377
7378 /* AArch64 should be consistent for all pc-relative relocations. */
7379 return base + AARCH64_PCREL_OFFSET;
7380 }
7381
7382 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
7383 Otherwise we have no need to default values of symbols. */
7384
symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character filter before the full string comparison.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      /* Create the GOT symbol lazily, on first reference.  */
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  /* All other undefined symbols get no default value.  */
  return 0;
}
7407
7408 /* Return non-zero if the indicated VALUE has overflowed the maximum
7409 range expressible by a unsigned number with the indicated number of
7410 BITS. */
7411
7412 static bfd_boolean
7413 unsigned_overflow (valueT value, unsigned bits)
7414 {
7415 valueT lim;
7416 if (bits >= sizeof (valueT) * 8)
7417 return FALSE;
7418 lim = (valueT) 1 << bits;
7419 return (value >= lim);
7420 }
7421
7422
7423 /* Return non-zero if the indicated VALUE has overflowed the maximum
7424 range expressible by an signed number with the indicated number of
7425 BITS. */
7426
7427 static bfd_boolean
7428 signed_overflow (offsetT value, unsigned bits)
7429 {
7430 offsetT lim;
7431 if (bits >= sizeof (offsetT) * 8)
7432 return FALSE;
7433 lim = (offsetT) 1 << (bits - 1);
7434 return (value < -lim || value >= lim);
7435 }
7436
7437 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7438 unsigned immediate offset load/store instruction, try to encode it as
7439 an unscaled, 9-bit, signed immediate offset load/store instruction.
7440 Return TRUE if it is successful; otherwise return FALSE.
7441
7442 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7443 in response to the standard LDR/STR mnemonics when the immediate offset is
7444 unambiguous, i.e. when it is negative or unaligned. */
7445
static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode onto its unscaled (LDUR/STUR) twin;
     OP_NIL means there is no such twin and we give up.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  /* Switch the instruction over to the unscaled opcode before
     re-encoding it.  */
  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return FALSE;

  return TRUE;
}
7499
7500 /* Called by fix_insn to fix a MOV immediate alias instruction.
7501
7502 Operand for a generic move immediate instruction, which is an alias
7503 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
7504 a 32-bit/64-bit immediate value into general register. An assembler error
7505 shall result if the immediate cannot be created by a single one of these
7506 instructions. If there is a choice, then to ensure reversability an
7507 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
7508
static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN (wide negated) alias.  (The old comment said MOVK,
	 but OP_MOV_IMM_WIDEN is the negated wide-move form; see the
	 MOVZ/MOVN/ORR preference order described above.)  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* No single-instruction alias can materialise VALUE.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
7560
7561 /* An instruction operand which is immediate related may have symbol used
7562 in the assembly, e.g.
7563
7564 mov w0, u32
7565 .set u32, 0x00ffff00
7566
7567 At the time when the assembly instruction is parsed, a referenced symbol,
7568 like 'u32' in the above example may not have been seen; a fixS is created
7569 in such a case and is handled here after symbols have been resolved.
7570 Instruction is fixed up with VALUE using the information in *FIXP plus
7571 extra information in FLAGS.
7572
7573 This function is called by md_apply_fix to fix up instructions that need
7574 a fix-up described above but does not involve any linker-time relocation. */
7575
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* 16-bit immediate of e.g. SVC/HVC/SMC; must fit in 16 bits
	 unsigned.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2   221111111111
		  1  098|7654|3 2   109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  A negative immediate is encoded by flipping the
	     instruction to its add/sub counterpart and negating VALUE.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only possible
	     when the low 12 bits are all zero and the value still fits
	     in 24 bits; in that case use the "LSL #12" encoding.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-run the full operand encoder so the
	 bitmask validity check is applied to the resolved value.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* Scaled-offset form rejected the offset; fall back to the
	     unscaled (LDUR/STUR style) variant if it fits.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7738
7739 /* Apply a fixup (fixP) to segment data, once it has been determined
7740 by our caller that we have all the info we need to fix it up.
7741
7742 Parameter valP is the pointer to the value of the bits. */
7743
7744 void
7745 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7746 {
7747 offsetT value = *valP;
7748 uint32_t insn;
7749 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7750 int scale;
7751 unsigned flags = fixP->fx_addnumber;
7752
7753 DEBUG_TRACE ("\n\n");
7754 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7755 DEBUG_TRACE ("Enter md_apply_fix");
7756
7757 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7758
7759 /* Note whether this will delete the relocation. */
7760
7761 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7762 fixP->fx_done = 1;
7763
7764 /* Process the relocations. */
7765 switch (fixP->fx_r_type)
7766 {
7767 case BFD_RELOC_NONE:
7768 /* This will need to go in the object file. */
7769 fixP->fx_done = 0;
7770 break;
7771
7772 case BFD_RELOC_8:
7773 case BFD_RELOC_8_PCREL:
7774 if (fixP->fx_done || !seg->use_rela_p)
7775 md_number_to_chars (buf, value, 1);
7776 break;
7777
7778 case BFD_RELOC_16:
7779 case BFD_RELOC_16_PCREL:
7780 if (fixP->fx_done || !seg->use_rela_p)
7781 md_number_to_chars (buf, value, 2);
7782 break;
7783
7784 case BFD_RELOC_32:
7785 case BFD_RELOC_32_PCREL:
7786 if (fixP->fx_done || !seg->use_rela_p)
7787 md_number_to_chars (buf, value, 4);
7788 break;
7789
7790 case BFD_RELOC_64:
7791 case BFD_RELOC_64_PCREL:
7792 if (fixP->fx_done || !seg->use_rela_p)
7793 md_number_to_chars (buf, value, 8);
7794 break;
7795
7796 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7797 /* We claim that these fixups have been processed here, even if
7798 in fact we generate an error because we do not have a reloc
7799 for them, so tc_gen_reloc() will reject them. */
7800 fixP->fx_done = 1;
7801 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7802 {
7803 as_bad_where (fixP->fx_file, fixP->fx_line,
7804 _("undefined symbol %s used as an immediate value"),
7805 S_GET_NAME (fixP->fx_addsy));
7806 goto apply_fix_return;
7807 }
7808 fix_insn (fixP, flags, value);
7809 break;
7810
7811 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7812 if (fixP->fx_done || !seg->use_rela_p)
7813 {
7814 if (value & 3)
7815 as_bad_where (fixP->fx_file, fixP->fx_line,
7816 _("pc-relative load offset not word aligned"));
7817 if (signed_overflow (value, 21))
7818 as_bad_where (fixP->fx_file, fixP->fx_line,
7819 _("pc-relative load offset out of range"));
7820 insn = get_aarch64_insn (buf);
7821 insn |= encode_ld_lit_ofs_19 (value >> 2);
7822 put_aarch64_insn (buf, insn);
7823 }
7824 break;
7825
7826 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7827 if (fixP->fx_done || !seg->use_rela_p)
7828 {
7829 if (signed_overflow (value, 21))
7830 as_bad_where (fixP->fx_file, fixP->fx_line,
7831 _("pc-relative address offset out of range"));
7832 insn = get_aarch64_insn (buf);
7833 insn |= encode_adr_imm (value);
7834 put_aarch64_insn (buf, insn);
7835 }
7836 break;
7837
7838 case BFD_RELOC_AARCH64_BRANCH19:
7839 if (fixP->fx_done || !seg->use_rela_p)
7840 {
7841 if (value & 3)
7842 as_bad_where (fixP->fx_file, fixP->fx_line,
7843 _("conditional branch target not word aligned"));
7844 if (signed_overflow (value, 21))
7845 as_bad_where (fixP->fx_file, fixP->fx_line,
7846 _("conditional branch out of range"));
7847 insn = get_aarch64_insn (buf);
7848 insn |= encode_cond_branch_ofs_19 (value >> 2);
7849 put_aarch64_insn (buf, insn);
7850 }
7851 break;
7852
7853 case BFD_RELOC_AARCH64_TSTBR14:
7854 if (fixP->fx_done || !seg->use_rela_p)
7855 {
7856 if (value & 3)
7857 as_bad_where (fixP->fx_file, fixP->fx_line,
7858 _("conditional branch target not word aligned"));
7859 if (signed_overflow (value, 16))
7860 as_bad_where (fixP->fx_file, fixP->fx_line,
7861 _("conditional branch out of range"));
7862 insn = get_aarch64_insn (buf);
7863 insn |= encode_tst_branch_ofs_14 (value >> 2);
7864 put_aarch64_insn (buf, insn);
7865 }
7866 break;
7867
7868 case BFD_RELOC_AARCH64_CALL26:
7869 case BFD_RELOC_AARCH64_JUMP26:
7870 if (fixP->fx_done || !seg->use_rela_p)
7871 {
7872 if (value & 3)
7873 as_bad_where (fixP->fx_file, fixP->fx_line,
7874 _("branch target not word aligned"));
7875 if (signed_overflow (value, 28))
7876 as_bad_where (fixP->fx_file, fixP->fx_line,
7877 _("branch out of range"));
7878 insn = get_aarch64_insn (buf);
7879 insn |= encode_branch_ofs_26 (value >> 2);
7880 put_aarch64_insn (buf, insn);
7881 }
7882 break;
7883
7884 case BFD_RELOC_AARCH64_MOVW_G0:
7885 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7886 case BFD_RELOC_AARCH64_MOVW_G0_S:
7887 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7888 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
7889 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
7890 scale = 0;
7891 goto movw_common;
7892 case BFD_RELOC_AARCH64_MOVW_G1:
7893 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7894 case BFD_RELOC_AARCH64_MOVW_G1_S:
7895 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7896 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
7897 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
7898 scale = 16;
7899 goto movw_common;
7900 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7901 scale = 0;
7902 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7903 /* Should always be exported to object file, see
7904 aarch64_force_relocation(). */
7905 gas_assert (!fixP->fx_done);
7906 gas_assert (seg->use_rela_p);
7907 goto movw_common;
7908 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7909 scale = 16;
7910 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7911 /* Should always be exported to object file, see
7912 aarch64_force_relocation(). */
7913 gas_assert (!fixP->fx_done);
7914 gas_assert (seg->use_rela_p);
7915 goto movw_common;
7916 case BFD_RELOC_AARCH64_MOVW_G2:
7917 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7918 case BFD_RELOC_AARCH64_MOVW_G2_S:
7919 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
7920 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
7921 scale = 32;
7922 goto movw_common;
7923 case BFD_RELOC_AARCH64_MOVW_G3:
7924 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
7925 scale = 48;
7926 movw_common:
7927 if (fixP->fx_done || !seg->use_rela_p)
7928 {
7929 insn = get_aarch64_insn (buf);
7930
7931 if (!fixP->fx_done)
7932 {
7933 /* REL signed addend must fit in 16 bits */
7934 if (signed_overflow (value, 16))
7935 as_bad_where (fixP->fx_file, fixP->fx_line,
7936 _("offset out of range"));
7937 }
7938 else
7939 {
7940 /* Check for overflow and scale. */
7941 switch (fixP->fx_r_type)
7942 {
7943 case BFD_RELOC_AARCH64_MOVW_G0:
7944 case BFD_RELOC_AARCH64_MOVW_G1:
7945 case BFD_RELOC_AARCH64_MOVW_G2:
7946 case BFD_RELOC_AARCH64_MOVW_G3:
7947 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7948 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7949 if (unsigned_overflow (value, scale + 16))
7950 as_bad_where (fixP->fx_file, fixP->fx_line,
7951 _("unsigned value out of range"));
7952 break;
7953 case BFD_RELOC_AARCH64_MOVW_G0_S:
7954 case BFD_RELOC_AARCH64_MOVW_G1_S:
7955 case BFD_RELOC_AARCH64_MOVW_G2_S:
7956 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
7957 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
7958 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
7959 /* NOTE: We can only come here with movz or movn. */
7960 if (signed_overflow (value, scale + 16))
7961 as_bad_where (fixP->fx_file, fixP->fx_line,
7962 _("signed value out of range"));
7963 if (value < 0)
7964 {
7965 /* Force use of MOVN. */
7966 value = ~value;
7967 insn = reencode_movzn_to_movn (insn);
7968 }
7969 else
7970 {
7971 /* Force use of MOVZ. */
7972 insn = reencode_movzn_to_movz (insn);
7973 }
7974 break;
7975 default:
7976 /* Unchecked relocations. */
7977 break;
7978 }
7979 value >>= scale;
7980 }
7981
7982 /* Insert value into MOVN/MOVZ/MOVK instruction. */
7983 insn |= encode_movw_imm (value & 0xffff);
7984
7985 put_aarch64_insn (buf, insn);
7986 }
7987 break;
7988
7989 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7990 fixP->fx_r_type = (ilp32_p
7991 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7992 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7993 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7994 /* Should always be exported to object file, see
7995 aarch64_force_relocation(). */
7996 gas_assert (!fixP->fx_done);
7997 gas_assert (seg->use_rela_p);
7998 break;
7999
8000 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8001 fixP->fx_r_type = (ilp32_p
8002 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
8003 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
8004 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8005 /* Should always be exported to object file, see
8006 aarch64_force_relocation(). */
8007 gas_assert (!fixP->fx_done);
8008 gas_assert (seg->use_rela_p);
8009 break;
8010
8011 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8012 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8013 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8014 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8015 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8016 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8017 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8018 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8019 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8020 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8021 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8022 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8023 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8024 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8025 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8026 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8027 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8028 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8029 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8030 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8031 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8032 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8033 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8034 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8035 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8036 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8037 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8038 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8039 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8040 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8041 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8042 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8043 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8044 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8045 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8046 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8047 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8048 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8049 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8050 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8051 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8052 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8053 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8054 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8055 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8056 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8057 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8058 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8059 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8060 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8061 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8062 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8063 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8064 /* Should always be exported to object file, see
8065 aarch64_force_relocation(). */
8066 gas_assert (!fixP->fx_done);
8067 gas_assert (seg->use_rela_p);
8068 break;
8069
8070 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8071 /* Should always be exported to object file, see
8072 aarch64_force_relocation(). */
8073 fixP->fx_r_type = (ilp32_p
8074 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
8075 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
8076 gas_assert (!fixP->fx_done);
8077 gas_assert (seg->use_rela_p);
8078 break;
8079
8080 case BFD_RELOC_AARCH64_ADD_LO12:
8081 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8082 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8083 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8084 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8085 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8086 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8087 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8088 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8089 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8090 case BFD_RELOC_AARCH64_LDST128_LO12:
8091 case BFD_RELOC_AARCH64_LDST16_LO12:
8092 case BFD_RELOC_AARCH64_LDST32_LO12:
8093 case BFD_RELOC_AARCH64_LDST64_LO12:
8094 case BFD_RELOC_AARCH64_LDST8_LO12:
8095 /* Should always be exported to object file, see
8096 aarch64_force_relocation(). */
8097 gas_assert (!fixP->fx_done);
8098 gas_assert (seg->use_rela_p);
8099 break;
8100
8101 case BFD_RELOC_AARCH64_TLSDESC_ADD:
8102 case BFD_RELOC_AARCH64_TLSDESC_CALL:
8103 case BFD_RELOC_AARCH64_TLSDESC_LDR:
8104 break;
8105
8106 case BFD_RELOC_UNUSED:
8107 /* An error will already have been reported. */
8108 break;
8109
8110 default:
8111 as_bad_where (fixP->fx_file, fixP->fx_line,
8112 _("unexpected %s fixup"),
8113 bfd_get_reloc_code_name (fixP->fx_r_type));
8114 break;
8115 }
8116
8117 apply_fix_return:
8118 /* Free the allocated the struct aarch64_inst.
8119 N.B. currently there are very limited number of fix-up types actually use
8120 this field, so the impact on the performance should be minimal . */
8121 if (fixP->tc_fix_data.inst != NULL)
8122 free (fixP->tc_fix_data.inst);
8123
8124 return;
8125 }
8126
8127 /* Translate internal representation of relocation info to BFD target
8128 format. */
8129
8130 arelent *
8131 tc_gen_reloc (asection * section, fixS * fixp)
8132 {
8133 arelent *reloc;
8134 bfd_reloc_code_real_type code;
8135
8136 reloc = XNEW (arelent);
8137
8138 reloc->sym_ptr_ptr = XNEW (asymbol *);
8139 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8140 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8141
8142 if (fixp->fx_pcrel)
8143 {
8144 if (section->use_rela_p)
8145 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8146 else
8147 fixp->fx_offset = reloc->address;
8148 }
8149 reloc->addend = fixp->fx_offset;
8150
8151 code = fixp->fx_r_type;
8152 switch (code)
8153 {
8154 case BFD_RELOC_16:
8155 if (fixp->fx_pcrel)
8156 code = BFD_RELOC_16_PCREL;
8157 break;
8158
8159 case BFD_RELOC_32:
8160 if (fixp->fx_pcrel)
8161 code = BFD_RELOC_32_PCREL;
8162 break;
8163
8164 case BFD_RELOC_64:
8165 if (fixp->fx_pcrel)
8166 code = BFD_RELOC_64_PCREL;
8167 break;
8168
8169 default:
8170 break;
8171 }
8172
8173 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8174 if (reloc->howto == NULL)
8175 {
8176 as_bad_where (fixp->fx_file, fixp->fx_line,
8177 _
8178 ("cannot represent %s relocation in this object file format"),
8179 bfd_get_reloc_code_name (code));
8180 return NULL;
8181 }
8182
8183 return reloc;
8184 }
8185
8186 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8187
8188 void
8189 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8190 {
8191 bfd_reloc_code_real_type type;
8192 int pcrel = 0;
8193
8194 /* Pick a reloc.
8195 FIXME: @@ Should look at CPU word size. */
8196 switch (size)
8197 {
8198 case 1:
8199 type = BFD_RELOC_8;
8200 break;
8201 case 2:
8202 type = BFD_RELOC_16;
8203 break;
8204 case 4:
8205 type = BFD_RELOC_32;
8206 break;
8207 case 8:
8208 type = BFD_RELOC_64;
8209 break;
8210 default:
8211 as_bad (_("cannot do %u-byte relocation"), size);
8212 type = BFD_RELOC_UNUSED;
8213 break;
8214 }
8215
8216 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8217 }
8218
/* Implement TC_FORCE_RELOCATION.  Decide whether the fix-up FIXP must
   be kept as a relocation for the linker (return 1) or may be resolved
   by the assembler (return 0 / generic policy).  */

int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  return generic_force_reloc (fixp);
}
8314
8315 #ifdef OBJ_ELF
8316
8317 /* Implement md_after_parse_args. This is the earliest time we need to decide
8318 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8319
8320 void
8321 aarch64_after_parse_args (void)
8322 {
8323 if (aarch64_abi != AARCH64_ABI_NONE)
8324 return;
8325
8326 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8327 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8328 aarch64_abi = AARCH64_ABI_ILP32;
8329 else
8330 aarch64_abi = AARCH64_ABI_LP64;
8331 }
8332
8333 const char *
8334 elf64_aarch64_target_format (void)
8335 {
8336 if (strcmp (TARGET_OS, "cloudabi") == 0)
8337 {
8338 /* FIXME: What to do for ilp32_p ? */
8339 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
8340 }
8341 if (target_big_endian)
8342 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8343 else
8344 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8345 }
8346
/* Target hook called while finalizing symbols; simply defers to the
   generic ELF symbol frobbing.  */

void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8352 #endif
8353
8354 /* MD interface: Finalization. */
8355
8356 /* A good place to do this, although this was probably not intended
8357 for this kind of use. We need to dump the literal pool before
8358 references are made to a null symbol pointer. */
8359
void
aarch64_cleanup (void)
{
  literal_pool *pool;

  /* Dump every pending literal pool into its owning (sub)section so no
     literal references are left dangling at the end of assembly.  */
  for (pool = list_of_pools; pool; pool = pool->next)
    {
      /* Put it at the end of the relevant section.  */
      subseg_set (pool->section, pool->sub_section);
      s_ltorg (0);
    }
}
8372
8373 #ifdef OBJ_ELF
8374 /* Remove any excess mapping symbols generated for alignment frags in
8375 SEC. We may have created a mapping symbol before a zero byte
8376 alignment; remove it if there's a mapping symbol after the
8377 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag; for each frag whose last mapping symbol sits at the
     frag's very end, decide whether that symbol is redundant.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward through (possibly empty) following frags to see
	 whether another mapping symbol makes this one redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8437 #endif
8438
8439 /* Adjust the symbol table. */
8440
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8451
/* Insert VALUE into TABLE under KEY.  Failure here (duplicate key or
   out of memory) indicates a broken opcode/operand table, so report it
   as a fatal error instead of printing to stdout and carrying on with a
   partially-built table.  */

static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *hash_err;

  hash_err = hash_insert (table, key, value);
  if (hash_err)
    as_fatal (_("internal error: can't hash %s: %s"), key, hash_err);
}
8461
8462 static void
8463 fill_instruction_hash_table (void)
8464 {
8465 aarch64_opcode *opcode = aarch64_opcode_table;
8466
8467 while (opcode->name != NULL)
8468 {
8469 templates *templ, *new_templ;
8470 templ = hash_find (aarch64_ops_hsh, opcode->name);
8471
8472 new_templ = XNEW (templates);
8473 new_templ->opcode = opcode;
8474 new_templ->next = NULL;
8475
8476 if (!templ)
8477 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8478 else
8479 {
8480 new_templ->next = templ->next;
8481 templ->next = new_templ;
8482 }
8483 ++opcode;
8484 }
8485 }
8486
8487 static inline void
8488 convert_to_upper (char *dst, const char *src, size_t num)
8489 {
8490 unsigned int i;
8491 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8492 *dst = TOUPPER (*src);
8493 *dst = '\0';
8494 }
8495
8496 /* Assume STR point to a lower-case string, allocate, convert and return
8497 the corresponding upper-case string. */
8498 static inline const char*
8499 get_upper_str (const char *str)
8500 {
8501 char *ret;
8502 size_t len = strlen (str);
8503 ret = XNEWVEC (char, len + 1);
8504 convert_to_upper (ret, str, len);
8505 return ret;
8506 }
8507
8508 /* MD interface: Initialization. */
8509
8510 void
8511 md_begin (void)
8512 {
8513 unsigned mach;
8514 unsigned int i;
8515
8516 if ((aarch64_ops_hsh = hash_new ()) == NULL
8517 || (aarch64_cond_hsh = hash_new ()) == NULL
8518 || (aarch64_shift_hsh = hash_new ()) == NULL
8519 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8520 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8521 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8522 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8523 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8524 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8525 || (aarch64_sys_regs_sr_hsh = hash_new ()) == NULL
8526 || (aarch64_reg_hsh = hash_new ()) == NULL
8527 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8528 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8529 || (aarch64_pldop_hsh = hash_new ()) == NULL
8530 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8531 as_fatal (_("virtual memory exhausted"));
8532
8533 fill_instruction_hash_table ();
8534
8535 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8536 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8537 (void *) (aarch64_sys_regs + i));
8538
8539 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8540 checked_hash_insert (aarch64_pstatefield_hsh,
8541 aarch64_pstatefields[i].name,
8542 (void *) (aarch64_pstatefields + i));
8543
8544 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8545 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8546 aarch64_sys_regs_ic[i].name,
8547 (void *) (aarch64_sys_regs_ic + i));
8548
8549 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8550 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8551 aarch64_sys_regs_dc[i].name,
8552 (void *) (aarch64_sys_regs_dc + i));
8553
8554 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8555 checked_hash_insert (aarch64_sys_regs_at_hsh,
8556 aarch64_sys_regs_at[i].name,
8557 (void *) (aarch64_sys_regs_at + i));
8558
8559 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8560 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8561 aarch64_sys_regs_tlbi[i].name,
8562 (void *) (aarch64_sys_regs_tlbi + i));
8563
8564 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
8565 checked_hash_insert (aarch64_sys_regs_sr_hsh,
8566 aarch64_sys_regs_sr[i].name,
8567 (void *) (aarch64_sys_regs_sr + i));
8568
8569 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8570 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8571 (void *) (reg_names + i));
8572
8573 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8574 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8575 (void *) (nzcv_names + i));
8576
8577 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8578 {
8579 const char *name = aarch64_operand_modifiers[i].name;
8580 checked_hash_insert (aarch64_shift_hsh, name,
8581 (void *) (aarch64_operand_modifiers + i));
8582 /* Also hash the name in the upper case. */
8583 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8584 (void *) (aarch64_operand_modifiers + i));
8585 }
8586
8587 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8588 {
8589 unsigned int j;
8590 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8591 the same condition code. */
8592 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8593 {
8594 const char *name = aarch64_conds[i].names[j];
8595 if (name == NULL)
8596 break;
8597 checked_hash_insert (aarch64_cond_hsh, name,
8598 (void *) (aarch64_conds + i));
8599 /* Also hash the name in the upper case. */
8600 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8601 (void *) (aarch64_conds + i));
8602 }
8603 }
8604
8605 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8606 {
8607 const char *name = aarch64_barrier_options[i].name;
8608 /* Skip xx00 - the unallocated values of option. */
8609 if ((i & 0x3) == 0)
8610 continue;
8611 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8612 (void *) (aarch64_barrier_options + i));
8613 /* Also hash the name in the upper case. */
8614 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8615 (void *) (aarch64_barrier_options + i));
8616 }
8617
8618 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8619 {
8620 const char* name = aarch64_prfops[i].name;
8621 /* Skip the unallocated hint encodings. */
8622 if (name == NULL)
8623 continue;
8624 checked_hash_insert (aarch64_pldop_hsh, name,
8625 (void *) (aarch64_prfops + i));
8626 /* Also hash the name in the upper case. */
8627 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8628 (void *) (aarch64_prfops + i));
8629 }
8630
8631 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8632 {
8633 const char* name = aarch64_hint_options[i].name;
8634
8635 checked_hash_insert (aarch64_hint_opt_hsh, name,
8636 (void *) (aarch64_hint_options + i));
8637 /* Also hash the name in the upper case. */
8638 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8639 (void *) (aarch64_hint_options + i));
8640 }
8641
8642 /* Set the cpu variant based on the command-line options. */
8643 if (!mcpu_cpu_opt)
8644 mcpu_cpu_opt = march_cpu_opt;
8645
8646 if (!mcpu_cpu_opt)
8647 mcpu_cpu_opt = &cpu_default;
8648
8649 cpu_variant = *mcpu_cpu_opt;
8650
8651 /* Record the CPU type. */
8652 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8653
8654 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8655 }
8656
8657 /* Command line processing. */
8658
/* Short options: only "-m<...>" takes an argument; everything
   AArch64-specific is routed through md_parse_option.  */
const char *md_shortopts = "m:";

/* Endianness selection: define OPTION_EB/OPTION_EL only for the
   endiannesses this target configuration actually supports.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* Long options recognized directly by getopt; the table shape follows
   whichever OPTION_* macros were defined above.  */
struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8683
/* A simple on/off command-line flag: when OPTION matches the full
   option text, *VAR is set to VALUE (see md_parse_option).  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Table of boolean -m<...> options; scanned linearly by
   md_parse_option and printed by md_show_usage.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8706
/* One -mcpu= candidate: NAME as accepted on the command line, VALUE the
   feature set it enables.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Entry 0 ("all") is intentionally skipped by the
   .cpu directive handler (see s_aarch64_cpu).  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				 "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
8770
/* One -march= candidate: NAME as accepted on the command line, VALUE
   the architecture's feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Entry 0 ("all") is intentionally skipped by the
   .arch directive handler (see s_aarch64_arch).  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {NULL, AARCH64_ARCH_NONE}
};
8789
/* ISA extensions.  NAME is the "+ext" spelling, VALUE the feature bits
   it represents, and REQUIRE the features it depends on (used by the
   transitive-closure helpers aarch64_feature_enable_set /
   aarch64_feature_disable_set).  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* Extensions accepted after a '+' in -mcpu=/-march= and in
   .arch_extension, with their dependency sets.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO
					 | AARCH64_FEATURE_AES
					 | AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2
					 | AARCH64_FEATURE_SHA3, 0),
			AARCH64_ARCH_NONE},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
8858
/* A prefix-matched option whose tail is handed to a sub-option parser
   (e.g. -mcpu=, -march=, -mabi=); see aarch64_long_opts and
   md_parse_option.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
8866
8867 /* Transitive closure of features depending on set. */
8868 static aarch64_feature_set
8869 aarch64_feature_disable_set (aarch64_feature_set set)
8870 {
8871 const struct aarch64_option_cpu_value_table *opt;
8872 aarch64_feature_set prev = 0;
8873
8874 while (prev != set) {
8875 prev = set;
8876 for (opt = aarch64_features; opt->name != NULL; opt++)
8877 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8878 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8879 }
8880 return set;
8881 }
8882
8883 /* Transitive closure of dependencies of set. */
8884 static aarch64_feature_set
8885 aarch64_feature_enable_set (aarch64_feature_set set)
8886 {
8887 const struct aarch64_option_cpu_value_table *opt;
8888 aarch64_feature_set prev = 0;
8889
8890 while (prev != set) {
8891 prev = set;
8892 for (opt = aarch64_features; opt->name != NULL; opt++)
8893 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8894 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8895 }
8896 return set;
8897 }
8898
8899 static int
8900 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8901 bfd_boolean ext_only)
8902 {
8903 /* We insist on extensions being added before being removed. We achieve
8904 this by using the ADDING_VALUE variable to indicate whether we are
8905 adding an extension (1) or removing it (0) and only allowing it to
8906 change in the order -1 -> 1 -> 0. */
8907 int adding_value = -1;
8908 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8909
8910 /* Copy the feature set, so that we can modify it. */
8911 *ext_set = **opt_p;
8912 *opt_p = ext_set;
8913
8914 while (str != NULL && *str != 0)
8915 {
8916 const struct aarch64_option_cpu_value_table *opt;
8917 const char *ext = NULL;
8918 int optlen;
8919
8920 if (!ext_only)
8921 {
8922 if (*str != '+')
8923 {
8924 as_bad (_("invalid architectural extension"));
8925 return 0;
8926 }
8927
8928 ext = strchr (++str, '+');
8929 }
8930
8931 if (ext != NULL)
8932 optlen = ext - str;
8933 else
8934 optlen = strlen (str);
8935
8936 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
8937 {
8938 if (adding_value != 0)
8939 adding_value = 0;
8940 optlen -= 2;
8941 str += 2;
8942 }
8943 else if (optlen > 0)
8944 {
8945 if (adding_value == -1)
8946 adding_value = 1;
8947 else if (adding_value != 1)
8948 {
8949 as_bad (_("must specify extensions to add before specifying "
8950 "those to remove"));
8951 return FALSE;
8952 }
8953 }
8954
8955 if (optlen == 0)
8956 {
8957 as_bad (_("missing architectural extension"));
8958 return 0;
8959 }
8960
8961 gas_assert (adding_value != -1);
8962
8963 for (opt = aarch64_features; opt->name != NULL; opt++)
8964 if (strncmp (opt->name, str, optlen) == 0)
8965 {
8966 aarch64_feature_set set;
8967
8968 /* Add or remove the extension. */
8969 if (adding_value)
8970 {
8971 set = aarch64_feature_enable_set (opt->value);
8972 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
8973 }
8974 else
8975 {
8976 set = aarch64_feature_disable_set (opt->value);
8977 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
8978 }
8979 break;
8980 }
8981
8982 if (opt->name == NULL)
8983 {
8984 as_bad (_("unknown architectural extension `%s'"), str);
8985 return 0;
8986 }
8987
8988 str = ext;
8989 };
8990
8991 return 1;
8992 }
8993
8994 static int
8995 aarch64_parse_cpu (const char *str)
8996 {
8997 const struct aarch64_cpu_option_table *opt;
8998 const char *ext = strchr (str, '+');
8999 size_t optlen;
9000
9001 if (ext != NULL)
9002 optlen = ext - str;
9003 else
9004 optlen = strlen (str);
9005
9006 if (optlen == 0)
9007 {
9008 as_bad (_("missing cpu name `%s'"), str);
9009 return 0;
9010 }
9011
9012 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9013 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9014 {
9015 mcpu_cpu_opt = &opt->value;
9016 if (ext != NULL)
9017 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
9018
9019 return 1;
9020 }
9021
9022 as_bad (_("unknown cpu `%s'"), str);
9023 return 0;
9024 }
9025
9026 static int
9027 aarch64_parse_arch (const char *str)
9028 {
9029 const struct aarch64_arch_option_table *opt;
9030 const char *ext = strchr (str, '+');
9031 size_t optlen;
9032
9033 if (ext != NULL)
9034 optlen = ext - str;
9035 else
9036 optlen = strlen (str);
9037
9038 if (optlen == 0)
9039 {
9040 as_bad (_("missing architecture name `%s'"), str);
9041 return 0;
9042 }
9043
9044 for (opt = aarch64_archs; opt->name != NULL; opt++)
9045 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9046 {
9047 march_cpu_opt = &opt->value;
9048 if (ext != NULL)
9049 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
9050
9051 return 1;
9052 }
9053
9054 as_bad (_("unknown architecture `%s'\n"), str);
9055 return 0;
9056 }
9057
/* ABIs.  Maps the -mabi= argument to the internal ABI enumeration.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* Recognized -mabi= values (no NULL terminator: iterated with
   ARRAY_SIZE in aarch64_parse_abi).  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
9069
9070 static int
9071 aarch64_parse_abi (const char *str)
9072 {
9073 unsigned int i;
9074
9075 if (str[0] == '\0')
9076 {
9077 as_bad (_("missing abi name `%s'"), str);
9078 return 0;
9079 }
9080
9081 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9082 if (strcmp (str, aarch64_abis[i].name) == 0)
9083 {
9084 aarch64_abi = aarch64_abis[i].value;
9085 return 1;
9086 }
9087
9088 as_bad (_("unknown abi `%s'\n"), str);
9089 return 0;
9090 }
9091
/* Prefix-matched -m options with sub-option parsers; scanned by
   md_parse_option after the simple flag table.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
9103
/* gas hook: handle one command-line option.  C is the short-option
   character (or an OPTION_* code from md_longopts) and ARG its
   argument, if any.  Return non-zero if the option was recognized.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple on/off flags: the option's first character
	 arrives in C and the remainder in ARG.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the prefix-matched options with sub-option parsers
	 (e.g. -mcpu=<name>).  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  LOPT->OPTION includes the
		 leading 'm' which is already in C and absent from ARG,
		 hence the "- 1" when skipping past the matched prefix.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
9171
/* gas hook: print the target-specific option summary for --help.  */
void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  /* Simple flags, then the prefix options; entries without help text
     are not listed.  */
  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif
}
9198
9199 /* Parse a .cpu directive. */
9200
9201 static void
9202 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9203 {
9204 const struct aarch64_cpu_option_table *opt;
9205 char saved_char;
9206 char *name;
9207 char *ext;
9208 size_t optlen;
9209
9210 name = input_line_pointer;
9211 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9212 input_line_pointer++;
9213 saved_char = *input_line_pointer;
9214 *input_line_pointer = 0;
9215
9216 ext = strchr (name, '+');
9217
9218 if (ext != NULL)
9219 optlen = ext - name;
9220 else
9221 optlen = strlen (name);
9222
9223 /* Skip the first "all" entry. */
9224 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9225 if (strlen (opt->name) == optlen
9226 && strncmp (name, opt->name, optlen) == 0)
9227 {
9228 mcpu_cpu_opt = &opt->value;
9229 if (ext != NULL)
9230 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9231 return;
9232
9233 cpu_variant = *mcpu_cpu_opt;
9234
9235 *input_line_pointer = saved_char;
9236 demand_empty_rest_of_line ();
9237 return;
9238 }
9239 as_bad (_("unknown cpu `%s'"), name);
9240 *input_line_pointer = saved_char;
9241 ignore_rest_of_line ();
9242 }
9243
9244
9245 /* Parse a .arch directive. */
9246
9247 static void
9248 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9249 {
9250 const struct aarch64_arch_option_table *opt;
9251 char saved_char;
9252 char *name;
9253 char *ext;
9254 size_t optlen;
9255
9256 name = input_line_pointer;
9257 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9258 input_line_pointer++;
9259 saved_char = *input_line_pointer;
9260 *input_line_pointer = 0;
9261
9262 ext = strchr (name, '+');
9263
9264 if (ext != NULL)
9265 optlen = ext - name;
9266 else
9267 optlen = strlen (name);
9268
9269 /* Skip the first "all" entry. */
9270 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9271 if (strlen (opt->name) == optlen
9272 && strncmp (name, opt->name, optlen) == 0)
9273 {
9274 mcpu_cpu_opt = &opt->value;
9275 if (ext != NULL)
9276 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9277 return;
9278
9279 cpu_variant = *mcpu_cpu_opt;
9280
9281 *input_line_pointer = saved_char;
9282 demand_empty_rest_of_line ();
9283 return;
9284 }
9285
9286 as_bad (_("unknown architecture `%s'\n"), name);
9287 *input_line_pointer = saved_char;
9288 ignore_rest_of_line ();
9289 }
9290
9291 /* Parse a .arch_extension directive. */
9292
9293 static void
9294 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9295 {
9296 char saved_char;
9297 char *ext = input_line_pointer;;
9298
9299 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9300 input_line_pointer++;
9301 saved_char = *input_line_pointer;
9302 *input_line_pointer = 0;
9303
9304 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9305 return;
9306
9307 cpu_variant = *mcpu_cpu_opt;
9308
9309 *input_line_pointer = saved_char;
9310 demand_empty_rest_of_line ();
9311 }
9312
9313 /* Copy symbol information. */
9314
/* Copy symbol information.  Propagates the AArch64-specific symbol
   flag word from SRC to DEST (used by gas when cloning symbols).  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}