[BINUTILS, AARCH64, 4/8] Add Tag setting instructions in Memory Tagging Extension
[deliverable/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2018 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
106
/* Shape/index information parsed from a vector register suffix such as
   ".4s", "/z" or "[2]".  Which of the fields are meaningful is recorded
   in DEFINED using the NTA_HAS* bits above.  */
struct vector_type_el
{
  /* Element type (NT_b/NT_h/... or NT_zero/NT_merge for predication).  */
  enum vector_el_type type;
  /* Bitmask of NTA_HASTYPE / NTA_HASINDEX / NTA_HASVARWIDTH.  */
  unsigned char defined;
  /* Number of elements; 0 when the width is variable (SVE).  */
  unsigned width;
  /* Element index when NTA_HASINDEX is set.  */
  int64_t index;
};
114
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
116
117 struct reloc
118 {
119 bfd_reloc_code_real_type type;
120 expressionS exp;
121 int pc_rel;
122 enum aarch64_opnd opnd;
123 uint32_t flags;
124 unsigned need_libopcodes_p : 1;
125 };
126
127 struct aarch64_instruction
128 {
129 /* libopcodes structure for instruction intermediate representation. */
130 aarch64_inst base;
131 /* Record assembly errors found during the parsing. */
132 struct
133 {
134 enum aarch64_operand_error_kind kind;
135 const char *error;
136 } parsing_error;
137 /* The condition that appears in the assembly line. */
138 int cond;
139 /* Relocation information (including the GAS internal fixup). */
140 struct reloc reloc;
141 /* Need to generate an immediate in the literal pool. */
142 unsigned gen_lit_pool : 1;
143 };
144
145 typedef struct aarch64_instruction aarch64_instruction;
146
147 static aarch64_instruction inst;
148
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
183 static inline bfd_boolean
184 error_p (void)
185 {
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188
189 static inline const char *
190 get_error_message (void)
191 {
192 return inst.parsing_error.error;
193 }
194
195 static inline enum aarch64_operand_error_kind
196 get_error_kind (void)
197 {
198 return inst.parsing_error.kind;
199 }
200
201 static inline void
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
203 {
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
206 }
207
/* Record ERROR as a recoverable error (AARCH64_OPDE_RECOVERABLE), i.e.
   one that does not rule out trying further instruction templates.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
213
/* Record a syntax error with no message; the DESC field of the
   corresponding aarch64_operand entry will be used to compose the
   error message later.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
221
/* Record ERROR as a syntax error against the current template.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
/* Record ERROR as a syntax error of the fatal kind
   (AARCH64_OPDE_FATAL_SYNTAX_ERROR).  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
240 \f
241 /* Number of littlenums required to hold an extended precision number. */
242 #define MAX_LITTLENUMS 6
243
244 /* Return value for certain parsers when the parsing fails; those parsers
245 return the information of the parsed result, e.g. register number, on
246 success. */
247 #define PARSE_FAIL -1
248
249 /* This is an invalid condition code that means no conditional field is
250 present. */
251 #define COND_ALWAYS 0x10
252
253 typedef struct
254 {
255 const char *template;
256 unsigned long value;
257 } asm_barrier_opt;
258
259 typedef struct
260 {
261 const char *template;
262 uint32_t value;
263 } asm_nzcv;
264
265 struct reloc_entry
266 {
267 char *name;
268 bfd_reloc_code_real_type reloc;
269 };
270
271 /* Macros to define the register types and masks for the purpose
272 of parsing. */
273
274 #undef AARCH64_REG_TYPES
275 #define AARCH64_REG_TYPES \
276 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
277 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
278 BASIC_REG_TYPE(SP_32) /* wsp */ \
279 BASIC_REG_TYPE(SP_64) /* sp */ \
280 BASIC_REG_TYPE(Z_32) /* wzr */ \
281 BASIC_REG_TYPE(Z_64) /* xzr */ \
282 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
283 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
284 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
285 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
286 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
287 BASIC_REG_TYPE(VN) /* v[0-31] */ \
288 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
289 BASIC_REG_TYPE(PN) /* p[0-15] */ \
290 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
291 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
292 /* Typecheck: same, plus SVE registers. */ \
293 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
294 | REG_TYPE(ZN)) \
295 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
296 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
298 /* Typecheck: same, plus SVE registers. */ \
299 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
300 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
301 | REG_TYPE(ZN)) \
302 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
303 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
305 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
306 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
307 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
308 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
309 /* Typecheck: any [BHSDQ]P FP. */ \
310 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
311 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
312 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
313 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
314 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
315 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
316 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
317 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
318 be used for SVE instructions, since Zn and Pn are valid symbols \
319 in other contexts. */ \
320 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
321 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
322 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
323 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
324 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
325 | REG_TYPE(ZN) | REG_TYPE(PN)) \
326 /* Any integer register; used for error messages only. */ \
327 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
328 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
329 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
330 /* Pseudo type to mark the end of the enumerator sequence. */ \
331 BASIC_REG_TYPE(MAX)
332
333 #undef BASIC_REG_TYPE
334 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
335 #undef MULTI_REG_TYPE
336 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
337
338 /* Register type enumerators. */
339 typedef enum aarch64_reg_type_
340 {
341 /* A list of REG_TYPE_*. */
342 AARCH64_REG_TYPES
343 } aarch64_reg_type;
344
345 #undef BASIC_REG_TYPE
346 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
347 #undef REG_TYPE
348 #define REG_TYPE(T) (1 << REG_TYPE_##T)
349 #undef MULTI_REG_TYPE
350 #define MULTI_REG_TYPE(T,V) V,
351
352 /* Structure for a hash table entry for a register. */
/* Structure for a hash table entry for a register.  */
typedef struct
{
  /* Register name as looked up in aarch64_reg_hsh.  */
  const char *name;
  /* Encoded register number.  */
  unsigned char number;
  /* REG_TYPE_* classification of this register.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;
  /* NOTE(review): presumably non-zero for predefined registers as
     opposed to user-defined aliases -- confirm against uses.  */
  unsigned char builtin;
} reg_entry;
360
361 /* Values indexed by aarch64_reg_type to assist the type checking. */
362 static const unsigned reg_type_masks[] =
363 {
364 AARCH64_REG_TYPES
365 };
366
367 #undef BASIC_REG_TYPE
368 #undef REG_TYPE
369 #undef MULTI_REG_TYPE
370 #undef AARCH64_REG_TYPES
371
372 /* Diagnostics used when we don't get a register of the expected type.
373 Note: this has to synchronized with aarch64_reg_type definitions
374 above. */
375 static const char *
376 get_reg_expected_msg (aarch64_reg_type reg_type)
377 {
378 const char *msg;
379
380 switch (reg_type)
381 {
382 case REG_TYPE_R_32:
383 msg = N_("integer 32-bit register expected");
384 break;
385 case REG_TYPE_R_64:
386 msg = N_("integer 64-bit register expected");
387 break;
388 case REG_TYPE_R_N:
389 msg = N_("integer register expected");
390 break;
391 case REG_TYPE_R64_SP:
392 msg = N_("64-bit integer or SP register expected");
393 break;
394 case REG_TYPE_SVE_BASE:
395 msg = N_("base register expected");
396 break;
397 case REG_TYPE_R_Z:
398 msg = N_("integer or zero register expected");
399 break;
400 case REG_TYPE_SVE_OFFSET:
401 msg = N_("offset register expected");
402 break;
403 case REG_TYPE_R_SP:
404 msg = N_("integer or SP register expected");
405 break;
406 case REG_TYPE_R_Z_SP:
407 msg = N_("integer, zero or SP register expected");
408 break;
409 case REG_TYPE_FP_B:
410 msg = N_("8-bit SIMD scalar register expected");
411 break;
412 case REG_TYPE_FP_H:
413 msg = N_("16-bit SIMD scalar or floating-point half precision "
414 "register expected");
415 break;
416 case REG_TYPE_FP_S:
417 msg = N_("32-bit SIMD scalar or floating-point single precision "
418 "register expected");
419 break;
420 case REG_TYPE_FP_D:
421 msg = N_("64-bit SIMD scalar or floating-point double precision "
422 "register expected");
423 break;
424 case REG_TYPE_FP_Q:
425 msg = N_("128-bit SIMD scalar or floating-point quad precision "
426 "register expected");
427 break;
428 case REG_TYPE_R_Z_BHSDQ_V:
429 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
430 msg = N_("register expected");
431 break;
432 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
433 msg = N_("SIMD scalar or floating-point register expected");
434 break;
435 case REG_TYPE_VN: /* any V reg */
436 msg = N_("vector register expected");
437 break;
438 case REG_TYPE_ZN:
439 msg = N_("SVE vector register expected");
440 break;
441 case REG_TYPE_PN:
442 msg = N_("SVE predicate register expected");
443 break;
444 default:
445 as_fatal (_("invalid register type %d"), reg_type);
446 }
447 return msg;
448 }
449
450 /* Some well known registers that we refer to directly elsewhere. */
451 #define REG_SP 31
452
453 /* Instructions take 4 bytes in the object file. */
454 #define INSN_SIZE 4
455
456 static struct hash_control *aarch64_ops_hsh;
457 static struct hash_control *aarch64_cond_hsh;
458 static struct hash_control *aarch64_shift_hsh;
459 static struct hash_control *aarch64_sys_regs_hsh;
460 static struct hash_control *aarch64_pstatefield_hsh;
461 static struct hash_control *aarch64_sys_regs_ic_hsh;
462 static struct hash_control *aarch64_sys_regs_dc_hsh;
463 static struct hash_control *aarch64_sys_regs_at_hsh;
464 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
465 static struct hash_control *aarch64_sys_regs_sr_hsh;
466 static struct hash_control *aarch64_reg_hsh;
467 static struct hash_control *aarch64_barrier_opt_hsh;
468 static struct hash_control *aarch64_nzcv_hsh;
469 static struct hash_control *aarch64_pldop_hsh;
470 static struct hash_control *aarch64_hint_opt_hsh;
471
472 /* Stuff needed to resolve the label ambiguity
473 As:
474 ...
475 label: <insn>
476 may differ from:
477 ...
478 label:
479 <insn> */
480
481 static symbolS *last_label_seen;
482
483 /* Literal pool structure. Held on a per-section
484 and per-sub-section basis. */
485
486 #define MAX_LITERAL_POOL_SIZE 1024
487 typedef struct literal_expression
488 {
489 expressionS exp;
490 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
491 LITTLENUM_TYPE * bignum;
492 } literal_expression;
493
494 typedef struct literal_pool
495 {
496 literal_expression literals[MAX_LITERAL_POOL_SIZE];
497 unsigned int next_free_entry;
498 unsigned int id;
499 symbolS *symbol;
500 segT section;
501 subsegT sub_section;
502 int size;
503 struct literal_pool *next;
504 } literal_pool;
505
506 /* Pointer to a linked list of literal pools. */
507 static literal_pool *list_of_pools = NULL;
508 \f
509 /* Pure syntax. */
510
511 /* This array holds the chars that always start a comment. If the
512 pre-processor is disabled, these aren't very useful. */
513 const char comment_chars[] = "";
514
515 /* This array holds the chars that only start a comment at the beginning of
516 a line. If the line seems to have the form '# 123 filename'
517 .line and .file directives will appear in the pre-processed output. */
518 /* Note that input_file.c hand checks for '#' at the beginning of the
519 first line of the input file. This is because the compiler outputs
520 #NO_APP at the beginning of its output. */
521 /* Also note that comments like this one will always work. */
522 const char line_comment_chars[] = "#";
523
524 const char line_separator_chars[] = ";";
525
526 /* Chars that can be used to separate mant
527 from exp in floating point numbers. */
528 const char EXP_CHARS[] = "eE";
529
530 /* Chars that mean this number is a floating point constant. */
531 /* As in 0f12.456 */
532 /* or 0d1.2345e12 */
533
534 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
535
536 /* Prefix character that indicates the start of an immediate value. */
537 #define is_immediate_prefix(C) ((C) == '#')
538
539 /* Separator character handling. */
540
541 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
542
543 static inline bfd_boolean
544 skip_past_char (char **str, char c)
545 {
546 if (**str == c)
547 {
548 (*str)++;
549 return TRUE;
550 }
551 else
552 return FALSE;
553 }
554
555 #define skip_past_comma(str) skip_past_char (str, ',')
556
557 /* Arithmetic expressions (possibly involving symbols). */
558
559 static bfd_boolean in_my_get_expression_p = FALSE;
560
561 /* Third argument to my_get_expression. */
562 #define GE_NO_PREFIX 0
563 #define GE_OPT_PREFIX 1
564
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   PREFIX_MODE is GE_NO_PREFIX or GE_OPT_PREFIX, controlling whether a
   leading immediate prefix ('#') is accepted.  If REJECT_ABSENT is
   non-zero an O_absent result is treated as a failure.  On any exit,
   *STR is advanced to where parsing stopped.  */

static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      /* Consume the optional '#' immediate prefix if present.  */
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression () parser works on input_line_pointer, so
     temporarily redirect it at *STR; in_my_get_expression_p lets
     md_operand () flag bad expressions back to us via O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  /* Restore input_line_pointer and report how far we got.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
631
632 /* Turn a string in input_line_pointer into a floating point constant
633 of type TYPE, and store the appropriate bytes in *LITP. The number
634 of LITTLENUMS emitted is stored in *SIZEP. An error message is
635 returned, or NULL on OK. */
636
637 const char *
638 md_atof (int type, char *litP, int *sizeP)
639 {
640 return ieee_md_atof (type, litP, sizeP, target_big_endian);
641 }
642
643 /* We handle all bad expressions here, so that we can report the faulty
644 instruction in the error message. */
645 void
646 md_operand (expressionS * exp)
647 {
648 if (in_my_get_expression_p)
649 exp->X_op = O_illegal;
650 }
651
652 /* Immediate values. */
653
654 /* Errors may be set multiple times during parsing or bit encoding
655 (particularly in the Neon bits), but usually the earliest error which is set
656 will be the most meaningful. Avoid overwriting it with later (cascading)
657 errors by calling this function. */
658
/* Record ERROR as a syntax error unless an earlier (and usually more
   meaningful) error has already been recorded; later cascading errors
   would otherwise clobber it.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;

  set_syntax_error (error);
}
665
666 /* Similar to first_error, but this function accepts formatted error
667 message. */
668 static void
669 first_error_fmt (const char *format, ...)
670 {
671 va_list args;
672 enum
673 { size = 100 };
674 /* N.B. this single buffer will not cause error messages for different
675 instructions to pollute each other; this is because at the end of
676 processing of each assembly line, error message if any will be
677 collected by as_bad. */
678 static char buffer[size];
679
680 if (! error_p ())
681 {
682 int ret ATTRIBUTE_UNUSED;
683 va_start (args, format);
684 ret = vsnprintf (buffer, size, format, args);
685 know (ret <= size - 1 && ret >= 0);
686 va_end (args);
687 set_syntax_error (buffer);
688 }
689 }
690
691 /* Register parsing. */
692
693 /* Generic register parser which is called by other specialized
694 register parsers.
695 CCP points to what should be the beginning of a register name.
696 If it is indeed a valid register name, advance CCP over it and
697 return the reg_entry structure; otherwise return NULL.
698 It does not issue diagnostics. */
699
700 static reg_entry *
701 parse_reg (char **ccp)
702 {
703 char *start = *ccp;
704 char *p;
705 reg_entry *reg;
706
707 #ifdef REGISTER_PREFIX
708 if (*start != REGISTER_PREFIX)
709 return NULL;
710 start++;
711 #endif
712
713 p = start;
714 if (!ISALPHA (*p) || !is_name_beginner (*p))
715 return NULL;
716
717 do
718 p++;
719 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
720
721 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
722
723 if (!reg)
724 return NULL;
725
726 *ccp = p;
727 return reg;
728 }
729
730 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
731 return FALSE. */
732 static bfd_boolean
733 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
734 {
735 return (reg_type_masks[type] & (1 << reg->type)) != 0;
736 }
737
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier
   (W or X for general registers; S_S or S_D for SVE Zn.s / Zn.d).
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit general register, WSP or WZR.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit general register, SP or XZR.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    /* SVE vector register: only valid when REG_TYPE allows ZN and a
       ".s" or ".d" element-size suffix follows.  */
    case REG_TYPE_ZN:
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the two-character suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
795
796 /* Try to parse a base or offset register. Return the register entry
797 on success, setting *QUALIFIER to the register qualifier. Return null
798 otherwise.
799
800 Note that this function does not issue any diagnostics. */
801
802 static const reg_entry *
803 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
804 {
805 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
806 }
807
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise record an error and return FALSE.  *STR must
   point at the '.' that introduces the qualifier and is advanced past
   the suffix on success.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector/predicate registers take a bare element-type suffix
     (e.g. "z0.s"); a missing leading digit likewise means no explicit
     element count.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only valid for SVE or as a 1-element vector.  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* A fixed-width vector must total 64 or 128 bits, except for the
     32-bit forms 2h and 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
893
894 /* *STR contains an SVE zero/merge predication suffix. Parse it into
895 *PARSED_TYPE and point *STR at the end of the suffix. */
896
897 static bfd_boolean
898 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
899 {
900 char *ptr = *str;
901
902 /* Skip '/'. */
903 gas_assert (*ptr == '/');
904 ptr++;
905 switch (TOLOWER (*ptr))
906 {
907 case 'z':
908 parsed_type->type = NT_zero;
909 break;
910 case 'm':
911 parsed_type->type = NT_merge;
912 break;
913 default:
914 if (*ptr != '\0' && *ptr != ',')
915 first_error_fmt (_("unexpected character `%c' in predication type"),
916 *ptr);
917 else
918 first_error (_("missing predication type"));
919 return FALSE;
920 }
921 parsed_type->width = 0;
922 *str = ptr + 1;
923 return TRUE;
924 }
925
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE; an error is recorded via
   set_default_error / first_error in that case.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list (indexes are then disallowed on the individual registers).  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start with "no shape information".  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the concrete type of the register we found.  */
  type = reg->type;

  /* Parse an optional ".<shape>" suffix, or "/z" / "/m" predication on
     SVE predicate registers.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register if of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* Parse an optional "[<index>]" element index.  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  Note: only the error
     is recorded here; the register number is still returned and the
     caller is expected to check error_p ().  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1061
1062 /* Parse register.
1063
1064 Return the register number on success; return PARSE_FAIL otherwise.
1065
1066 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1067 the register (e.g. NEON double or quad reg when either has been requested).
1068
1069 If this is a NEON vector register with additional type information, fill
1070 in the struct pointed to by VECTYPE (if non-NULL).
1071
1072 This parser does not handle register list. */
1073
1074 static int
1075 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1076 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1077 {
1078 struct vector_type_el atype;
1079 char *str = *ccp;
1080 int reg = parse_typed_reg (&str, type, rtype, &atype,
1081 /*in_reg_list= */ FALSE);
1082
1083 if (reg == PARSE_FAIL)
1084 return PARSE_FAIL;
1085
1086 if (vectype)
1087 *vectype = atype;
1088
1089 *ccp = str;
1090
1091 return reg;
1092 }
1093
1094 static inline bfd_boolean
1095 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1096 {
1097 return
1098 e1.type == e2.type
1099 && e1.defined == e2.defined
1100 && e1.width == e2.width && e1.index == e2.index;
1101 }
1102
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;	/* start of the range is the previous reg.  */
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* A descending range such as {v3.4s-v1.4s} is rejected.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  /* Bump past the already-recorded range start so the expansion
	     loop below emits only the remaining registers.  */
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* The first register's shape/index fixes the expected shape for
	     the rest of the list.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      /* Expand a single register or an a-b range into 5-bit regno fields,
	 least-significant field first.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue on ',' (next element) or '-' (range); the comma operator in
     the '-' test sets in_range for the next iteration.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  /* If any element carried an index, a shared [index] must follow the
     closing brace, e.g. {v0.b, v1.b}[3].  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Encode the count in the low 2 bits, register numbers above it.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1263
1264 /* Directives: register aliases. */
1265
1266 static reg_entry *
1267 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1268 {
1269 reg_entry *new;
1270 const char *name;
1271
1272 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1273 {
1274 if (new->builtin)
1275 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1276 str);
1277
1278 /* Only warn about a redefinition if it's not defined as the
1279 same register. */
1280 else if (new->number != number || new->type != type)
1281 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1282
1283 return NULL;
1284 }
1285
1286 name = xstrdup (str);
1287 new = XNEW (reg_entry);
1288
1289 new->name = name;
1290 new->number = number;
1291 new->type = type;
1292 new->builtin = FALSE;
1293
1294 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1295 abort ();
1296
1297 return new;
1298 }
1299
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Still TRUE: it was a .req, just an invalid one.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the case-folded variants when they differ from the
	 name as written (strncmp != 0 means "differs").  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  /* insert_reg_alias copies the name (xstrdup), so nbuf is always ours
     to free.  */
  free (nbuf);
  return TRUE;
}
1379
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line; a leading ".req"
   therefore only reaches here on malformed input.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1387
1388 /* The .unreq directive deletes an alias which was previously defined
1389 by .req. For example:
1390
1391 my_alias .req r11
1392 .unreq my_alias */
1393
1394 static void
1395 s_unreq (int a ATTRIBUTE_UNUSED)
1396 {
1397 char *name;
1398 char saved_char;
1399
1400 name = input_line_pointer;
1401
1402 while (*input_line_pointer != 0
1403 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1404 ++input_line_pointer;
1405
1406 saved_char = *input_line_pointer;
1407 *input_line_pointer = 0;
1408
1409 if (!*name)
1410 as_bad (_("invalid syntax for .unreq directive"));
1411 else
1412 {
1413 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1414
1415 if (!reg)
1416 as_bad (_("unknown register alias '%s'"), name);
1417 else if (reg->builtin)
1418 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1419 name);
1420 else
1421 {
1422 char *p;
1423 char *nbuf;
1424
1425 hash_delete (aarch64_reg_hsh, name, FALSE);
1426 free ((char *) reg->name);
1427 free (reg);
1428
1429 /* Also locate the all upper case and all lower case versions.
1430 Do not complain if we cannot find one or the other as it
1431 was probably deleted above. */
1432
1433 nbuf = strdup (name);
1434 for (p = nbuf; *p; p++)
1435 *p = TOUPPER (*p);
1436 reg = hash_find (aarch64_reg_hsh, nbuf);
1437 if (reg)
1438 {
1439 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1440 free ((char *) reg->name);
1441 free (reg);
1442 }
1443
1444 for (p = nbuf; *p; p++)
1445 *p = TOLOWER (*p);
1446 reg = hash_find (aarch64_reg_hsh, nbuf);
1447 if (reg)
1448 {
1449 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1450 free ((char *) reg->name);
1451 free (reg);
1452 }
1453
1454 free (nbuf);
1455 }
1456 }
1457
1458 *input_line_pointer = saved_char;
1459 demand_empty_rest_of_line ();
1460 }
1461
1462 /* Directives: Instruction set selection. */
1463
1464 #ifdef OBJ_ELF
1465 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1466 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1467 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1468 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1469
/* Create a new mapping symbol ($d for data, $x for code) at offset
   VALUE within FRAG, marking the transition to STATE.  Updates the
   frag's first_map/last_map bookkeeping, replacing any symbol already
   at the same offset.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Same offset as the previous mapping symbol: the new one
	 supersedes it.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1525
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Emit a $d symbol at offset VALUE within FRAG covering BYTES bytes of
   padding, followed by a STATE symbol at the padded (aligned)
   address.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The removed symbol was also the frag's first; keep the
	     bookkeeping consistent.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1553
1554 static void mapping_state_2 (enum mstate state, int max_chars);
1555
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.

   Handles the special first-transition cases; the common path
   delegates to mapping_state_2 with no already-emitted bytes.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* A $d symbol marks whatever preceded the first instructions.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1597
1598 /* Same as mapping_state, but MAX_CHARS bytes have already been
1599 allocated. Put the mapping symbol that far back. */
1600
1601 static void
1602 mapping_state_2 (enum mstate state, int max_chars)
1603 {
1604 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1605
1606 if (!SEG_NORMAL (now_seg))
1607 return;
1608
1609 if (mapstate == state)
1610 /* The mapping symbol has already been emitted.
1611 There is nothing else to do. */
1612 return;
1613
1614 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1615 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1616 }
1617 #else
1618 #define mapping_state(x) /* nothing */
1619 #define mapping_state_2(x, y) /* nothing */
1620 #endif
1621
1622 /* Directives: sectioning and alignment. */
1623
/* Handle the .bss directive: switch to the BSS section and mark the
   current mapping state as data.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1633
/* Handle the .even directive: align the output to a 2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1645
1646 /* Directives: Literal pools. */
1647
1648 static literal_pool *
1649 find_literal_pool (int size)
1650 {
1651 literal_pool *pool;
1652
1653 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1654 {
1655 if (pool->section == now_seg
1656 && pool->sub_section == now_subseg && pool->size == size)
1657 break;
1658 }
1659
1660 return pool;
1661 }
1662
1663 static literal_pool *
1664 find_or_make_literal_pool (int size)
1665 {
1666 /* Next literal pool ID number. */
1667 static unsigned int latest_pool_num = 1;
1668 literal_pool *pool;
1669
1670 pool = find_literal_pool (size);
1671
1672 if (pool == NULL)
1673 {
1674 /* Create a new pool. */
1675 pool = XNEW (literal_pool);
1676 if (!pool)
1677 return NULL;
1678
1679 /* Currently we always put the literal pool in the current text
1680 section. If we were generating "small" model code where we
1681 knew that all code and initialised data was within 1MB then
1682 we could output literals to mergeable, read-only data
1683 sections. */
1684
1685 pool->next_free_entry = 0;
1686 pool->section = now_seg;
1687 pool->sub_section = now_subseg;
1688 pool->size = size;
1689 pool->next = list_of_pools;
1690 pool->symbol = NULL;
1691
1692 /* Add it to the list. */
1693 list_of_pools = pool;
1694 }
1695
1696 /* New pools, and emptied pools, will have a NULL symbol. */
1697 if (pool->symbol == NULL)
1698 {
1699 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1700 (valueT) 0, &zero_address_frag);
1701 pool->id = latest_pool_num++;
1702 }
1703
1704 /* Done. */
1705 return pool;
1706 }
1707
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   On return *EXP is rewritten to an O_symbol reference into the pool
   (pool symbol plus byte offset of the entry).
   Return TRUE on success, otherwise return FALSE.  */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match on symbol(s) and addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP as a reference into the pool: the pool symbol plus
     the byte offset of this entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1767
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Give the pre-created symbol SYMBOLP its name, segment, value and
   frag, then append it to the global symbol chain and run the usual
   new-symbol hooks.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its name.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1818
1819
/* Handle the .ltorg/.pool directive: dump every non-empty literal pool
   (word, xword and 16-byte pools) at the current location, then mark
   the pools as empty so later literals start fresh ones.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* align 2..4 covers entry sizes 4, 8 and 16 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 byte keeps the generated pool label out of the user's
	 namespace.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Resolve the pool symbol to the pool's actual address now that
	 it is known.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1878
1879 #ifdef OBJ_ELF
1880 /* Forward declarations for functions below, in the MD interface
1881 section. */
1882 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1883 static struct reloc_table_entry * find_reloc_table_entry (char **);
1884
1885 /* Directives: Data. */
1886 /* N.B. the support for relocation suffix in this directive needs to be
1887 implemented properly. */
1888
/* Handle .word/.long/.xword/.dword: emit a comma-separated list of
   NBYTES-sized data expressions.  Relocation suffixes (":suffix:") are
   recognized but rejected as unimplemented.  */
static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:suffix:" relocation specifier.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1940
1941 #endif /* OBJ_ELF */
1942
/* Output a 32-bit word, but mark as an instruction.  Handles the
   .inst directive: a comma-separated list of constant 32-bit
   instruction encodings, byte-swapped on big-endian targets so the
   instruction stream stays little-endian.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instructions are always little-endian; swap on BE targets.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1995
1996 #ifdef OBJ_ELF
/* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.
   Handles the .tlsdescadd directive; the fix is placed at the current
   output position, so the ADD must follow immediately.  */

static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands inside the current frag.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2011
/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.
   Handles the .tlsdesccall directive.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2031
/* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.
   Handles the .tlsdescldr directive; the fix is placed at the current
   output position, so the LDR must follow immediately.  */

static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands inside the current frag.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2046 #endif /* OBJ_ELF */
2047
2048 static void s_aarch64_arch (int);
2049 static void s_aarch64_cpu (int);
2050 static void s_aarch64_arch_extension (int);
2051
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  /* Section and alignment directives.  */
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* Literal pool directives (.pool is an alias for .ltorg).  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Target selection directives.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* Data directives; the argument is the element size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};
2081 \f
2082
2083 /* Check whether STR points to a register name followed by a comma or the
2084 end of line; REG_TYPE indicates which register types are checked
2085 against. Return TRUE if STR is such a register name; otherwise return
2086 FALSE. The function does not intend to produce any diagnostics, but since
2087 the register parser aarch64_reg_parse, which is called by this function,
2088 does produce diagnostics, we call clear_error to clear any diagnostics
2089 that may be generated by aarch64_reg_parse.
2090 Also, the function returns FALSE directly if there is any user error
2091 present at the function entry. This prevents the existing diagnostics
2092 state from being spoiled.
2093 The function currently serves parse_constant_immediate and
2094 parse_big_immediate only. */
2095 static bfd_boolean
2096 reg_name_p (char *str, aarch64_reg_type reg_type)
2097 {
2098 int reg;
2099
2100 /* Prevent the diagnostics state from being spoiled. */
2101 if (error_p ())
2102 return FALSE;
2103
2104 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2105
2106 /* Clear the parsing error that may be set by the reg parser. */
2107 clear_error ();
2108
2109 if (reg == PARSE_FAIL)
2110 return FALSE;
2111
2112 skip_whitespace (str);
2113 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2114 return TRUE;
2115
2116 return FALSE;
2117 }
2118
2119 /* Parser functions used exclusively in instruction operands. */
2120
2121 /* Parse an immediate expression which may not be constant.
2122
2123 To prevent the expression parser from pushing a register name
2124 into the symbol table as an undefined symbol, firstly a check is
2125 done to find out whether STR is a register of type REG_TYPE followed
2126 by a comma or the end of line. Return FALSE if STR is such a string. */
2127
2128 static bfd_boolean
2129 parse_immediate_expression (char **str, expressionS *exp,
2130 aarch64_reg_type reg_type)
2131 {
2132 if (reg_name_p (*str, reg_type))
2133 {
2134 set_recoverable_error (_("immediate operand required"));
2135 return FALSE;
2136 }
2137
2138 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2139
2140 if (exp->X_op == O_absent)
2141 {
2142 set_fatal_syntax_error (_("missing immediate expression"));
2143 return FALSE;
2144 }
2145
2146 return TRUE;
2147 }
2148
2149 /* Constant immediate-value read function for use in insn parsing.
2150 STR points to the beginning of the immediate (with the optional
2151 leading #); *VAL receives the value. REG_TYPE says which register
2152 names should be treated as registers rather than as symbolic immediates.
2153
2154 Return TRUE on success; otherwise return FALSE. */
2155
2156 static bfd_boolean
2157 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2158 {
2159 expressionS exp;
2160
2161 if (! parse_immediate_expression (str, &exp, reg_type))
2162 return FALSE;
2163
2164 if (exp.X_op != O_constant)
2165 {
2166 set_syntax_error (_("constant expression required"));
2167 return FALSE;
2168 }
2169
2170 *val = exp.X_add_number;
2171 return TRUE;
2172 }
2173
/* Compress the IEEE single-precision bit pattern IMM into the 8-bit
   AArch64 floating-point immediate encoding: the low 7 bits come from
   IMM's bits 25:19 and the top bit from IMM's sign bit 31.  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t mantissa_exp = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> (31 - 7)) & 0x80;	/* b[31] -> b[7].  */

  return mantissa_exp | sign;
}
2180
2181 /* Return TRUE if the single-precision floating-point value encoded in IMM
2182 can be expressed in the AArch64 8-bit signed floating-point format with
2183 3-bit exponent and normalized 4 bits of precision; in other words, the
2184 floating-point value must be expressable as
2185 (+/-) n / 16 * power (2, r)
2186 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2187
2188 static bfd_boolean
2189 aarch64_imm_float_p (uint32_t imm)
2190 {
2191 /* If a single-precision floating-point value has the following bit
2192 pattern, it can be expressed in the AArch64 8-bit floating-point
2193 format:
2194
2195 3 32222222 2221111111111
2196 1 09876543 21098765432109876543210
2197 n Eeeeeexx xxxx0000000000000000000
2198
2199 where n, e and each x are either 0 or 1 independently, with
2200 E == ~ e. */
2201
2202 uint32_t pattern;
2203
2204 /* Prepare the pattern for 'Eeeeee'. */
2205 if (((imm >> 30) & 0x1) == 0)
2206 pattern = 0x3e000000;
2207 else
2208 pattern = 0x40000000;
2209
2210 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2211 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2212 }
2213
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.  */

static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       ----------------------------->     nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s: they do not fit in a float's 23-bit
     mantissa.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'.  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~: the exponent must be within the float's range.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111: that exponent would become Inf/NaN
     in single precision.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  /* Repack sign, rebased exponent and truncated mantissa into a
     32-bit float word.  */
  *fpword = ((high32 & 0xc0000000)	/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));	/* 3 S bits.  */
  return TRUE;
}
2261
2262 /* Return true if we should treat OPERAND as a double-precision
2263 floating-point operand rather than a single-precision one. */
2264 static bfd_boolean
2265 double_precision_operand_p (const aarch64_opnd_info *operand)
2266 {
2267 /* Check for unsuffixed SVE registers, which are allowed
2268 for LDR and STR but not in instructions that require an
2269 immediate. We get better error messages if we arbitrarily
2270 pick one size, parse the immediate normally, and then
2271 report the match failure in the normal way. */
2272 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2273 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2274 }
2275
2276 /* Parse a floating-point immediate. Return TRUE on success and return the
2277 value in *IMMED in the format of IEEE754 single-precision encoding.
2278 *CCP points to the start of the string; DP_P is TRUE when the immediate
2279 is expected to be in double-precision (N.B. this only matters when
2280 hexadecimal representation is involved). REG_TYPE says which register
2281 names should be treated as registers rather than as symbolic immediates.
2282
2283 This routine accepts any IEEE float; it is up to the callers to reject
2284 invalid ones. */
2285
2286 static bfd_boolean
2287 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
2288 aarch64_reg_type reg_type)
2289 {
2290 char *str = *ccp;
2291 char *fpnum;
2292 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2293 int64_t val = 0;
2294 unsigned fpword = 0;
2295 bfd_boolean hex_p = FALSE;
2296
2297 skip_past_char (&str, '#');
2298
2299 fpnum = str;
2300 skip_whitespace (fpnum);
2301
2302 if (strncmp (fpnum, "0x", 2) == 0)
2303 {
2304 /* Support the hexadecimal representation of the IEEE754 encoding.
2305 Double-precision is expected when DP_P is TRUE, otherwise the
2306 representation should be in single-precision. */
2307 if (! parse_constant_immediate (&str, &val, reg_type))
2308 goto invalid_fp;
2309
2310 if (dp_p)
2311 {
2312 if (!can_convert_double_to_float (val, &fpword))
2313 goto invalid_fp;
2314 }
2315 else if ((uint64_t) val > 0xffffffff)
2316 goto invalid_fp;
2317 else
2318 fpword = val;
2319
2320 hex_p = TRUE;
2321 }
2322 else if (reg_name_p (str, reg_type))
2323 {
2324 set_recoverable_error (_("immediate operand required"));
2325 return FALSE;
2326 }
2327
2328 if (! hex_p)
2329 {
2330 int i;
2331
2332 if ((str = atof_ieee (str, 's', words)) == NULL)
2333 goto invalid_fp;
2334
2335 /* Our FP word must be 32 bits (single-precision FP). */
2336 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2337 {
2338 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2339 fpword |= words[i];
2340 }
2341 }
2342
2343 *immed = fpword;
2344 *ccp = str;
2345 return TRUE;
2346
2347 invalid_fp:
2348 set_fatal_syntax_error (_("invalid floating-point constant"));
2349 return FALSE;
2350 }
2351
2352 /* Less-generic immediate-value read function with the possibility of loading
2353 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2354 instructions.
2355
2356 To prevent the expression parser from pushing a register name into the
2357 symbol table as an undefined symbol, a check is firstly done to find
2358 out whether STR is a register of type REG_TYPE followed by a comma or
2359 the end of line. Return FALSE if STR is such a register. */
2360
2361 static bfd_boolean
2362 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2363 {
2364 char *ptr = *str;
2365
2366 if (reg_name_p (ptr, reg_type))
2367 {
2368 set_syntax_error (_("immediate operand required"));
2369 return FALSE;
2370 }
2371
2372 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2373
2374 if (inst.reloc.exp.X_op == O_constant)
2375 *imm = inst.reloc.exp.X_add_number;
2376
2377 *str = ptr;
2378
2379 return TRUE;
2380 }
2381
2382 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2383 if NEED_LIBOPCODES is non-zero, the fixup will need
2384 assistance from the libopcodes. */
2385
2386 static inline void
2387 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2388 const aarch64_opnd_info *operand,
2389 int need_libopcodes_p)
2390 {
2391 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2392 reloc->opnd = operand->type;
2393 if (need_libopcodes_p)
2394 reloc->need_libopcodes_p = 1;
2395 };
2396
2397 /* Return TRUE if the instruction needs to be fixed up later internally by
2398 the GAS; otherwise return FALSE. */
2399
2400 static inline bfd_boolean
2401 aarch64_gas_internal_fixup_p (void)
2402 {
2403 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2404 }
2405
2406 /* Assign the immediate value to the relevant field in *OPERAND if
2407 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2408 needs an internal fixup in a later stage.
2409 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2410 IMM.VALUE that may get assigned with the constant. */
2411 static inline void
2412 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2413 aarch64_opnd_info *operand,
2414 int addr_off_p,
2415 int need_libopcodes_p,
2416 int skip_p)
2417 {
2418 if (reloc->exp.X_op == O_constant)
2419 {
2420 if (addr_off_p)
2421 operand->addr.offset.imm = reloc->exp.X_add_number;
2422 else
2423 operand->imm.value = reloc->exp.X_add_number;
2424 reloc->type = BFD_RELOC_UNUSED;
2425 }
2426 else
2427 {
2428 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2429 /* Tell libopcodes to ignore this operand or not. This is helpful
2430 when one of the operands needs to be fixed up later but we need
2431 libopcodes to check the other operands. */
2432 operand->skip = skip_p;
2433 }
2434 }
2435
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;	/* Modifier name, without the surrounding colons.  */
  int pc_rel;		/* Non-zero if the relocation is PC-relative.  */
  /* BFD relocation code to use for each instruction class that can
     carry this modifier; 0 means the modifier is not allowed on that
     class of instruction.  */
  bfd_reloc_code_real_type adr_type;		/* ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* ADRP.  */
  bfd_reloc_code_real_type movw_type;		/* MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;		/* Load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* PC-relative literal load.  */
};
2455
/* Each entry lists, in struct order: name, pc_rel, then the relocation
   for ADR, ADRP, MOVW, ADD, load/store and literal-load instructions
   (0 where the modifier is not permitted).  */
static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
2983
2984 /* Given the address of a pointer pointing to the textual name of a
2985 relocation as may appear in assembler source, attempt to find its
2986 details in reloc_table. The pointer will be updated to the character
2987 after the trailing colon. On failure, NULL will be returned;
2988 otherwise return the reloc_table_entry. */
2989
2990 static struct reloc_table_entry *
2991 find_reloc_table_entry (char **str)
2992 {
2993 unsigned int i;
2994 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2995 {
2996 int length = strlen (reloc_table[i].name);
2997
2998 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2999 && (*str)[length] == ':')
3000 {
3001 *str += (length + 1);
3002 return &reloc_table[i];
3003 }
3004 }
3005
3006 return NULL;
3007 }
3008
/* Mode argument to parse_shift and parser_shifter_operand: selects
   which shift/extend modifiers the caller is willing to accept.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3023
/* Parse a <shift> operator on an AArch64 data processing instruction.
   On success, fill in OPERAND->shifter (kind, amount, and the two
   *_present flags), advance *STR past the consumed text, and return
   TRUE; otherwise return FALSE.  MODE restricts which shift/extend
   modifiers and amounts are acceptable.  */
static bfd_boolean
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan over the alphabetic shift/extend mnemonic.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return FALSE;
    }

  /* Look the mnemonic up in the shift-operator hash table.  */
  shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return FALSE;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only accepted where the caller allows "lsl|msl".  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return FALSE;
    }

  /* Likewise MUL is only accepted in the MUL and MUL VL modes.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return FALSE;
    }

  /* Check that the parsed modifier is among those MODE permits.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return FALSE;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return FALSE;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  A closing ']' in register-offset mode, or a
     completed "MUL VL", means no amount expression follows.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* Only extend operators may omit the amount; a bare immediate
	 prefix with no expression is still an error.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return FALSE;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return FALSE;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return FALSE;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return TRUE;
}
3196
3197 /* Parse a <shifter_operand> for a data processing instruction:
3198
3199 #<immediate>
3200 #<immediate>, LSL #imm
3201
3202 Validation of immediate operands is deferred to md_apply_fix.
3203
3204 Return TRUE on success; otherwise return FALSE. */
3205
3206 static bfd_boolean
3207 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3208 enum parse_shift_mode mode)
3209 {
3210 char *p;
3211
3212 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3213 return FALSE;
3214
3215 p = *str;
3216
3217 /* Accept an immediate expression. */
3218 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3219 return FALSE;
3220
3221 /* Accept optional LSL for arithmetic immediate values. */
3222 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3223 if (! parse_shift (&p, operand, SHIFTED_LSL))
3224 return FALSE;
3225
3226 /* Not accept any shifter for logical immediate values. */
3227 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3228 && parse_shift (&p, operand, mode))
3229 {
3230 set_syntax_error (_("unexpected shift operator"));
3231 return FALSE;
3232 }
3233
3234 *str = p;
3235 return TRUE;
3236 }
3237
3238 /* Parse a <shifter_operand> for a data processing instruction:
3239
3240 <Rm>
3241 <Rm>, <shift>
3242 #<immediate>
3243 #<immediate>, LSL #imm
3244
3245 where <shift> is handled by parse_shift above, and the last two
3246 cases are handled by the function above.
3247
3248 Validation of immediate operands is deferred to md_apply_fix.
3249
3250 Return TRUE on success; otherwise return FALSE. */
3251
3252 static bfd_boolean
3253 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3254 enum parse_shift_mode mode)
3255 {
3256 const reg_entry *reg;
3257 aarch64_opnd_qualifier_t qualifier;
3258 enum aarch64_operand_class opd_class
3259 = aarch64_get_operand_class (operand->type);
3260
3261 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3262 if (reg)
3263 {
3264 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3265 {
3266 set_syntax_error (_("unexpected register in the immediate operand"));
3267 return FALSE;
3268 }
3269
3270 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3271 {
3272 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3273 return FALSE;
3274 }
3275
3276 operand->reg.regno = reg->number;
3277 operand->qualifier = qualifier;
3278
3279 /* Accept optional shift operation on register. */
3280 if (! skip_past_comma (str))
3281 return TRUE;
3282
3283 if (! parse_shift (str, operand, mode))
3284 return FALSE;
3285
3286 return TRUE;
3287 }
3288 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3289 {
3290 set_syntax_error
3291 (_("integer register expected in the extended/shifted operand "
3292 "register"));
3293 return FALSE;
3294 }
3295
3296 /* We have a shifted immediate variable. */
3297 return parse_shifter_operand_imm (str, operand, mode);
3298 }
3299
3300 /* Return TRUE on success; return FALSE otherwise. */
3301
3302 static bfd_boolean
3303 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3304 enum parse_shift_mode mode)
3305 {
3306 char *p = *str;
3307
3308 /* Determine if we have the sequence of characters #: or just :
3309 coming next. If we do, then we check for a :rello: relocation
3310 modifier. If we don't, punt the whole lot to
3311 parse_shifter_operand. */
3312
3313 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3314 {
3315 struct reloc_table_entry *entry;
3316
3317 if (p[0] == '#')
3318 p += 2;
3319 else
3320 p++;
3321 *str = p;
3322
3323 /* Try to parse a relocation. Anything else is an error. */
3324 if (!(entry = find_reloc_table_entry (str)))
3325 {
3326 set_syntax_error (_("unknown relocation modifier"));
3327 return FALSE;
3328 }
3329
3330 if (entry->add_type == 0)
3331 {
3332 set_syntax_error
3333 (_("this relocation modifier is not allowed on this instruction"));
3334 return FALSE;
3335 }
3336
3337 /* Save str before we decompose it. */
3338 p = *str;
3339
3340 /* Next, we parse the expression. */
3341 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3342 return FALSE;
3343
3344 /* Record the relocation type (use the ADD variant here). */
3345 inst.reloc.type = entry->add_type;
3346 inst.reloc.pc_rel = entry->pc_rel;
3347
3348 /* If str is empty, we've reached the end, stop here. */
3349 if (**str == '\0')
3350 return TRUE;
3351
3352 /* Otherwise, we have a shifted reloc modifier, so rewind to
3353 recover the variable name and continue parsing for the shifter. */
3354 *str = p;
3355 return parse_shifter_operand_imm (str, operand, mode);
3356 }
3357
3358 return parse_shifter_operand (str, operand, mode);
3359 }
3360
3361 /* Parse all forms of an address expression. Information is written
3362 to *OPERAND and/or inst.reloc.
3363
3364 The A64 instruction set has the following addressing modes:
3365
3366 Offset
3367 [base] // in SIMD ld/st structure
3368 [base{,#0}] // in ld/st exclusive
3369 [base{,#imm}]
3370 [base,Xm{,LSL #imm}]
3371 [base,Xm,SXTX {#imm}]
3372 [base,Wm,(S|U)XTW {#imm}]
3373 Pre-indexed
3374 [base,#imm]!
3375 Post-indexed
3376 [base],#imm
3377 [base],Xm // in SIMD ld/st structure
3378 PC-relative (literal)
3379 label
3380 SVE:
3381 [base,#imm,MUL VL]
3382 [base,Zm.D{,LSL #imm}]
3383 [base,Zm.S,(S|U)XTW {#imm}]
3384 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3385 [Zn.S,#imm]
3386 [Zn.D,#imm]
3387 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3388 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3389 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3390
3391 (As a convenience, the notation "=immediate" is permitted in conjunction
3392 with the pc-relative literal load instructions to automatically place an
3393 immediate value or symbolic address in a nearby literal pool and generate
3394 a hidden label which references it.)
3395
3396 Upon a successful parsing, the address structure in *OPERAND will be
3397 filled in the following way:
3398
3399 .base_regno = <base>
3400 .offset.is_reg // 1 if the offset is a register
3401 .offset.imm = <imm>
3402 .offset.regno = <Rm>
3403
3404 For different addressing modes defined in the A64 ISA:
3405
3406 Offset
3407 .pcrel=0; .preind=1; .postind=0; .writeback=0
3408 Pre-indexed
3409 .pcrel=0; .preind=1; .postind=0; .writeback=1
3410 Post-indexed
3411 .pcrel=0; .preind=0; .postind=1; .writeback=1
3412 PC-relative (literal)
3413 .pcrel=1; .preind=1; .postind=0; .writeback=0
3414
3415 The shift/extension information, if any, will be stored in .shifter.
3416 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3417 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3418 corresponding register.
3419
3420 BASE_TYPE says which types of base register should be accepted and
3421 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3422 is the type of shifter that is allowed for immediate offsets,
3423 or SHIFTED_NONE if none.
3424
3425 In all other respects, it is the caller's responsibility to check
3426 for addressing modes not supported by the instruction, and to set
3427 inst.reloc.type. */
3428
static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;		/* Local parsing cursor; *STR only updated on success.  */
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[' opener: =immediate or label (PC-relative form).  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  /* ADR takes the adr-specific relocation; everything else here
	     is a load-literal.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  /* A zero entry means the modifier exists but is not valid for
	     this class of instruction.  */
	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return FALSE;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}] */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* LSL/SXTX (or no shifter) require a 64-bit offset register
		 matching the size of the base register.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return FALSE;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* SXTW/UXTW only make sense with a 32-bit offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      /* Plain immediate offset.  */
	      if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return FALSE;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return FALSE;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  if (skip_past_char (&p, '!'))
    {
      /* Pre-indexed writeback never allows a register offset.  */
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      /* Post-index register offset must be a 64-bit GPR or XZR.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return FALSE;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  /* Reject [Rn]! */
	  set_syntax_error (_("missing offset in the pre-indexed address"));
	  return FALSE;
	}

      operand->addr.preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }

  *str = p;
  return TRUE;
}
3701
3702 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3703 on success. */
3704 static bfd_boolean
3705 parse_address (char **str, aarch64_opnd_info *operand)
3706 {
3707 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3708 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3709 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3710 }
3711
3712 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3713 The arguments have the same meaning as for parse_address_main.
3714 Return TRUE on success. */
3715 static bfd_boolean
3716 parse_sve_address (char **str, aarch64_opnd_info *operand,
3717 aarch64_opnd_qualifier_t *base_qualifier,
3718 aarch64_opnd_qualifier_t *offset_qualifier)
3719 {
3720 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3721 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3722 SHIFTED_MUL_VL);
3723 }
3724
3725 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3726 Return TRUE on success; otherwise return FALSE. */
3727 static bfd_boolean
3728 parse_half (char **str, int *internal_fixup_p)
3729 {
3730 char *p = *str;
3731
3732 skip_past_char (&p, '#');
3733
3734 gas_assert (internal_fixup_p);
3735 *internal_fixup_p = 0;
3736
3737 if (*p == ':')
3738 {
3739 struct reloc_table_entry *entry;
3740
3741 /* Try to parse a relocation. Anything else is an error. */
3742 ++p;
3743 if (!(entry = find_reloc_table_entry (&p)))
3744 {
3745 set_syntax_error (_("unknown relocation modifier"));
3746 return FALSE;
3747 }
3748
3749 if (entry->movw_type == 0)
3750 {
3751 set_syntax_error
3752 (_("this relocation modifier is not allowed on this instruction"));
3753 return FALSE;
3754 }
3755
3756 inst.reloc.type = entry->movw_type;
3757 }
3758 else
3759 *internal_fixup_p = 1;
3760
3761 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3762 return FALSE;
3763
3764 *str = p;
3765 return TRUE;
3766 }
3767
3768 /* Parse an operand for an ADRP instruction:
3769 ADRP <Xd>, <label>
3770 Return TRUE on success; otherwise return FALSE. */
3771
3772 static bfd_boolean
3773 parse_adrp (char **str)
3774 {
3775 char *p;
3776
3777 p = *str;
3778 if (*p == ':')
3779 {
3780 struct reloc_table_entry *entry;
3781
3782 /* Try to parse a relocation. Anything else is an error. */
3783 ++p;
3784 if (!(entry = find_reloc_table_entry (&p)))
3785 {
3786 set_syntax_error (_("unknown relocation modifier"));
3787 return FALSE;
3788 }
3789
3790 if (entry->adrp_type == 0)
3791 {
3792 set_syntax_error
3793 (_("this relocation modifier is not allowed on this instruction"));
3794 return FALSE;
3795 }
3796
3797 inst.reloc.type = entry->adrp_type;
3798 }
3799 else
3800 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3801
3802 inst.reloc.pc_rel = 1;
3803
3804 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3805 return FALSE;
3806
3807 *str = p;
3808 return TRUE;
3809 }
3810
3811 /* Miscellaneous. */
3812
3813 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3814 of SIZE tokens in which index I gives the token for field value I,
3815 or is null if field value I is invalid. REG_TYPE says which register
3816 names should be treated as registers rather than as symbolic immediates.
3817
3818 Return true on success, moving *STR past the operand and storing the
3819 field value in *VAL. */
3820
3821 static int
3822 parse_enum_string (char **str, int64_t *val, const char *const *array,
3823 size_t size, aarch64_reg_type reg_type)
3824 {
3825 expressionS exp;
3826 char *p, *q;
3827 size_t i;
3828
3829 /* Match C-like tokens. */
3830 p = q = *str;
3831 while (ISALNUM (*q))
3832 q++;
3833
3834 for (i = 0; i < size; ++i)
3835 if (array[i]
3836 && strncasecmp (array[i], p, q - p) == 0
3837 && array[i][q - p] == 0)
3838 {
3839 *val = i;
3840 *str = q;
3841 return TRUE;
3842 }
3843
3844 if (!parse_immediate_expression (&p, &exp, reg_type))
3845 return FALSE;
3846
3847 if (exp.X_op == O_constant
3848 && (uint64_t) exp.X_add_number < size)
3849 {
3850 *val = exp.X_add_number;
3851 *str = p;
3852 return TRUE;
3853 }
3854
3855 /* Use the default error for this operand. */
3856 return FALSE;
3857 }
3858
3859 /* Parse an option for a preload instruction. Returns the encoding for the
3860 option, or PARSE_FAIL. */
3861
3862 static int
3863 parse_pldop (char **str)
3864 {
3865 char *p, *q;
3866 const struct aarch64_name_value_pair *o;
3867
3868 p = q = *str;
3869 while (ISALNUM (*q))
3870 q++;
3871
3872 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3873 if (!o)
3874 return PARSE_FAIL;
3875
3876 *str = q;
3877 return o->value;
3878 }
3879
3880 /* Parse an option for a barrier instruction. Returns the encoding for the
3881 option, or PARSE_FAIL. */
3882
3883 static int
3884 parse_barrier (char **str)
3885 {
3886 char *p, *q;
3887 const asm_barrier_opt *o;
3888
3889 p = q = *str;
3890 while (ISALPHA (*q))
3891 q++;
3892
3893 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3894 if (!o)
3895 return PARSE_FAIL;
3896
3897 *str = q;
3898 return o->value;
3899 }
3900
3901 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3902 return 0 if successful. Otherwise return PARSE_FAIL. */
3903
3904 static int
3905 parse_barrier_psb (char **str,
3906 const struct aarch64_name_value_pair ** hint_opt)
3907 {
3908 char *p, *q;
3909 const struct aarch64_name_value_pair *o;
3910
3911 p = q = *str;
3912 while (ISALPHA (*q))
3913 q++;
3914
3915 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3916 if (!o)
3917 {
3918 set_fatal_syntax_error
3919 ( _("unknown or missing option to PSB"));
3920 return PARSE_FAIL;
3921 }
3922
3923 if (o->value != 0x11)
3924 {
3925 /* PSB only accepts option name 'CSYNC'. */
3926 set_syntax_error
3927 (_("the specified option is not accepted for PSB"));
3928 return PARSE_FAIL;
3929 }
3930
3931 *str = q;
3932 *hint_opt = o;
3933 return 0;
3934 }
3935
3936 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
3937 return 0 if successful. Otherwise return PARSE_FAIL. */
3938
3939 static int
3940 parse_bti_operand (char **str,
3941 const struct aarch64_name_value_pair ** hint_opt)
3942 {
3943 char *p, *q;
3944 const struct aarch64_name_value_pair *o;
3945
3946 p = q = *str;
3947 while (ISALPHA (*q))
3948 q++;
3949
3950 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3951 if (!o)
3952 {
3953 set_fatal_syntax_error
3954 ( _("unknown option to BTI"));
3955 return PARSE_FAIL;
3956 }
3957
3958 switch (o->value)
3959 {
3960 /* Valid BTI operands. */
3961 case HINT_OPD_C:
3962 case HINT_OPD_J:
3963 case HINT_OPD_JC:
3964 break;
3965
3966 default:
3967 set_syntax_error
3968 (_("unknown option to BTI"));
3969 return PARSE_FAIL;
3970 }
3971
3972 *str = q;
3973 *hint_opt = o;
3974 return 0;
3975 }
3976
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.
*/

static int
parse_sys_reg (char **str, struct hash_control *sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF; names longer than
     31 characters are silently truncated in BUF but caught by the
     assert below.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';
  /* Assert that BUF was large enough (i.e. nothing was truncated).  */
  gas_assert (p - buf == q - *str);

  o = hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Each field has a fixed bit width; reject out-of-range values.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields: op0[15:14] op1[13:11] Cn[10:7] Cm[6:3] op2[2:0].  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: diagnose feature mismatches and deprecation, but
	 still return the encoding so assembly can continue.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4044
4045 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4046 for the option, or NULL. */
4047
4048 static const aarch64_sys_ins_reg *
4049 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
4050 {
4051 char *p, *q;
4052 char buf[32];
4053 const aarch64_sys_ins_reg *o;
4054
4055 p = buf;
4056 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4057 if (p < buf + 31)
4058 *p++ = TOLOWER (*q);
4059 *p = '\0';
4060
4061 o = hash_find (sys_ins_regs, buf);
4062 if (!o)
4063 return NULL;
4064
4065 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
4066 as_bad (_("selected processor does not support system register "
4067 "name '%s'"), buf);
4068
4069 *str = q;
4070 return o;
4071 }
4072 \f
/* Operand-parsing convenience macros.  Each parses one component from
   the local STR cursor and jumps to the local `failure' label on error,
   so they may only be used inside functions that declare STR, the other
   referenced locals, and a `failure' label.  */

/* Consume the literal character CHR, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of class REGTYPE into VAL/RTYPE, or fail.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of class REG_TYPE, storing its
   number and qualifier into INFO, or fail.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or fail.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic name (or equivalent constant) indexing ARRAY into
   VAL, or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR and fail when it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4124 \f
/* encode the 12-bit imm field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  /* The field occupies bits [21:10] of the instruction word.  */
  const unsigned imm_lsb = 10;
  return imm << imm_lsb;
}
4131
/* encode the shift amount field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  /* The shift-amount field starts at bit 22.  */
  const unsigned shift_lsb = 22;
  return cnt << shift_lsb;
}
4138
4139
/* encode the imm field of Adr instruction */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;		  /* [1:0] -> [30:29].  */
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;	  /* [20:2] -> [23:5].  */
  return immlo | immhi;
}
4147
/* encode the immediate field of Move wide immediate */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  /* The 16-bit immediate starts at bit 5.  */
  const unsigned imm16_lsb = 5;
  return imm << imm16_lsb;
}
4154
/* encode the 26-bit offset of unconditional branch */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  /* Keep only the low 26 bits; the field sits at [25:0].  */
  return ofs & 0x3ffffff;
}
4161
/* encode the 19-bit offset of conditional branch and compare & branch */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  /* Mask to 19 bits and place the field at [23:5].  */
  return (ofs & 0x7ffff) << 5;
}
4168
/* encode the 19-bit offset of ld literal */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  /* Same field placement as the conditional branch offset: [23:5].  */
  return (ofs & 0x7ffff) << 5;
}
4175
/* Encode the 14-bit offset of test & branch.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  /* Mask to 14 bits and place the field at [18:5].  */
  return (ofs & 0x3fff) << 5;
}
4182
/* Encode the 16-bit imm field of svc/hvc/smc.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  /* The immediate occupies bits [20:5].  */
  const unsigned imm16_lsb = 5;
  return imm << imm16_lsb;
}
4189
/* Reencode add(s) to sub(s), or sub(s) to add(s).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  /* Bit 30 selects between the ADD and SUB encodings; toggle it.  */
  return opcode ^ 0x40000000;
}
4196
/* Force the MOVZ form by setting bit 30 of a MOVZ/MOVN opcode.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | 0x40000000;
}
4202
/* Force the MOVN form by clearing bit 30 of a MOVZ/MOVN opcode.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~0x40000000u;
}
4208
4209 /* Overall per-instruction processing. */
4210
4211 /* We need to be able to fix up arbitrary expressions in some statements.
4212 This is so that we can handle symbols that are an arbitrary distance from
4213 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4214 which returns part of an address in a form which will be valid for
4215 a data instruction. We do this by pushing the expression into a symbol
4216 in the expr_section, and creating a fix for that. */
4217
4218 static fixS *
4219 fix_new_aarch64 (fragS * frag,
4220 int where,
4221 short int size, expressionS * exp, int pc_rel, int reloc)
4222 {
4223 fixS *new_fix;
4224
4225 switch (exp->X_op)
4226 {
4227 case O_constant:
4228 case O_symbol:
4229 case O_add:
4230 case O_subtract:
4231 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4232 break;
4233
4234 default:
4235 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4236 pc_rel, reloc);
4237 break;
4238 }
4239 return new_fix;
4240 }
4241 \f
/* Diagnostics on operands errors.  */

/* Non-zero (the default) to output a verbose error message;
   cleared by the -mno-verbose-error command-line option.  */
static int verbose_error_p = 1;
4247
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging: human-readable names
   for the operand error kinds, indexed by kind value (must stay in the
   same order as the aarch64_operand_error_kind enumerators).  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4263
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enumerators being declared in
     ascending severity order; these asserts verify that assumption.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4284
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.

   N.B. returns a pointer to a static buffer, so the result is only
   valid until the next call.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 characters and assume that the full name is
     included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4313
4314 static void
4315 reset_aarch64_instruction (aarch64_instruction *instruction)
4316 {
4317 memset (instruction, '\0', sizeof (aarch64_instruction));
4318 instruction->reloc.type = BFD_RELOC_UNUSED;
4319 }
4320
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* A single recorded operand error, linked into a per-line list.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode template the error is for.  */
  aarch64_operand_error detail;		/* Kind, operand index and message.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Head and tail of the list of operand errors for the current line.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
4340
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled from previous lines to avoid repeated
   allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4352
4353 /* Initialize the data structure that stores the operand mismatch
4354 information on assembling one line of the assembly code. */
4355 static void
4356 init_operand_error_report (void)
4357 {
4358 if (operand_error_report.head != NULL)
4359 {
4360 gas_assert (operand_error_report.tail != NULL);
4361 operand_error_report.tail->next = free_opnd_error_record_nodes;
4362 free_opnd_error_record_nodes = operand_error_report.head;
4363 operand_error_report.head = NULL;
4364 operand_error_report.tail = NULL;
4365 return;
4366 }
4367 gas_assert (operand_error_report.tail == NULL);
4368 }
4369
4370 /* Return TRUE if some operand error has been recorded during the
4371 parsing of the current assembly line using the opcode *OPCODE;
4372 otherwise return FALSE. */
4373 static inline bfd_boolean
4374 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4375 {
4376 operand_error_record *record = operand_error_report.head;
4377 return record && record->opcode == opcode;
4378 }
4379
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Reuse a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Fill in (or overwrite) the detail of the record for this opcode.  */
  record->detail = new_record->detail;
}
4431
4432 static inline void
4433 record_operand_error_info (const aarch64_opcode *opcode,
4434 aarch64_operand_error *error_info)
4435 {
4436 operand_error_record record;
4437 record.opcode = opcode;
4438 record.detail = *error_info;
4439 add_operand_error_record (&record);
4440 }
4441
4442 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4443 error message *ERROR, for operand IDX (count from 0). */
4444
4445 static void
4446 record_operand_error (const aarch64_opcode *opcode, int idx,
4447 enum aarch64_operand_error_kind kind,
4448 const char* error)
4449 {
4450 aarch64_operand_error info;
4451 memset(&info, 0, sizeof (info));
4452 info.index = idx;
4453 info.kind = kind;
4454 info.error = error;
4455 info.non_fatal = FALSE;
4456 record_operand_error_info (opcode, &info);
4457 }
4458
4459 static void
4460 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4461 enum aarch64_operand_error_kind kind,
4462 const char* error, const int *extra_data)
4463 {
4464 aarch64_operand_error info;
4465 info.index = idx;
4466 info.kind = kind;
4467 info.error = error;
4468 info.data[0] = extra_data[0];
4469 info.data[1] = extra_data[1];
4470 info.data[2] = extra_data[2];
4471 info.non_fatal = FALSE;
4472 record_operand_error_info (opcode, &info);
4473 }
4474
4475 static void
4476 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4477 const char* error, int lower_bound,
4478 int upper_bound)
4479 {
4480 int data[3] = {lower_bound, upper_bound, 0};
4481 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4482 error, data);
4483 }
4484
4485 /* Remove the operand error record for *OPCODE. */
4486 static void ATTRIBUTE_UNUSED
4487 remove_operand_error_record (const aarch64_opcode *opcode)
4488 {
4489 if (opcode_has_operand_error_p (opcode))
4490 {
4491 operand_error_record* record = operand_error_report.head;
4492 gas_assert (record != NULL && operand_error_report.tail != NULL);
4493 operand_error_report.head = record->next;
4494 record->next = free_opnd_error_record_nodes;
4495 free_opnd_error_record_nodes = record;
4496 if (operand_error_report.head == NULL)
4497 {
4498 gas_assert (operand_error_report.tail == record);
4499 operand_error_report.tail = NULL;
4500 }
4501 }
4502 }
4503
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes have much fewer patterns in the list; an empty
	 sequence marks the end of the used entries.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many of the instruction's operand qualifiers agree
	 with this pattern.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Keep the first pattern with the highest match count.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
4553
4554 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4555 corresponding operands in *INSTR. */
4556
4557 static inline void
4558 assign_qualifier_sequence (aarch64_inst *instr,
4559 const aarch64_opnd_qualifier_t *qualifiers)
4560 {
4561 int i = 0;
4562 int num_opnds = aarch64_num_of_operands (instr->opcode);
4563 gas_assert (num_opnds);
4564 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4565 instr->operands[i].qualifier = *qualifiers;
4566 }
4567
/* Print operands for the diagnosis purpose: append a textual rendering
   of OPNDS (per OPCODE) to the caller-supplied string BUF.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.
	 NOTE(review): BUF is grown with unchecked strcat; presumably the
	 callers size it for the worst case -- confirm at call sites.  */
      strcat (buf, str);
    }
}
4601
4602 /* Send to stderr a string as information. */
4603
static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list ap;

  /* Prefix the message with "file:line: " when the location is known,
     mirroring the format of as_warn/as_bad diagnostics.  */
  file = as_where (&line);
  if (file != NULL)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (ap, format);
  vfprintf (stderr, format, ap);
  va_end (ap);
  (void) putc ('\n', stderr);
}
4625
4626 /* Output one operand error record. */
4627
static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  /* Index of the offending operand, or -1 when unknown.  */
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Non-fatal records are reported as warnings so assembly can
     continue; everything else is a hard error.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      /* A record should never be queued without a real error kind.  */
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  /* No prepared message implies the operand index is valid.  */
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate. Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report. Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  /* NOTE: this re-parse clobbers the global INST; callers must not
	     rely on its previous contents after a verbose diagnosis.  */
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail again here (the instruction was
	     invalid); we only need the IR that parsing produced.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the permitted range; equal bounds mean a
	 single permitted value.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected number of registers.  */
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] holds the required alignment multiple.  */
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4802
4803 /* Process and output the error message about the operand mismatching.
4804
4805 When this function is called, the operand error information had
4806 been collected for an assembly line and there will be multiple
4807 errors in the case of multiple instruction templates; output the
4808 error message that most closely describes the problem.
4809
4810 The errors to be printed can be filtered on printing all errors
4811 or only non-fatal errors. This distinction has to be made because
4812 the error buffer may already be filled with fatal errors we don't want to
4813 print due to the different instruction templates. */
4814
static void
output_operand_error_report (char *str, bfd_boolean non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      /* When filtering for non-fatal errors, fatal records do not take
	 part in the severity competition at all.  */
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  /* KIND may legitimately stay NIL only in the non-fatal-only mode,
     when every record in the buffer was fatal.  */
  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
4900 \f
4901 /* Write an AARCH64 instruction to buf - always little-endian. */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Least significant byte first: AArch64 instructions are always
     stored little-endian regardless of target data endianness.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
4911
/* Read a 32-bit AArch64 instruction from BUF, which stores it
   little-endian, and return it as a host integer.  */

static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *where = (unsigned char *) buf;

  /* Each byte must be widened to uint32_t before shifting: the default
     promotion is to (signed) int, and `where[3] << 24' with a byte
     >= 0x80 would shift into the sign bit, which is undefined
     behavior in C.  */
  return ((uint32_t) where[0]
	  | ((uint32_t) where[1] << 8)
	  | ((uint32_t) where[2] << 16)
	  | ((uint32_t) where[3] << 24));
}
4920
/* Append the encoded instruction held in the global INST to the current
   frag, recording a fixup when INST carries a relocation.  NEW_INST, if
   non-NULL, is attached to the fixup so md_apply_fix can re-encode.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve 4 bytes in the output frag.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  /* Write the encoded instruction little-endian.  */
  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand and flags so the backend
	     can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
4954
4955 /* Link together opcodes of the same name. */
4956
struct templates
{
  /* One opcode entry bearing this mnemonic.  */
  aarch64_opcode *opcode;
  /* Next entry in the chain of opcodes sharing the same mnemonic.  */
  struct templates *next;
};

typedef struct templates templates;
4964
4965 static templates *
4966 lookup_mnemonic (const char *start, int len)
4967 {
4968 templates *templ = NULL;
4969
4970 templ = hash_find_n (aarch64_ops_hsh, start, len);
4971 return templ;
4972 }
4973
4974 /* Subroutine of md_assemble, responsible for looking up the primary
4975 opcode from the mnemonic the user wrote. STR points to the
4976 beginning of the mnemonic. */
4977
4978 static templates *
4979 opcode_lookup (char **str)
4980 {
4981 char *end, *base, *dot;
4982 const aarch64_cond *cond;
4983 char condname[16];
4984 int len;
4985
4986 /* Scan up to the end of the mnemonic, which must end in white space,
4987 '.', or end of string. */
4988 dot = 0;
4989 for (base = end = *str; is_part_of_name(*end); end++)
4990 if (*end == '.' && !dot)
4991 dot = end;
4992
4993 if (end == base || dot == base)
4994 return 0;
4995
4996 inst.cond = COND_ALWAYS;
4997
4998 /* Handle a possible condition. */
4999 if (dot)
5000 {
5001 cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5002 if (cond)
5003 {
5004 inst.cond = cond->value;
5005 *str = end;
5006 }
5007 else
5008 {
5009 *str = dot;
5010 return 0;
5011 }
5012 len = dot - base;
5013 }
5014 else
5015 {
5016 *str = end;
5017 len = end - base;
5018 }
5019
5020 if (inst.cond == COND_ALWAYS)
5021 {
5022 /* Look for unaffixed mnemonic. */
5023 return lookup_mnemonic (base, len);
5024 }
5025 else if (len <= 13)
5026 {
5027 /* append ".c" to mnemonic if conditional */
5028 memcpy (condname, base, len);
5029 memcpy (condname + len, ".c", 2);
5030 base = condname;
5031 len += 2;
5032 return lookup_mnemonic (base, len);
5033 }
5034
5035 return NULL;
5036 }
5037
5038 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5039 to a corresponding operand qualifier. */
5040
static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the final qualifier is found
     at a fixed offset from this base (see the shift computation below).
     NOTE(review): this relies on the AARCH64_OPND_QLF_V_* enumerators
     being laid out contiguously in this order.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate qualifiers: /z (zeroing) and /m (merging).  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Vector element register.  Relies on AARCH64_OPND_QLF_S_B..S_Q
	 following the same order as NT_b..NT_q.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only full (16-byte), half (8-byte) and quarter (4-byte)
	 register arrangements are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width
	 by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5108
5109 /* Process an optional operand that is found omitted from the assembly line.
5110 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5111 instruction's opcode entry while IDX is the index of this omitted operand.
5112 */
5113
static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* The default value is stored in the opcode table entry.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  /* Which field of OPERAND receives the default depends on the operand
     class: plain registers, indexed register lanes, immediates, or
     option-table entries.  */
  switch (type)
    {
    /* Integer, FP and SIMD register operands: default is a register
       number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element operands: default is the register number of the
       lane operand.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: default is an immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* An omitted "MUL #n" multiplier defaults to MUL #1.  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate leaves no relocation behind.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Option-table operands: default indexes the relevant table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5205
5206 /* Process the relocation type for move wide instructions.
5207 Return TRUE on success; otherwise return FALSE. */
5208
static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* The destination register's qualifier tells us whether this is the
     32-bit (W) or 64-bit (X) form.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not be used with signed/overflow-checking relocation
     operators, since it only inserts 16 bits without sign semantics.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  /* Each relocation operator selects which 16-bit group of the value is
     moved, i.e. the implicit LSL amount of the MOVZ/MOVN/MOVK.  */
  switch (inst.reloc.type)
    {
    /* G0 relocations: bits [15:0], shift 0.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1 relocations: bits [31:16], shift 16.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2 relocations: bits [47:32], shift 32 -- 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    /* G3 relocations: bits [63:48], shift 48 -- 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5307
5308 /* A primitive log calculator. */
5309
/* A primitive log calculator: return log2 of SIZE for power-of-two
   SIZE in [1, 16]; assert (and return -1) otherwise.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* log2 lookup table indexed by SIZE - 1; (unsigned char) -1 marks
     sizes that are not powers of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Also reject SIZE == 0: SIZE is unsigned, so with only the
     "size > 16" check, 0 would wrap SIZE - 1 to UINT_MAX and read
     far outside LS.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5323
5324 /* Determine and return the real reloc type code for an instruction
5325 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
5326
static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows are indexed by the pseudo relocation type (offset from
     BFD_RELOC_AARCH64_LDST_LO12), columns by log2 of the transfer size.
     The TLS rows have no 128-bit variant, hence the trailing NONEs.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  /* Only the five pseudo relocation types handled by the table above
     are legitimate here.  */
  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand's qualifier was omitted, deduce it from the
     transfer register's qualifier (operand 0).  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* TLS variants stop at 64-bit accesses (see the NONE columns).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5402
5403 /* Check whether a register list REGINFO is valid. The registers must be
5404 numbered in increasing order (modulo 32), in increments of one or two.
5405
5406 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5407 increments of two.
5408
5409 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5410
5411 static bfd_boolean
5412 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5413 {
5414 uint32_t i, nb_regs, prev_regno, incr;
5415
5416 nb_regs = 1 + (reginfo & 0x3);
5417 reginfo >>= 2;
5418 prev_regno = reginfo & 0x1f;
5419 incr = accept_alternate ? 2 : 1;
5420
5421 for (i = 1; i < nb_regs; ++i)
5422 {
5423 uint32_t curr_regno;
5424 reginfo >>= 5;
5425 curr_regno = reginfo & 0x1f;
5426 if (curr_regno != ((prev_regno + incr) & 0x1f))
5427 return FALSE;
5428 prev_regno = curr_regno;
5429 }
5430
5431 return TRUE;
5432 }
5433
5434 /* Generic instruction operand parser. This does no encoding and no
5435 semantic validation; it merely squirrels values away in the inst
5436 structure. Returns TRUE or FALSE depending on whether the
5437 specified grammar matched. */
5438
5439 static bfd_boolean
5440 parse_operands (char *str, const aarch64_opcode *opcode)
5441 {
5442 int i;
5443 char *backtrack_pos = 0;
5444 const enum aarch64_opnd *operands = opcode->operands;
5445 aarch64_reg_type imm_reg_type;
5446
5447 clear_error ();
5448 skip_whitespace (str);
5449
5450 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5451 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5452 else
5453 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5454
5455 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5456 {
5457 int64_t val;
5458 const reg_entry *reg;
5459 int comma_skipped_p = 0;
5460 aarch64_reg_type rtype;
5461 struct vector_type_el vectype;
5462 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5463 aarch64_opnd_info *info = &inst.base.operands[i];
5464 aarch64_reg_type reg_type;
5465
5466 DEBUG_TRACE ("parse operand %d", i);
5467
5468 /* Assign the operand code. */
5469 info->type = operands[i];
5470
5471 if (optional_operand_p (opcode, i))
5472 {
5473 /* Remember where we are in case we need to backtrack. */
5474 gas_assert (!backtrack_pos);
5475 backtrack_pos = str;
5476 }
5477
5478 /* Expect comma between operands; the backtrack mechanism will take
5479 care of cases of omitted optional operand. */
5480 if (i > 0 && ! skip_past_char (&str, ','))
5481 {
5482 set_syntax_error (_("comma expected between operands"));
5483 goto failure;
5484 }
5485 else
5486 comma_skipped_p = 1;
5487
5488 switch (operands[i])
5489 {
5490 case AARCH64_OPND_Rd:
5491 case AARCH64_OPND_Rn:
5492 case AARCH64_OPND_Rm:
5493 case AARCH64_OPND_Rt:
5494 case AARCH64_OPND_Rt2:
5495 case AARCH64_OPND_Rs:
5496 case AARCH64_OPND_Ra:
5497 case AARCH64_OPND_Rt_SYS:
5498 case AARCH64_OPND_PAIRREG:
5499 case AARCH64_OPND_SVE_Rm:
5500 po_int_reg_or_fail (REG_TYPE_R_Z);
5501 break;
5502
5503 case AARCH64_OPND_Rd_SP:
5504 case AARCH64_OPND_Rn_SP:
5505 case AARCH64_OPND_SVE_Rn_SP:
5506 case AARCH64_OPND_Rm_SP:
5507 po_int_reg_or_fail (REG_TYPE_R_SP);
5508 break;
5509
5510 case AARCH64_OPND_Rm_EXT:
5511 case AARCH64_OPND_Rm_SFT:
5512 po_misc_or_fail (parse_shifter_operand
5513 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5514 ? SHIFTED_ARITH_IMM
5515 : SHIFTED_LOGIC_IMM)));
5516 if (!info->shifter.operator_present)
5517 {
5518 /* Default to LSL if not present. Libopcodes prefers shifter
5519 kind to be explicit. */
5520 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5521 info->shifter.kind = AARCH64_MOD_LSL;
5522 /* For Rm_EXT, libopcodes will carry out further check on whether
5523 or not stack pointer is used in the instruction (Recall that
5524 "the extend operator is not optional unless at least one of
5525 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5526 }
5527 break;
5528
5529 case AARCH64_OPND_Fd:
5530 case AARCH64_OPND_Fn:
5531 case AARCH64_OPND_Fm:
5532 case AARCH64_OPND_Fa:
5533 case AARCH64_OPND_Ft:
5534 case AARCH64_OPND_Ft2:
5535 case AARCH64_OPND_Sd:
5536 case AARCH64_OPND_Sn:
5537 case AARCH64_OPND_Sm:
5538 case AARCH64_OPND_SVE_VZn:
5539 case AARCH64_OPND_SVE_Vd:
5540 case AARCH64_OPND_SVE_Vm:
5541 case AARCH64_OPND_SVE_Vn:
5542 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5543 if (val == PARSE_FAIL)
5544 {
5545 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5546 goto failure;
5547 }
5548 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5549
5550 info->reg.regno = val;
5551 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5552 break;
5553
5554 case AARCH64_OPND_SVE_Pd:
5555 case AARCH64_OPND_SVE_Pg3:
5556 case AARCH64_OPND_SVE_Pg4_5:
5557 case AARCH64_OPND_SVE_Pg4_10:
5558 case AARCH64_OPND_SVE_Pg4_16:
5559 case AARCH64_OPND_SVE_Pm:
5560 case AARCH64_OPND_SVE_Pn:
5561 case AARCH64_OPND_SVE_Pt:
5562 reg_type = REG_TYPE_PN;
5563 goto vector_reg;
5564
5565 case AARCH64_OPND_SVE_Za_5:
5566 case AARCH64_OPND_SVE_Za_16:
5567 case AARCH64_OPND_SVE_Zd:
5568 case AARCH64_OPND_SVE_Zm_5:
5569 case AARCH64_OPND_SVE_Zm_16:
5570 case AARCH64_OPND_SVE_Zn:
5571 case AARCH64_OPND_SVE_Zt:
5572 reg_type = REG_TYPE_ZN;
5573 goto vector_reg;
5574
5575 case AARCH64_OPND_Va:
5576 case AARCH64_OPND_Vd:
5577 case AARCH64_OPND_Vn:
5578 case AARCH64_OPND_Vm:
5579 reg_type = REG_TYPE_VN;
5580 vector_reg:
5581 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5582 if (val == PARSE_FAIL)
5583 {
5584 first_error (_(get_reg_expected_msg (reg_type)));
5585 goto failure;
5586 }
5587 if (vectype.defined & NTA_HASINDEX)
5588 goto failure;
5589
5590 info->reg.regno = val;
5591 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5592 && vectype.type == NT_invtype)
5593 /* Unqualified Pn and Zn registers are allowed in certain
5594 contexts. Rely on F_STRICT qualifier checking to catch
5595 invalid uses. */
5596 info->qualifier = AARCH64_OPND_QLF_NIL;
5597 else
5598 {
5599 info->qualifier = vectype_to_qualifier (&vectype);
5600 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5601 goto failure;
5602 }
5603 break;
5604
5605 case AARCH64_OPND_VdD1:
5606 case AARCH64_OPND_VnD1:
5607 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5608 if (val == PARSE_FAIL)
5609 {
5610 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5611 goto failure;
5612 }
5613 if (vectype.type != NT_d || vectype.index != 1)
5614 {
5615 set_fatal_syntax_error
5616 (_("the top half of a 128-bit FP/SIMD register is expected"));
5617 goto failure;
5618 }
5619 info->reg.regno = val;
5620 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5621 here; it is correct for the purpose of encoding/decoding since
5622 only the register number is explicitly encoded in the related
5623 instructions, although this appears a bit hacky. */
5624 info->qualifier = AARCH64_OPND_QLF_S_D;
5625 break;
5626
5627 case AARCH64_OPND_SVE_Zm3_INDEX:
5628 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5629 case AARCH64_OPND_SVE_Zm4_INDEX:
5630 case AARCH64_OPND_SVE_Zn_INDEX:
5631 reg_type = REG_TYPE_ZN;
5632 goto vector_reg_index;
5633
5634 case AARCH64_OPND_Ed:
5635 case AARCH64_OPND_En:
5636 case AARCH64_OPND_Em:
5637 case AARCH64_OPND_Em16:
5638 case AARCH64_OPND_SM3_IMM2:
5639 reg_type = REG_TYPE_VN;
5640 vector_reg_index:
5641 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5642 if (val == PARSE_FAIL)
5643 {
5644 first_error (_(get_reg_expected_msg (reg_type)));
5645 goto failure;
5646 }
5647 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5648 goto failure;
5649
5650 info->reglane.regno = val;
5651 info->reglane.index = vectype.index;
5652 info->qualifier = vectype_to_qualifier (&vectype);
5653 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5654 goto failure;
5655 break;
5656
5657 case AARCH64_OPND_SVE_ZnxN:
5658 case AARCH64_OPND_SVE_ZtxN:
5659 reg_type = REG_TYPE_ZN;
5660 goto vector_reg_list;
5661
5662 case AARCH64_OPND_LVn:
5663 case AARCH64_OPND_LVt:
5664 case AARCH64_OPND_LVt_AL:
5665 case AARCH64_OPND_LEt:
5666 reg_type = REG_TYPE_VN;
5667 vector_reg_list:
5668 if (reg_type == REG_TYPE_ZN
5669 && get_opcode_dependent_value (opcode) == 1
5670 && *str != '{')
5671 {
5672 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5673 if (val == PARSE_FAIL)
5674 {
5675 first_error (_(get_reg_expected_msg (reg_type)));
5676 goto failure;
5677 }
5678 info->reglist.first_regno = val;
5679 info->reglist.num_regs = 1;
5680 }
5681 else
5682 {
5683 val = parse_vector_reg_list (&str, reg_type, &vectype);
5684 if (val == PARSE_FAIL)
5685 goto failure;
5686 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5687 {
5688 set_fatal_syntax_error (_("invalid register list"));
5689 goto failure;
5690 }
5691 info->reglist.first_regno = (val >> 2) & 0x1f;
5692 info->reglist.num_regs = (val & 0x3) + 1;
5693 }
5694 if (operands[i] == AARCH64_OPND_LEt)
5695 {
5696 if (!(vectype.defined & NTA_HASINDEX))
5697 goto failure;
5698 info->reglist.has_index = 1;
5699 info->reglist.index = vectype.index;
5700 }
5701 else
5702 {
5703 if (vectype.defined & NTA_HASINDEX)
5704 goto failure;
5705 if (!(vectype.defined & NTA_HASTYPE))
5706 {
5707 if (reg_type == REG_TYPE_ZN)
5708 set_fatal_syntax_error (_("missing type suffix"));
5709 goto failure;
5710 }
5711 }
5712 info->qualifier = vectype_to_qualifier (&vectype);
5713 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5714 goto failure;
5715 break;
5716
5717 case AARCH64_OPND_CRn:
5718 case AARCH64_OPND_CRm:
5719 {
5720 char prefix = *(str++);
5721 if (prefix != 'c' && prefix != 'C')
5722 goto failure;
5723
5724 po_imm_nc_or_fail ();
5725 if (val > 15)
5726 {
5727 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5728 goto failure;
5729 }
5730 info->qualifier = AARCH64_OPND_QLF_CR;
5731 info->imm.value = val;
5732 break;
5733 }
5734
5735 case AARCH64_OPND_SHLL_IMM:
5736 case AARCH64_OPND_IMM_VLSR:
5737 po_imm_or_fail (1, 64);
5738 info->imm.value = val;
5739 break;
5740
5741 case AARCH64_OPND_CCMP_IMM:
5742 case AARCH64_OPND_SIMM5:
5743 case AARCH64_OPND_FBITS:
5744 case AARCH64_OPND_UIMM4:
5745 case AARCH64_OPND_UIMM4_ADDG:
5746 case AARCH64_OPND_UIMM10:
5747 case AARCH64_OPND_UIMM3_OP1:
5748 case AARCH64_OPND_UIMM3_OP2:
5749 case AARCH64_OPND_IMM_VLSL:
5750 case AARCH64_OPND_IMM:
5751 case AARCH64_OPND_IMM_2:
5752 case AARCH64_OPND_WIDTH:
5753 case AARCH64_OPND_SVE_INV_LIMM:
5754 case AARCH64_OPND_SVE_LIMM:
5755 case AARCH64_OPND_SVE_LIMM_MOV:
5756 case AARCH64_OPND_SVE_SHLIMM_PRED:
5757 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5758 case AARCH64_OPND_SVE_SHRIMM_PRED:
5759 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5760 case AARCH64_OPND_SVE_SIMM5:
5761 case AARCH64_OPND_SVE_SIMM5B:
5762 case AARCH64_OPND_SVE_SIMM6:
5763 case AARCH64_OPND_SVE_SIMM8:
5764 case AARCH64_OPND_SVE_UIMM3:
5765 case AARCH64_OPND_SVE_UIMM7:
5766 case AARCH64_OPND_SVE_UIMM8:
5767 case AARCH64_OPND_SVE_UIMM8_53:
5768 case AARCH64_OPND_IMM_ROT1:
5769 case AARCH64_OPND_IMM_ROT2:
5770 case AARCH64_OPND_IMM_ROT3:
5771 case AARCH64_OPND_SVE_IMM_ROT1:
5772 case AARCH64_OPND_SVE_IMM_ROT2:
5773 po_imm_nc_or_fail ();
5774 info->imm.value = val;
5775 break;
5776
5777 case AARCH64_OPND_SVE_AIMM:
5778 case AARCH64_OPND_SVE_ASIMM:
5779 po_imm_nc_or_fail ();
5780 info->imm.value = val;
5781 skip_whitespace (str);
5782 if (skip_past_comma (&str))
5783 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5784 else
5785 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5786 break;
5787
5788 case AARCH64_OPND_SVE_PATTERN:
5789 po_enum_or_fail (aarch64_sve_pattern_array);
5790 info->imm.value = val;
5791 break;
5792
5793 case AARCH64_OPND_SVE_PATTERN_SCALED:
5794 po_enum_or_fail (aarch64_sve_pattern_array);
5795 info->imm.value = val;
5796 if (skip_past_comma (&str)
5797 && !parse_shift (&str, info, SHIFTED_MUL))
5798 goto failure;
5799 if (!info->shifter.operator_present)
5800 {
5801 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5802 info->shifter.kind = AARCH64_MOD_MUL;
5803 info->shifter.amount = 1;
5804 }
5805 break;
5806
5807 case AARCH64_OPND_SVE_PRFOP:
5808 po_enum_or_fail (aarch64_sve_prfop_array);
5809 info->imm.value = val;
5810 break;
5811
5812 case AARCH64_OPND_UIMM7:
5813 po_imm_or_fail (0, 127);
5814 info->imm.value = val;
5815 break;
5816
5817 case AARCH64_OPND_IDX:
5818 case AARCH64_OPND_MASK:
5819 case AARCH64_OPND_BIT_NUM:
5820 case AARCH64_OPND_IMMR:
5821 case AARCH64_OPND_IMMS:
5822 po_imm_or_fail (0, 63);
5823 info->imm.value = val;
5824 break;
5825
5826 case AARCH64_OPND_IMM0:
5827 po_imm_nc_or_fail ();
5828 if (val != 0)
5829 {
5830 set_fatal_syntax_error (_("immediate zero expected"));
5831 goto failure;
5832 }
5833 info->imm.value = 0;
5834 break;
5835
5836 case AARCH64_OPND_FPIMM0:
5837 {
5838 int qfloat;
5839 bfd_boolean res1 = FALSE, res2 = FALSE;
5840 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5841 it is probably not worth the effort to support it. */
5842 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5843 imm_reg_type))
5844 && (error_p ()
5845 || !(res2 = parse_constant_immediate (&str, &val,
5846 imm_reg_type))))
5847 goto failure;
5848 if ((res1 && qfloat == 0) || (res2 && val == 0))
5849 {
5850 info->imm.value = 0;
5851 info->imm.is_fp = 1;
5852 break;
5853 }
5854 set_fatal_syntax_error (_("immediate zero expected"));
5855 goto failure;
5856 }
5857
5858 case AARCH64_OPND_IMM_MOV:
5859 {
5860 char *saved = str;
5861 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5862 reg_name_p (str, REG_TYPE_VN))
5863 goto failure;
5864 str = saved;
5865 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5866 GE_OPT_PREFIX, 1));
5867 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5868 later. fix_mov_imm_insn will try to determine a machine
5869 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5870 message if the immediate cannot be moved by a single
5871 instruction. */
5872 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5873 inst.base.operands[i].skip = 1;
5874 }
5875 break;
5876
5877 case AARCH64_OPND_SIMD_IMM:
5878 case AARCH64_OPND_SIMD_IMM_SFT:
5879 if (! parse_big_immediate (&str, &val, imm_reg_type))
5880 goto failure;
5881 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5882 /* addr_off_p */ 0,
5883 /* need_libopcodes_p */ 1,
5884 /* skip_p */ 1);
5885 /* Parse shift.
5886 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5887 shift, we don't check it here; we leave the checking to
5888 the libopcodes (operand_general_constraint_met_p). By
5889 doing this, we achieve better diagnostics. */
5890 if (skip_past_comma (&str)
5891 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5892 goto failure;
5893 if (!info->shifter.operator_present
5894 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5895 {
5896 /* Default to LSL if not present. Libopcodes prefers shifter
5897 kind to be explicit. */
5898 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5899 info->shifter.kind = AARCH64_MOD_LSL;
5900 }
5901 break;
5902
5903 case AARCH64_OPND_FPIMM:
5904 case AARCH64_OPND_SIMD_FPIMM:
5905 case AARCH64_OPND_SVE_FPIMM8:
5906 {
5907 int qfloat;
5908 bfd_boolean dp_p;
5909
5910 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5911 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5912 || !aarch64_imm_float_p (qfloat))
5913 {
5914 if (!error_p ())
5915 set_fatal_syntax_error (_("invalid floating-point"
5916 " constant"));
5917 goto failure;
5918 }
5919 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5920 inst.base.operands[i].imm.is_fp = 1;
5921 }
5922 break;
5923
5924 case AARCH64_OPND_SVE_I1_HALF_ONE:
5925 case AARCH64_OPND_SVE_I1_HALF_TWO:
5926 case AARCH64_OPND_SVE_I1_ZERO_ONE:
5927 {
5928 int qfloat;
5929 bfd_boolean dp_p;
5930
5931 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5932 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5933 {
5934 if (!error_p ())
5935 set_fatal_syntax_error (_("invalid floating-point"
5936 " constant"));
5937 goto failure;
5938 }
5939 inst.base.operands[i].imm.value = qfloat;
5940 inst.base.operands[i].imm.is_fp = 1;
5941 }
5942 break;
5943
5944 case AARCH64_OPND_LIMM:
5945 po_misc_or_fail (parse_shifter_operand (&str, info,
5946 SHIFTED_LOGIC_IMM));
5947 if (info->shifter.operator_present)
5948 {
5949 set_fatal_syntax_error
5950 (_("shift not allowed for bitmask immediate"));
5951 goto failure;
5952 }
5953 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5954 /* addr_off_p */ 0,
5955 /* need_libopcodes_p */ 1,
5956 /* skip_p */ 1);
5957 break;
5958
5959 case AARCH64_OPND_AIMM:
5960 if (opcode->op == OP_ADD)
5961 /* ADD may have relocation types. */
5962 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5963 SHIFTED_ARITH_IMM));
5964 else
5965 po_misc_or_fail (parse_shifter_operand (&str, info,
5966 SHIFTED_ARITH_IMM));
5967 switch (inst.reloc.type)
5968 {
5969 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5970 info->shifter.amount = 12;
5971 break;
5972 case BFD_RELOC_UNUSED:
5973 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5974 if (info->shifter.kind != AARCH64_MOD_NONE)
5975 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5976 inst.reloc.pc_rel = 0;
5977 break;
5978 default:
5979 break;
5980 }
5981 info->imm.value = 0;
5982 if (!info->shifter.operator_present)
5983 {
5984 /* Default to LSL if not present. Libopcodes prefers shifter
5985 kind to be explicit. */
5986 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5987 info->shifter.kind = AARCH64_MOD_LSL;
5988 }
5989 break;
5990
5991 case AARCH64_OPND_HALF:
5992 {
5993 /* #<imm16> or relocation. */
5994 int internal_fixup_p;
5995 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5996 if (internal_fixup_p)
5997 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5998 skip_whitespace (str);
5999 if (skip_past_comma (&str))
6000 {
6001 /* {, LSL #<shift>} */
6002 if (! aarch64_gas_internal_fixup_p ())
6003 {
6004 set_fatal_syntax_error (_("can't mix relocation modifier "
6005 "with explicit shift"));
6006 goto failure;
6007 }
6008 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6009 }
6010 else
6011 inst.base.operands[i].shifter.amount = 0;
6012 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6013 inst.base.operands[i].imm.value = 0;
6014 if (! process_movw_reloc_info ())
6015 goto failure;
6016 }
6017 break;
6018
6019 case AARCH64_OPND_EXCEPTION:
6020 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6021 imm_reg_type));
6022 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6023 /* addr_off_p */ 0,
6024 /* need_libopcodes_p */ 0,
6025 /* skip_p */ 1);
6026 break;
6027
6028 case AARCH64_OPND_NZCV:
6029 {
6030 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
6031 if (nzcv != NULL)
6032 {
6033 str += 4;
6034 info->imm.value = nzcv->value;
6035 break;
6036 }
6037 po_imm_or_fail (0, 15);
6038 info->imm.value = val;
6039 }
6040 break;
6041
6042 case AARCH64_OPND_COND:
6043 case AARCH64_OPND_COND1:
6044 {
6045 char *start = str;
6046 do
6047 str++;
6048 while (ISALPHA (*str));
6049 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
6050 if (info->cond == NULL)
6051 {
6052 set_syntax_error (_("invalid condition"));
6053 goto failure;
6054 }
6055 else if (operands[i] == AARCH64_OPND_COND1
6056 && (info->cond->value & 0xe) == 0xe)
6057 {
6058 /* Do not allow AL or NV. */
6059 set_default_error ();
6060 goto failure;
6061 }
6062 }
6063 break;
6064
6065 case AARCH64_OPND_ADDR_ADRP:
6066 po_misc_or_fail (parse_adrp (&str));
6067 /* Clear the value as operand needs to be relocated. */
6068 info->imm.value = 0;
6069 break;
6070
6071 case AARCH64_OPND_ADDR_PCREL14:
6072 case AARCH64_OPND_ADDR_PCREL19:
6073 case AARCH64_OPND_ADDR_PCREL21:
6074 case AARCH64_OPND_ADDR_PCREL26:
6075 po_misc_or_fail (parse_address (&str, info));
6076 if (!info->addr.pcrel)
6077 {
6078 set_syntax_error (_("invalid pc-relative address"));
6079 goto failure;
6080 }
6081 if (inst.gen_lit_pool
6082 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6083 {
6084 /* Only permit "=value" in the literal load instructions.
6085 The literal will be generated by programmer_friendly_fixup. */
6086 set_syntax_error (_("invalid use of \"=immediate\""));
6087 goto failure;
6088 }
6089 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6090 {
6091 set_syntax_error (_("unrecognized relocation suffix"));
6092 goto failure;
6093 }
6094 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6095 {
6096 info->imm.value = inst.reloc.exp.X_add_number;
6097 inst.reloc.type = BFD_RELOC_UNUSED;
6098 }
6099 else
6100 {
6101 info->imm.value = 0;
6102 if (inst.reloc.type == BFD_RELOC_UNUSED)
6103 switch (opcode->iclass)
6104 {
6105 case compbranch:
6106 case condbranch:
6107 /* e.g. CBZ or B.COND */
6108 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6109 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6110 break;
6111 case testbranch:
6112 /* e.g. TBZ */
6113 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6114 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6115 break;
6116 case branch_imm:
6117 /* e.g. B or BL */
6118 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6119 inst.reloc.type =
6120 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6121 : BFD_RELOC_AARCH64_JUMP26;
6122 break;
6123 case loadlit:
6124 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6125 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6126 break;
6127 case pcreladdr:
6128 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6129 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6130 break;
6131 default:
6132 gas_assert (0);
6133 abort ();
6134 }
6135 inst.reloc.pc_rel = 1;
6136 }
6137 break;
6138
6139 case AARCH64_OPND_ADDR_SIMPLE:
6140 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6141 {
6142 /* [<Xn|SP>{, #<simm>}] */
6143 char *start = str;
6144 /* First use the normal address-parsing routines, to get
6145 the usual syntax errors. */
6146 po_misc_or_fail (parse_address (&str, info));
6147 if (info->addr.pcrel || info->addr.offset.is_reg
6148 || !info->addr.preind || info->addr.postind
6149 || info->addr.writeback)
6150 {
6151 set_syntax_error (_("invalid addressing mode"));
6152 goto failure;
6153 }
6154
6155 /* Then retry, matching the specific syntax of these addresses. */
6156 str = start;
6157 po_char_or_fail ('[');
6158 po_reg_or_fail (REG_TYPE_R64_SP);
6159 /* Accept optional ", #0". */
6160 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6161 && skip_past_char (&str, ','))
6162 {
6163 skip_past_char (&str, '#');
6164 if (! skip_past_char (&str, '0'))
6165 {
6166 set_fatal_syntax_error
6167 (_("the optional immediate offset can only be 0"));
6168 goto failure;
6169 }
6170 }
6171 po_char_or_fail (']');
6172 break;
6173 }
6174
6175 case AARCH64_OPND_ADDR_REGOFF:
6176 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6177 po_misc_or_fail (parse_address (&str, info));
6178 regoff_addr:
6179 if (info->addr.pcrel || !info->addr.offset.is_reg
6180 || !info->addr.preind || info->addr.postind
6181 || info->addr.writeback)
6182 {
6183 set_syntax_error (_("invalid addressing mode"));
6184 goto failure;
6185 }
6186 if (!info->shifter.operator_present)
6187 {
6188 /* Default to LSL if not present. Libopcodes prefers shifter
6189 kind to be explicit. */
6190 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6191 info->shifter.kind = AARCH64_MOD_LSL;
6192 }
6193 /* Qualifier to be deduced by libopcodes. */
6194 break;
6195
6196 case AARCH64_OPND_ADDR_SIMM7:
6197 po_misc_or_fail (parse_address (&str, info));
6198 if (info->addr.pcrel || info->addr.offset.is_reg
6199 || (!info->addr.preind && !info->addr.postind))
6200 {
6201 set_syntax_error (_("invalid addressing mode"));
6202 goto failure;
6203 }
6204 if (inst.reloc.type != BFD_RELOC_UNUSED)
6205 {
6206 set_syntax_error (_("relocation not allowed"));
6207 goto failure;
6208 }
6209 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6210 /* addr_off_p */ 1,
6211 /* need_libopcodes_p */ 1,
6212 /* skip_p */ 0);
6213 break;
6214
6215 case AARCH64_OPND_ADDR_SIMM9:
6216 case AARCH64_OPND_ADDR_SIMM9_2:
6217 case AARCH64_OPND_ADDR_SIMM11:
6218 case AARCH64_OPND_ADDR_SIMM13:
6219 po_misc_or_fail (parse_address (&str, info));
6220 if (info->addr.pcrel || info->addr.offset.is_reg
6221 || (!info->addr.preind && !info->addr.postind)
6222 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6223 && info->addr.writeback))
6224 {
6225 set_syntax_error (_("invalid addressing mode"));
6226 goto failure;
6227 }
6228 if (inst.reloc.type != BFD_RELOC_UNUSED)
6229 {
6230 set_syntax_error (_("relocation not allowed"));
6231 goto failure;
6232 }
6233 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6234 /* addr_off_p */ 1,
6235 /* need_libopcodes_p */ 1,
6236 /* skip_p */ 0);
6237 break;
6238
6239 case AARCH64_OPND_ADDR_SIMM10:
6240 case AARCH64_OPND_ADDR_OFFSET:
6241 po_misc_or_fail (parse_address (&str, info));
6242 if (info->addr.pcrel || info->addr.offset.is_reg
6243 || !info->addr.preind || info->addr.postind)
6244 {
6245 set_syntax_error (_("invalid addressing mode"));
6246 goto failure;
6247 }
6248 if (inst.reloc.type != BFD_RELOC_UNUSED)
6249 {
6250 set_syntax_error (_("relocation not allowed"));
6251 goto failure;
6252 }
6253 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6254 /* addr_off_p */ 1,
6255 /* need_libopcodes_p */ 1,
6256 /* skip_p */ 0);
6257 break;
6258
6259 case AARCH64_OPND_ADDR_UIMM12:
6260 po_misc_or_fail (parse_address (&str, info));
6261 if (info->addr.pcrel || info->addr.offset.is_reg
6262 || !info->addr.preind || info->addr.writeback)
6263 {
6264 set_syntax_error (_("invalid addressing mode"));
6265 goto failure;
6266 }
6267 if (inst.reloc.type == BFD_RELOC_UNUSED)
6268 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6269 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6270 || (inst.reloc.type
6271 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6272 || (inst.reloc.type
6273 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6274 || (inst.reloc.type
6275 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6276 || (inst.reloc.type
6277 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6278 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6279 /* Leave qualifier to be determined by libopcodes. */
6280 break;
6281
6282 case AARCH64_OPND_SIMD_ADDR_POST:
6283 /* [<Xn|SP>], <Xm|#<amount>> */
6284 po_misc_or_fail (parse_address (&str, info));
6285 if (!info->addr.postind || !info->addr.writeback)
6286 {
6287 set_syntax_error (_("invalid addressing mode"));
6288 goto failure;
6289 }
6290 if (!info->addr.offset.is_reg)
6291 {
6292 if (inst.reloc.exp.X_op == O_constant)
6293 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6294 else
6295 {
6296 set_fatal_syntax_error
6297 (_("writeback value must be an immediate constant"));
6298 goto failure;
6299 }
6300 }
6301 /* No qualifier. */
6302 break;
6303
6304 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6305 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6306 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6307 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6308 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6309 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6310 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6311 case AARCH64_OPND_SVE_ADDR_RI_U6:
6312 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6313 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6314 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6315 /* [X<n>{, #imm, MUL VL}]
6316 [X<n>{, #imm}]
6317 but recognizing SVE registers. */
6318 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6319 &offset_qualifier));
6320 if (base_qualifier != AARCH64_OPND_QLF_X)
6321 {
6322 set_syntax_error (_("invalid addressing mode"));
6323 goto failure;
6324 }
6325 sve_regimm:
6326 if (info->addr.pcrel || info->addr.offset.is_reg
6327 || !info->addr.preind || info->addr.writeback)
6328 {
6329 set_syntax_error (_("invalid addressing mode"));
6330 goto failure;
6331 }
6332 if (inst.reloc.type != BFD_RELOC_UNUSED
6333 || inst.reloc.exp.X_op != O_constant)
6334 {
6335 /* Make sure this has priority over
6336 "invalid addressing mode". */
6337 set_fatal_syntax_error (_("constant offset required"));
6338 goto failure;
6339 }
6340 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6341 break;
6342
6343 case AARCH64_OPND_SVE_ADDR_R:
6344 /* [<Xn|SP>{, <R><m>}]
6345 but recognizing SVE registers. */
6346 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6347 &offset_qualifier));
6348 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6349 {
6350 offset_qualifier = AARCH64_OPND_QLF_X;
6351 info->addr.offset.is_reg = 1;
6352 info->addr.offset.regno = 31;
6353 }
6354 else if (base_qualifier != AARCH64_OPND_QLF_X
6355 || offset_qualifier != AARCH64_OPND_QLF_X)
6356 {
6357 set_syntax_error (_("invalid addressing mode"));
6358 goto failure;
6359 }
6360 goto regoff_addr;
6361
6362 case AARCH64_OPND_SVE_ADDR_RR:
6363 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6364 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6365 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6366 case AARCH64_OPND_SVE_ADDR_RX:
6367 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6368 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6369 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6370 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6371 but recognizing SVE registers. */
6372 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6373 &offset_qualifier));
6374 if (base_qualifier != AARCH64_OPND_QLF_X
6375 || offset_qualifier != AARCH64_OPND_QLF_X)
6376 {
6377 set_syntax_error (_("invalid addressing mode"));
6378 goto failure;
6379 }
6380 goto regoff_addr;
6381
6382 case AARCH64_OPND_SVE_ADDR_RZ:
6383 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6384 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6385 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6386 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6387 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6388 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6389 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6390 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6391 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6392 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6393 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6394 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6395 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6396 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6397 &offset_qualifier));
6398 if (base_qualifier != AARCH64_OPND_QLF_X
6399 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6400 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6401 {
6402 set_syntax_error (_("invalid addressing mode"));
6403 goto failure;
6404 }
6405 info->qualifier = offset_qualifier;
6406 goto regoff_addr;
6407
6408 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6409 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6410 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6411 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6412 /* [Z<n>.<T>{, #imm}] */
6413 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6414 &offset_qualifier));
6415 if (base_qualifier != AARCH64_OPND_QLF_S_S
6416 && base_qualifier != AARCH64_OPND_QLF_S_D)
6417 {
6418 set_syntax_error (_("invalid addressing mode"));
6419 goto failure;
6420 }
6421 info->qualifier = base_qualifier;
6422 goto sve_regimm;
6423
6424 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6425 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6426 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6427 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6428 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6429
6430 We don't reject:
6431
6432 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6433
6434 here since we get better error messages by leaving it to
6435 the qualifier checking routines. */
6436 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6437 &offset_qualifier));
6438 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6439 && base_qualifier != AARCH64_OPND_QLF_S_D)
6440 || offset_qualifier != base_qualifier)
6441 {
6442 set_syntax_error (_("invalid addressing mode"));
6443 goto failure;
6444 }
6445 info->qualifier = base_qualifier;
6446 goto regoff_addr;
6447
6448 case AARCH64_OPND_SYSREG:
6449 {
6450 uint32_t sysreg_flags;
6451 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6452 &sysreg_flags)) == PARSE_FAIL)
6453 {
6454 set_syntax_error (_("unknown or missing system register name"));
6455 goto failure;
6456 }
6457 inst.base.operands[i].sysreg.value = val;
6458 inst.base.operands[i].sysreg.flags = sysreg_flags;
6459 break;
6460 }
6461
6462 case AARCH64_OPND_PSTATEFIELD:
6463 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6464 == PARSE_FAIL)
6465 {
6466 set_syntax_error (_("unknown or missing PSTATE field name"));
6467 goto failure;
6468 }
6469 inst.base.operands[i].pstatefield = val;
6470 break;
6471
6472 case AARCH64_OPND_SYSREG_IC:
6473 inst.base.operands[i].sysins_op =
6474 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6475 goto sys_reg_ins;
6476
6477 case AARCH64_OPND_SYSREG_DC:
6478 inst.base.operands[i].sysins_op =
6479 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6480 goto sys_reg_ins;
6481
6482 case AARCH64_OPND_SYSREG_AT:
6483 inst.base.operands[i].sysins_op =
6484 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6485 goto sys_reg_ins;
6486
6487 case AARCH64_OPND_SYSREG_SR:
6488 inst.base.operands[i].sysins_op =
6489 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6490 goto sys_reg_ins;
6491
6492 case AARCH64_OPND_SYSREG_TLBI:
6493 inst.base.operands[i].sysins_op =
6494 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6495 sys_reg_ins:
6496 if (inst.base.operands[i].sysins_op == NULL)
6497 {
6498 set_fatal_syntax_error ( _("unknown or missing operation name"));
6499 goto failure;
6500 }
6501 break;
6502
6503 case AARCH64_OPND_BARRIER:
6504 case AARCH64_OPND_BARRIER_ISB:
6505 val = parse_barrier (&str);
6506 if (val != PARSE_FAIL
6507 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6508 {
6509 /* ISB only accepts options name 'sy'. */
6510 set_syntax_error
6511 (_("the specified option is not accepted in ISB"));
6512 /* Turn off backtrack as this optional operand is present. */
6513 backtrack_pos = 0;
6514 goto failure;
6515 }
6516 /* This is an extension to accept a 0..15 immediate. */
6517 if (val == PARSE_FAIL)
6518 po_imm_or_fail (0, 15);
6519 info->barrier = aarch64_barrier_options + val;
6520 break;
6521
6522 case AARCH64_OPND_PRFOP:
6523 val = parse_pldop (&str);
6524 /* This is an extension to accept a 0..31 immediate. */
6525 if (val == PARSE_FAIL)
6526 po_imm_or_fail (0, 31);
6527 inst.base.operands[i].prfop = aarch64_prfops + val;
6528 break;
6529
6530 case AARCH64_OPND_BARRIER_PSB:
6531 val = parse_barrier_psb (&str, &(info->hint_option));
6532 if (val == PARSE_FAIL)
6533 goto failure;
6534 break;
6535
6536 case AARCH64_OPND_BTI_TARGET:
6537 val = parse_bti_operand (&str, &(info->hint_option));
6538 if (val == PARSE_FAIL)
6539 goto failure;
6540 break;
6541
6542 default:
6543 as_fatal (_("unhandled operand code %d"), operands[i]);
6544 }
6545
6546 /* If we get here, this operand was successfully parsed. */
6547 inst.base.operands[i].present = 1;
6548 continue;
6549
6550 failure:
6551 /* The parse routine should already have set the error, but in case
6552 not, set a default one here. */
6553 if (! error_p ())
6554 set_default_error ();
6555
6556 if (! backtrack_pos)
6557 goto parse_operands_return;
6558
6559 {
6560 /* We reach here because this operand is marked as optional, and
6561 either no operand was supplied or the operand was supplied but it
6562 was syntactically incorrect. In the latter case we report an
6563 error. In the former case we perform a few more checks before
6564 dropping through to the code to insert the default operand. */
6565
6566 char *tmp = backtrack_pos;
6567 char endchar = END_OF_INSN;
6568
6569 if (i != (aarch64_num_of_operands (opcode) - 1))
6570 endchar = ',';
6571 skip_past_char (&tmp, ',');
6572
6573 if (*tmp != endchar)
6574 /* The user has supplied an operand in the wrong format. */
6575 goto parse_operands_return;
6576
6577 /* Make sure there is not a comma before the optional operand.
6578 For example the fifth operand of 'sys' is optional:
6579
6580 sys #0,c0,c0,#0, <--- wrong
6581 sys #0,c0,c0,#0 <--- correct. */
6582 if (comma_skipped_p && i && endchar == END_OF_INSN)
6583 {
6584 set_fatal_syntax_error
6585 (_("unexpected comma before the omitted optional operand"));
6586 goto parse_operands_return;
6587 }
6588 }
6589
6590 /* Reaching here means we are dealing with an optional operand that is
6591 omitted from the assembly line. */
6592 gas_assert (optional_operand_p (opcode, i));
6593 info->present = 0;
6594 process_omitted_operand (operands[i], opcode, i, info);
6595
6596 /* Try again, skipping the optional operand at backtrack_pos. */
6597 str = backtrack_pos;
6598 backtrack_pos = 0;
6599
6600 /* Clear any error record after the omitted optional operand has been
6601 successfully handled. */
6602 clear_error ();
6603 }
6604
6605 /* Check if we have parsed all the operands. */
6606 if (*str != '\0' && ! error_p ())
6607 {
6608 /* Set I to the index of the last present operand; this is
6609 for the purpose of diagnostics. */
6610 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6611 ;
6612 set_fatal_syntax_error
6613 (_("unexpected characters following instruction"));
6614 }
6615
6616 parse_operands_return:
6617
6618 if (error_p ())
6619 {
6620 DEBUG_TRACE ("parsing FAIL: %s - %s",
6621 operand_mismatch_kind_names[get_error_kind ()],
6622 get_error_message ());
6623 /* Record the operand error properly; this is useful when there
6624 are multiple instruction templates for a mnemonic name, so that
6625 later on, we can select the error that most closely describes
6626 the problem. */
6627 record_operand_error (opcode, i, get_error_kind (),
6628 get_error_message ());
6629 return FALSE;
6630 }
6631 else
6632 {
6633 DEBUG_TRACE ("parsing SUCCESS");
6634 return TRUE;
6635 }
6636 }
6637
/* Perform fix-ups to provide programmer-friendly features while keeping
   libopcodes happy, i.e. libopcodes only accepts the preferred
   architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */
6642
static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  /* Only a few instruction classes need any massaging before encoding.  */
  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register restricts the testable bit number to 0-31.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* Encode using the X form; the W spelling is only a
	     programmer-friendly alternative accepted here.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  if (op == OP_LDRSW_LIT)
	    /* LDRSW reads only 32 bits from the pool regardless of the
	       64-bit destination.  */
	    size = 4;
	  /* Only constants, big numbers and plain symbols can live in a
	     literal pool.  */
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	/* Silently narrow Xd to Wd; the upper 32 bits are zeroed anyway.  */
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	/* Rm_EXT is the second or third operand of this class.  */
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6745
6746 /* Check for loads and stores that will cause unpredictable behavior. */
6747
6748 static void
6749 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6750 {
6751 aarch64_inst *base = &instr->base;
6752 const aarch64_opcode *opcode = base->opcode;
6753 const aarch64_opnd_info *opnds = base->operands;
6754 switch (opcode->iclass)
6755 {
6756 case ldst_pos:
6757 case ldst_imm9:
6758 case ldst_imm10:
6759 case ldst_unscaled:
6760 case ldst_unpriv:
6761 /* Loading/storing the base register is unpredictable if writeback. */
6762 if ((aarch64_get_operand_class (opnds[0].type)
6763 == AARCH64_OPND_CLASS_INT_REG)
6764 && opnds[0].reg.regno == opnds[1].addr.base_regno
6765 && opnds[1].addr.base_regno != REG_SP
6766 && opnds[1].addr.writeback)
6767 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6768 break;
6769 case ldstpair_off:
6770 case ldstnapair_offs:
6771 case ldstpair_indexed:
6772 /* Loading/storing the base register is unpredictable if writeback. */
6773 if ((aarch64_get_operand_class (opnds[0].type)
6774 == AARCH64_OPND_CLASS_INT_REG)
6775 && (opnds[0].reg.regno == opnds[2].addr.base_regno
6776 || opnds[1].reg.regno == opnds[2].addr.base_regno)
6777 && opnds[2].addr.base_regno != REG_SP
6778 /* Exempt STGP. */
6779 && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
6780 && opnds[2].addr.writeback)
6781 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6782 /* Load operations must load different registers. */
6783 if ((opcode->opcode & (1 << 22))
6784 && opnds[0].reg.regno == opnds[1].reg.regno)
6785 as_warn (_("unpredictable load of register pair -- `%s'"), str);
6786 break;
6787
6788 case ldstexcl:
6789 /* It is unpredictable if the destination and status registers are the
6790 same. */
6791 if ((aarch64_get_operand_class (opnds[0].type)
6792 == AARCH64_OPND_CLASS_INT_REG)
6793 && (aarch64_get_operand_class (opnds[1].type)
6794 == AARCH64_OPND_CLASS_INT_REG)
6795 && (opnds[0].reg.regno == opnds[1].reg.regno
6796 || opnds[0].reg.regno == opnds[2].reg.regno))
6797 as_warn (_("unpredictable: identical transfer and status registers"
6798 " --`%s'"),
6799 str);
6800
6801 break;
6802
6803 default:
6804 break;
6805 }
6806 }
6807
6808 static void
6809 force_automatic_sequence_close (void)
6810 {
6811 if (now_instr_sequence.instr)
6812 {
6813 as_warn (_("previous `%s' sequence has not been closed"),
6814 now_instr_sequence.instr->opcode->name);
6815 init_insn_sequence (NULL, &now_instr_sequence);
6816 }
6817 }
6818
6819 /* A wrapper function to interface with libopcodes on encoding and
6820 record the error message if there is any.
6821
6822 Return TRUE on success; otherwise return FALSE. */
6823
6824 static bfd_boolean
6825 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6826 aarch64_insn *code)
6827 {
6828 aarch64_operand_error error_info;
6829 memset (&error_info, '\0', sizeof (error_info));
6830 error_info.kind = AARCH64_OPDE_NIL;
6831 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
6832 && !error_info.non_fatal)
6833 return TRUE;
6834
6835 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6836 record_operand_error_info (opcode, &error_info);
6837 return error_info.non_fatal;
6838 }
6839
6840 #ifdef DEBUG_AARCH64
6841 static inline void
6842 dump_opcode_operands (const aarch64_opcode *opcode)
6843 {
6844 int i = 0;
6845 while (opcode->operands[i] != AARCH64_OPND_NIL)
6846 {
6847 aarch64_verbose ("\t\t opnd%d: %s", i,
6848 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
6849 ? aarch64_get_operand_name (opcode->operands[i])
6850 : aarch64_get_operand_desc (opcode->operands[i]));
6851 ++i;
6852 }
6853 }
6854 #endif /* DEBUG_AARCH64 */
6855
6856 /* This is the guts of the machine-dependent assembler. STR points to a
6857 machine dependent instruction. This function is supposed to emit
6858 the frags/bytes it assembles to. */
6859
void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Look up the mnemonic; P is advanced past it on success.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  /* Reject "mnemonic, operands" — a comma may not follow the mnemonic.  */
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed from the mnemonic across the reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; all three must succeed for this template
	 to be accepted.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, TRUE);
	  return;
	}

      /* This template did not match; retry with the next one sharing the
	 mnemonic, starting from a freshly reset instruction.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, FALSE);
}
6995
6996 /* Various frobbings of labels and their addresses. */
6997
void
aarch64_start_line_hook (void)
{
  /* Forget the label seen on the previous line; md_assemble only aligns
     a label that appears on the same line as its instruction.  */
  last_label_seen = NULL;
}
7003
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember the most recent label so md_assemble can re-anchor it to the
     (possibly aligned) instruction that follows.  */
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
7011
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close; an unclosed instruction
     sequence is warned about and reset (see
     force_automatic_sequence_close).  */
  force_automatic_sequence_close ();
}
7018
7019 int
7020 aarch64_data_in_code (void)
7021 {
7022 if (!strncmp (input_line_pointer + 1, "data:", 5))
7023 {
7024 *input_line_pointer = '/';
7025 input_line_pointer += 5;
7026 *input_line_pointer = 0;
7027 return 1;
7028 }
7029
7030 return 0;
7031 }
7032
char *
aarch64_canonicalize_symbol_name (char *name)
{
  /* Strip a trailing "/data" tag (added by aarch64_data_in_code) so the
     symbol reverts to its original spelling.  NAME is edited in place and
     returned.  */
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
7043 \f
7044 /* Table of all register names defined by default. The user can
7045 define additional names with .req. Note that all register names
7046 should appear in both upper and lowercase variants. Some registers
7047 also have mixed-case names. */
7048
/* REGDEF builds one reg_entry; REGDEF_ALIAS differs only in the final
   boolean field (TRUE vs FALSE) -- presumably marking canonical names
   versus aliases; confirm against the reg_entry declaration.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
/* REGNUM pastes prefix and number into a single register name.  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Expand to entries 0-15, 0-30 or 0-31 with the given prefix/type.  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  x31/w31 are deliberately absent: register 31 is
     defined separately below as SP/WSP and XZR/WZR.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers; only p0-p15 exist.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7113
/* One-bit flag values used to build the 4-bit NZCV immediate below: an
   upper-case letter means the flag is set, lower-case means clear.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the N, Z, C and V bits (most- to least-significant) into a
   4-bit value.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All 16 case-spellings of the NZCV operand, one per flag combination.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7151 \f
7152 /* MD interface: bits in the object file. */
7153
7154 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7155 for use in the a.out file, and stores them in the array pointed to by buf.
7156 This knows about the endian-ness of the target machine and does
7157 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7158 2 (short) and 4 (long) Floating numbers are put out as a series of
7159 LITTLENUMS (shorts, here at least). */
7160
7161 void
7162 md_number_to_chars (char *buf, valueT val, int n)
7163 {
7164 if (target_big_endian)
7165 number_to_chars_bigendian (buf, val, n);
7166 else
7167 number_to_chars_littleendian (buf, val, n);
7168 }
7169
7170 /* MD interface: Sections. */
7171
7172 /* Estimate the size of a frag before relaxing. Assume everything fits in
7173 4 bytes. */
7174
int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Every frag is assumed to hold at most one 4-byte instruction word.  */
  fragp->fr_var = 4;
  return 4;
}
7181
7182 /* Round up a section size to the appropriate boundary. */
7183
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No extra padding is required; return SIZE unchanged.  */
  return size;
}
7189
7190 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7191 of an rs_align_code fragment.
7192
7193 Here we fill the frag with the appropriate info for padding the
7194 output stream. The resulting frag will consist of a fixed (fr_fix)
7195 and of a repeating (fr_var) part.
7196
7197 The fixed content is always emitted before the repeating content and
7198 these two parts are used as follows in constructing the output:
7199 - the fixed part will be used to align to a valid instruction word
7200 boundary, in case that we start at a misaligned address; as no
7201 executable instruction can live at the misaligned location, we
7202 simply fill with zeros;
7203 - the variable part will be used to cover the remaining padding and
7204 we fill using the AArch64 NOP instruction.
7205
7206 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7207 enough storage space for up to 3 bytes for padding the back to a valid
7208 instruction alignment and exactly 4 bytes to store the NOP pattern. */
7209
7210 void
7211 aarch64_handle_align (fragS * fragP)
7212 {
7213 /* NOP = d503201f */
7214 /* AArch64 instructions are always little-endian. */
7215 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
7216
7217 int bytes, fix, noop_size;
7218 char *p;
7219
7220 if (fragP->fr_type != rs_align_code)
7221 return;
7222
7223 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
7224 p = fragP->fr_literal + fragP->fr_fix;
7225
7226 #ifdef OBJ_ELF
7227 gas_assert (fragP->tc_frag_data.recorded);
7228 #endif
7229
7230 noop_size = sizeof (aarch64_noop);
7231
7232 fix = bytes & (noop_size - 1);
7233 if (fix)
7234 {
7235 #ifdef OBJ_ELF
7236 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
7237 #endif
7238 memset (p, 0, fix);
7239 p += fix;
7240 fragP->fr_fix += fix;
7241 }
7242
7243 if (noop_size)
7244 memcpy (p, aarch64_noop, noop_size);
7245 fragP->fr_var = noop_size;
7246 }
7247
7248 /* Perform target specific initialisation of a frag.
7249 Note - despite the name this initialisation is not done when the frag
7250 is created, but only when its type is assigned. A frag can be created
7251 and used a long time before its type is set, so beware of assuming that
7252 this initialisation is performed first. */
7253
7254 #ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets; the bookkeeping below (mapping
     state, recorded frags) is ELF-only.  */
}
7260
7261 #else /* OBJ_ELF is defined. */
7262 void
7263 aarch64_init_frag (fragS * fragP, int max_chars)
7264 {
7265 /* Record a mapping symbol for alignment frags. We will delete this
7266 later if the alignment ends up empty. */
7267 if (!fragP->tc_frag_data.recorded)
7268 fragP->tc_frag_data.recorded = 1;
7269
7270 /* PR 21809: Do not set a mapping state for debug sections
7271 - it just confuses other tools. */
7272 if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
7273 return;
7274
7275 switch (fragP->fr_type)
7276 {
7277 case rs_align_test:
7278 case rs_fill:
7279 mapping_state_2 (MAP_DATA, max_chars);
7280 break;
7281 case rs_align:
7282 /* PR 20364: We can get alignment frags in code sections,
7283 so do not just assume that we should use the MAP_DATA state. */
7284 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
7285 break;
7286 case rs_align_code:
7287 mapping_state_2 (MAP_INSN, max_chars);
7288 break;
7289 default:
7290 break;
7291 }
7292 }
7293 \f
7294 /* Initialize the DWARF-2 unwind information for this procedure. */
7295
void
tc_aarch64_frame_initial_instructions (void)
{
  /* The initial CFA is the stack pointer with a zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
7301 #endif /* OBJ_ELF */
7302
7303 /* Convert REGNAME to a DWARF-2 register number. */
7304
7305 int
7306 tc_aarch64_regname_to_dw2regnum (char *regname)
7307 {
7308 const reg_entry *reg = parse_reg (&regname);
7309 if (reg == NULL)
7310 return -1;
7311
7312 switch (reg->type)
7313 {
7314 case REG_TYPE_SP_32:
7315 case REG_TYPE_SP_64:
7316 case REG_TYPE_R_32:
7317 case REG_TYPE_R_64:
7318 return reg->number;
7319
7320 case REG_TYPE_FP_B:
7321 case REG_TYPE_FP_H:
7322 case REG_TYPE_FP_S:
7323 case REG_TYPE_FP_D:
7324 case REG_TYPE_FP_Q:
7325 return reg->number + 64;
7326
7327 default:
7328 break;
7329 }
7330 return -1;
7331 }
7332
7333 /* Implement DWARF2_ADDR_SIZE. */
7334
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses even on a 64-bit architecture.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7344
7345 /* MD interface: Symbol and relocation handling. */
7346
7347 /* Return the address within the segment that a PC-relative fixup is
7348 relative to. For AArch64 PC-relative fixups applied to instructions
7349 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7350
7351 long
7352 md_pcrel_from_section (fixS * fixP, segT seg)
7353 {
7354 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7355
7356 /* If this is pc-relative and we are going to emit a relocation
7357 then we just want to put out any pipeline compensation that the linker
7358 will need. Otherwise we want to use the calculated base. */
7359 if (fixP->fx_pcrel
7360 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7361 || aarch64_force_relocation (fixP)))
7362 base = 0;
7363
7364 /* AArch64 should be consistent for all pc-relative relocations. */
7365 return base + AARCH64_PCREL_OFFSET;
7366 }
7367
7368 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
7369 Otherwise we have no need to default values of symbols. */
7370
symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character filter before the full string comparison.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  /* The GOT symbol is created lazily; a user-defined symbol with
	     the same name is an error.  */
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  /* All other symbols get no default value.  */
  return 0;
}
7393
7394 /* Return non-zero if the indicated VALUE has overflowed the maximum
7395 range expressible by a unsigned number with the indicated number of
7396 BITS. */
7397
7398 static bfd_boolean
7399 unsigned_overflow (valueT value, unsigned bits)
7400 {
7401 valueT lim;
7402 if (bits >= sizeof (valueT) * 8)
7403 return FALSE;
7404 lim = (valueT) 1 << bits;
7405 return (value >= lim);
7406 }
7407
7408
7409 /* Return non-zero if the indicated VALUE has overflowed the maximum
7410 range expressible by an signed number with the indicated number of
7411 BITS. */
7412
7413 static bfd_boolean
7414 signed_overflow (offsetT value, unsigned bits)
7415 {
7416 offsetT lim;
7417 if (bits >= sizeof (offsetT) * 8)
7418 return FALSE;
7419 lim = (offsetT) 1 << (bits - 1);
7420 return (value < -lim || value >= lim);
7421 }
7422
7423 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7424 unsigned immediate offset load/store instruction, try to encode it as
7425 an unscaled, 9-bit, signed immediate offset load/store instruction.
7426 Return TRUE if it is successful; otherwise return FALSE.
7427
7428 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7429 in response to the standard LDR/STR mnemonics when the immediate offset is
7430 unambiguous, i.e. when it is negative or unaligned. */
7431
7432 static bfd_boolean
7433 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
7434 {
7435 int idx;
7436 enum aarch64_op new_op;
7437 const aarch64_opcode *new_opcode;
7438
7439 gas_assert (instr->opcode->iclass == ldst_pos);
7440
7441 switch (instr->opcode->op)
7442 {
7443 case OP_LDRB_POS:new_op = OP_LDURB; break;
7444 case OP_STRB_POS: new_op = OP_STURB; break;
7445 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
7446 case OP_LDRH_POS: new_op = OP_LDURH; break;
7447 case OP_STRH_POS: new_op = OP_STURH; break;
7448 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
7449 case OP_LDR_POS: new_op = OP_LDUR; break;
7450 case OP_STR_POS: new_op = OP_STUR; break;
7451 case OP_LDRF_POS: new_op = OP_LDURV; break;
7452 case OP_STRF_POS: new_op = OP_STURV; break;
7453 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
7454 case OP_PRFM_POS: new_op = OP_PRFUM; break;
7455 default: new_op = OP_NIL; break;
7456 }
7457
7458 if (new_op == OP_NIL)
7459 return FALSE;
7460
7461 new_opcode = aarch64_get_opcode (new_op);
7462 gas_assert (new_opcode != NULL);
7463
7464 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
7465 instr->opcode->op, new_opcode->op);
7466
7467 aarch64_replace_opcode (instr, new_opcode);
7468
7469 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
7470 qualifier matching may fail because the out-of-date qualifier will
7471 prevent the operand being updated with a new and correct qualifier. */
7472 idx = aarch64_operand_index (instr->opcode->operands,
7473 AARCH64_OPND_ADDR_SIMM9);
7474 gas_assert (idx == 1);
7475 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
7476
7477 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
7478
7479 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
7480 insn_sequence))
7481 return FALSE;
7482
7483 return TRUE;
7484 }
7485
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to load
   a 32-bit/64-bit immediate value into a general register.  An assembler
   error shall result if the immediate cannot be created by a single one of
   these instructions.  If there is a choice, then to ensure reversibility an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */
7494
static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  NOTE(review): a previous comment here said
	 MOVK, but the contract above lists only MOVZ/MOVN/ORR and
	 OP_MOV_IMM_WIDEN appears to be the inverted-value (MOVN) form --
	 confirm against opcodes/aarch64-tbl.h.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias (bitmask immediate).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* None of the single-instruction forms can represent VALUE.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
7546
/* An instruction operand which is immediate related may have a symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example, may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   The instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but do not involve any linker-time relocation.  */
7561
7562 static void
7563 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
7564 {
7565 int idx;
7566 uint32_t insn;
7567 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7568 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
7569 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
7570
7571 if (new_inst)
7572 {
7573 /* Now the instruction is about to be fixed-up, so the operand that
7574 was previously marked as 'ignored' needs to be unmarked in order
7575 to get the encoding done properly. */
7576 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7577 new_inst->operands[idx].skip = 0;
7578 }
7579
7580 gas_assert (opnd != AARCH64_OPND_NIL);
7581
7582 switch (opnd)
7583 {
7584 case AARCH64_OPND_EXCEPTION:
7585 if (unsigned_overflow (value, 16))
7586 as_bad_where (fixP->fx_file, fixP->fx_line,
7587 _("immediate out of range"));
7588 insn = get_aarch64_insn (buf);
7589 insn |= encode_svc_imm (value);
7590 put_aarch64_insn (buf, insn);
7591 break;
7592
7593 case AARCH64_OPND_AIMM:
7594 /* ADD or SUB with immediate.
7595 NOTE this assumes we come here with a add/sub shifted reg encoding
7596 3 322|2222|2 2 2 21111 111111
7597 1 098|7654|3 2 1 09876 543210 98765 43210
7598 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
7599 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
7600 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
7601 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
7602 ->
7603 3 322|2222|2 2 221111111111
7604 1 098|7654|3 2 109876543210 98765 43210
7605 11000000 sf 001|0001|shift imm12 Rn Rd ADD
7606 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
7607 51000000 sf 101|0001|shift imm12 Rn Rd SUB
7608 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
7609 Fields sf Rn Rd are already set. */
7610 insn = get_aarch64_insn (buf);
7611 if (value < 0)
7612 {
7613 /* Add <-> sub. */
7614 insn = reencode_addsub_switch_add_sub (insn);
7615 value = -value;
7616 }
7617
7618 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
7619 && unsigned_overflow (value, 12))
7620 {
7621 /* Try to shift the value by 12 to make it fit. */
7622 if (((value >> 12) << 12) == value
7623 && ! unsigned_overflow (value, 12 + 12))
7624 {
7625 value >>= 12;
7626 insn |= encode_addsub_imm_shift_amount (1);
7627 }
7628 }
7629
7630 if (unsigned_overflow (value, 12))
7631 as_bad_where (fixP->fx_file, fixP->fx_line,
7632 _("immediate out of range"));
7633
7634 insn |= encode_addsub_imm (value);
7635
7636 put_aarch64_insn (buf, insn);
7637 break;
7638
7639 case AARCH64_OPND_SIMD_IMM:
7640 case AARCH64_OPND_SIMD_IMM_SFT:
7641 case AARCH64_OPND_LIMM:
7642 /* Bit mask immediate. */
7643 gas_assert (new_inst != NULL);
7644 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7645 new_inst->operands[idx].imm.value = value;
7646 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7647 &new_inst->value, NULL, NULL, insn_sequence))
7648 put_aarch64_insn (buf, new_inst->value);
7649 else
7650 as_bad_where (fixP->fx_file, fixP->fx_line,
7651 _("invalid immediate"));
7652 break;
7653
7654 case AARCH64_OPND_HALF:
7655 /* 16-bit unsigned immediate. */
7656 if (unsigned_overflow (value, 16))
7657 as_bad_where (fixP->fx_file, fixP->fx_line,
7658 _("immediate out of range"));
7659 insn = get_aarch64_insn (buf);
7660 insn |= encode_movw_imm (value & 0xffff);
7661 put_aarch64_insn (buf, insn);
7662 break;
7663
7664 case AARCH64_OPND_IMM_MOV:
7665 /* Operand for a generic move immediate instruction, which is
7666 an alias instruction that generates a single MOVZ, MOVN or ORR
7667 instruction to loads a 32-bit/64-bit immediate value into general
7668 register. An assembler error shall result if the immediate cannot be
7669 created by a single one of these instructions. If there is a choice,
7670 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
7671 and MOVZ or MOVN to ORR. */
7672 gas_assert (new_inst != NULL);
7673 fix_mov_imm_insn (fixP, buf, new_inst, value);
7674 break;
7675
7676 case AARCH64_OPND_ADDR_SIMM7:
7677 case AARCH64_OPND_ADDR_SIMM9:
7678 case AARCH64_OPND_ADDR_SIMM9_2:
7679 case AARCH64_OPND_ADDR_SIMM10:
7680 case AARCH64_OPND_ADDR_UIMM12:
7681 case AARCH64_OPND_ADDR_SIMM11:
7682 case AARCH64_OPND_ADDR_SIMM13:
7683 /* Immediate offset in an address. */
7684 insn = get_aarch64_insn (buf);
7685
7686 gas_assert (new_inst != NULL && new_inst->value == insn);
7687 gas_assert (new_inst->opcode->operands[1] == opnd
7688 || new_inst->opcode->operands[2] == opnd);
7689
7690 /* Get the index of the address operand. */
7691 if (new_inst->opcode->operands[1] == opnd)
7692 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
7693 idx = 1;
7694 else
7695 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
7696 idx = 2;
7697
7698 /* Update the resolved offset value. */
7699 new_inst->operands[idx].addr.offset.imm = value;
7700
7701 /* Encode/fix-up. */
7702 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7703 &new_inst->value, NULL, NULL, insn_sequence))
7704 {
7705 put_aarch64_insn (buf, new_inst->value);
7706 break;
7707 }
7708 else if (new_inst->opcode->iclass == ldst_pos
7709 && try_to_encode_as_unscaled_ldst (new_inst))
7710 {
7711 put_aarch64_insn (buf, new_inst->value);
7712 break;
7713 }
7714
7715 as_bad_where (fixP->fx_file, fixP->fx_line,
7716 _("immediate offset out of range"));
7717 break;
7718
7719 default:
7720 gas_assert (0);
7721 as_fatal (_("unhandled operand code %d"), opnd);
7722 }
7723 }
7724
7725 /* Apply a fixup (fixP) to segment data, once it has been determined
7726 by our caller that we have all the info we need to fix it up.
7727
7728 Parameter valP is the pointer to the value of the bits. */
7729
7730 void
7731 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7732 {
7733 offsetT value = *valP;
7734 uint32_t insn;
7735 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7736 int scale;
7737 unsigned flags = fixP->fx_addnumber;
7738
7739 DEBUG_TRACE ("\n\n");
7740 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7741 DEBUG_TRACE ("Enter md_apply_fix");
7742
7743 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7744
7745 /* Note whether this will delete the relocation. */
7746
7747 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7748 fixP->fx_done = 1;
7749
7750 /* Process the relocations. */
7751 switch (fixP->fx_r_type)
7752 {
7753 case BFD_RELOC_NONE:
7754 /* This will need to go in the object file. */
7755 fixP->fx_done = 0;
7756 break;
7757
7758 case BFD_RELOC_8:
7759 case BFD_RELOC_8_PCREL:
7760 if (fixP->fx_done || !seg->use_rela_p)
7761 md_number_to_chars (buf, value, 1);
7762 break;
7763
7764 case BFD_RELOC_16:
7765 case BFD_RELOC_16_PCREL:
7766 if (fixP->fx_done || !seg->use_rela_p)
7767 md_number_to_chars (buf, value, 2);
7768 break;
7769
7770 case BFD_RELOC_32:
7771 case BFD_RELOC_32_PCREL:
7772 if (fixP->fx_done || !seg->use_rela_p)
7773 md_number_to_chars (buf, value, 4);
7774 break;
7775
7776 case BFD_RELOC_64:
7777 case BFD_RELOC_64_PCREL:
7778 if (fixP->fx_done || !seg->use_rela_p)
7779 md_number_to_chars (buf, value, 8);
7780 break;
7781
7782 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7783 /* We claim that these fixups have been processed here, even if
7784 in fact we generate an error because we do not have a reloc
7785 for them, so tc_gen_reloc() will reject them. */
7786 fixP->fx_done = 1;
7787 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7788 {
7789 as_bad_where (fixP->fx_file, fixP->fx_line,
7790 _("undefined symbol %s used as an immediate value"),
7791 S_GET_NAME (fixP->fx_addsy));
7792 goto apply_fix_return;
7793 }
7794 fix_insn (fixP, flags, value);
7795 break;
7796
7797 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7798 if (fixP->fx_done || !seg->use_rela_p)
7799 {
7800 if (value & 3)
7801 as_bad_where (fixP->fx_file, fixP->fx_line,
7802 _("pc-relative load offset not word aligned"));
7803 if (signed_overflow (value, 21))
7804 as_bad_where (fixP->fx_file, fixP->fx_line,
7805 _("pc-relative load offset out of range"));
7806 insn = get_aarch64_insn (buf);
7807 insn |= encode_ld_lit_ofs_19 (value >> 2);
7808 put_aarch64_insn (buf, insn);
7809 }
7810 break;
7811
7812 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7813 if (fixP->fx_done || !seg->use_rela_p)
7814 {
7815 if (signed_overflow (value, 21))
7816 as_bad_where (fixP->fx_file, fixP->fx_line,
7817 _("pc-relative address offset out of range"));
7818 insn = get_aarch64_insn (buf);
7819 insn |= encode_adr_imm (value);
7820 put_aarch64_insn (buf, insn);
7821 }
7822 break;
7823
7824 case BFD_RELOC_AARCH64_BRANCH19:
7825 if (fixP->fx_done || !seg->use_rela_p)
7826 {
7827 if (value & 3)
7828 as_bad_where (fixP->fx_file, fixP->fx_line,
7829 _("conditional branch target not word aligned"));
7830 if (signed_overflow (value, 21))
7831 as_bad_where (fixP->fx_file, fixP->fx_line,
7832 _("conditional branch out of range"));
7833 insn = get_aarch64_insn (buf);
7834 insn |= encode_cond_branch_ofs_19 (value >> 2);
7835 put_aarch64_insn (buf, insn);
7836 }
7837 break;
7838
7839 case BFD_RELOC_AARCH64_TSTBR14:
7840 if (fixP->fx_done || !seg->use_rela_p)
7841 {
7842 if (value & 3)
7843 as_bad_where (fixP->fx_file, fixP->fx_line,
7844 _("conditional branch target not word aligned"));
7845 if (signed_overflow (value, 16))
7846 as_bad_where (fixP->fx_file, fixP->fx_line,
7847 _("conditional branch out of range"));
7848 insn = get_aarch64_insn (buf);
7849 insn |= encode_tst_branch_ofs_14 (value >> 2);
7850 put_aarch64_insn (buf, insn);
7851 }
7852 break;
7853
7854 case BFD_RELOC_AARCH64_CALL26:
7855 case BFD_RELOC_AARCH64_JUMP26:
7856 if (fixP->fx_done || !seg->use_rela_p)
7857 {
7858 if (value & 3)
7859 as_bad_where (fixP->fx_file, fixP->fx_line,
7860 _("branch target not word aligned"));
7861 if (signed_overflow (value, 28))
7862 as_bad_where (fixP->fx_file, fixP->fx_line,
7863 _("branch out of range"));
7864 insn = get_aarch64_insn (buf);
7865 insn |= encode_branch_ofs_26 (value >> 2);
7866 put_aarch64_insn (buf, insn);
7867 }
7868 break;
7869
7870 case BFD_RELOC_AARCH64_MOVW_G0:
7871 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7872 case BFD_RELOC_AARCH64_MOVW_G0_S:
7873 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7874 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
7875 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
7876 scale = 0;
7877 goto movw_common;
7878 case BFD_RELOC_AARCH64_MOVW_G1:
7879 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7880 case BFD_RELOC_AARCH64_MOVW_G1_S:
7881 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7882 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
7883 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
7884 scale = 16;
7885 goto movw_common;
7886 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7887 scale = 0;
7888 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7889 /* Should always be exported to object file, see
7890 aarch64_force_relocation(). */
7891 gas_assert (!fixP->fx_done);
7892 gas_assert (seg->use_rela_p);
7893 goto movw_common;
7894 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7895 scale = 16;
7896 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7897 /* Should always be exported to object file, see
7898 aarch64_force_relocation(). */
7899 gas_assert (!fixP->fx_done);
7900 gas_assert (seg->use_rela_p);
7901 goto movw_common;
7902 case BFD_RELOC_AARCH64_MOVW_G2:
7903 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7904 case BFD_RELOC_AARCH64_MOVW_G2_S:
7905 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
7906 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
7907 scale = 32;
7908 goto movw_common;
7909 case BFD_RELOC_AARCH64_MOVW_G3:
7910 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
7911 scale = 48;
7912 movw_common:
7913 if (fixP->fx_done || !seg->use_rela_p)
7914 {
7915 insn = get_aarch64_insn (buf);
7916
7917 if (!fixP->fx_done)
7918 {
7919 /* REL signed addend must fit in 16 bits */
7920 if (signed_overflow (value, 16))
7921 as_bad_where (fixP->fx_file, fixP->fx_line,
7922 _("offset out of range"));
7923 }
7924 else
7925 {
7926 /* Check for overflow and scale. */
7927 switch (fixP->fx_r_type)
7928 {
7929 case BFD_RELOC_AARCH64_MOVW_G0:
7930 case BFD_RELOC_AARCH64_MOVW_G1:
7931 case BFD_RELOC_AARCH64_MOVW_G2:
7932 case BFD_RELOC_AARCH64_MOVW_G3:
7933 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7934 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7935 if (unsigned_overflow (value, scale + 16))
7936 as_bad_where (fixP->fx_file, fixP->fx_line,
7937 _("unsigned value out of range"));
7938 break;
7939 case BFD_RELOC_AARCH64_MOVW_G0_S:
7940 case BFD_RELOC_AARCH64_MOVW_G1_S:
7941 case BFD_RELOC_AARCH64_MOVW_G2_S:
7942 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
7943 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
7944 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
7945 /* NOTE: We can only come here with movz or movn. */
7946 if (signed_overflow (value, scale + 16))
7947 as_bad_where (fixP->fx_file, fixP->fx_line,
7948 _("signed value out of range"));
7949 if (value < 0)
7950 {
7951 /* Force use of MOVN. */
7952 value = ~value;
7953 insn = reencode_movzn_to_movn (insn);
7954 }
7955 else
7956 {
7957 /* Force use of MOVZ. */
7958 insn = reencode_movzn_to_movz (insn);
7959 }
7960 break;
7961 default:
7962 /* Unchecked relocations. */
7963 break;
7964 }
7965 value >>= scale;
7966 }
7967
7968 /* Insert value into MOVN/MOVZ/MOVK instruction. */
7969 insn |= encode_movw_imm (value & 0xffff);
7970
7971 put_aarch64_insn (buf, insn);
7972 }
7973 break;
7974
7975 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7976 fixP->fx_r_type = (ilp32_p
7977 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7978 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7979 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7980 /* Should always be exported to object file, see
7981 aarch64_force_relocation(). */
7982 gas_assert (!fixP->fx_done);
7983 gas_assert (seg->use_rela_p);
7984 break;
7985
7986 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7987 fixP->fx_r_type = (ilp32_p
7988 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
7989 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
7990 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7991 /* Should always be exported to object file, see
7992 aarch64_force_relocation(). */
7993 gas_assert (!fixP->fx_done);
7994 gas_assert (seg->use_rela_p);
7995 break;
7996
7997 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
7998 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7999 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8000 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8001 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8002 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8003 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8004 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8005 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8006 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8007 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8008 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8009 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8010 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8011 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8012 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8013 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8014 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8015 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8016 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8017 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8018 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8019 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8020 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8021 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8022 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8023 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8024 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8025 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8026 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8027 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8028 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8029 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8030 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8031 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8032 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8033 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8034 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8035 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8036 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8037 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8038 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8039 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8040 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8041 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8042 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8043 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8044 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8045 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8046 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8047 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8048 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8049 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8050 /* Should always be exported to object file, see
8051 aarch64_force_relocation(). */
8052 gas_assert (!fixP->fx_done);
8053 gas_assert (seg->use_rela_p);
8054 break;
8055
8056 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8057 /* Should always be exported to object file, see
8058 aarch64_force_relocation(). */
8059 fixP->fx_r_type = (ilp32_p
8060 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
8061 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
8062 gas_assert (!fixP->fx_done);
8063 gas_assert (seg->use_rela_p);
8064 break;
8065
8066 case BFD_RELOC_AARCH64_ADD_LO12:
8067 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8068 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8069 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8070 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8071 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8072 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8073 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8074 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8075 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8076 case BFD_RELOC_AARCH64_LDST128_LO12:
8077 case BFD_RELOC_AARCH64_LDST16_LO12:
8078 case BFD_RELOC_AARCH64_LDST32_LO12:
8079 case BFD_RELOC_AARCH64_LDST64_LO12:
8080 case BFD_RELOC_AARCH64_LDST8_LO12:
8081 /* Should always be exported to object file, see
8082 aarch64_force_relocation(). */
8083 gas_assert (!fixP->fx_done);
8084 gas_assert (seg->use_rela_p);
8085 break;
8086
8087 case BFD_RELOC_AARCH64_TLSDESC_ADD:
8088 case BFD_RELOC_AARCH64_TLSDESC_CALL:
8089 case BFD_RELOC_AARCH64_TLSDESC_LDR:
8090 break;
8091
8092 case BFD_RELOC_UNUSED:
8093 /* An error will already have been reported. */
8094 break;
8095
8096 default:
8097 as_bad_where (fixP->fx_file, fixP->fx_line,
8098 _("unexpected %s fixup"),
8099 bfd_get_reloc_code_name (fixP->fx_r_type));
8100 break;
8101 }
8102
8103 apply_fix_return:
8104 /* Free the allocated the struct aarch64_inst.
8105 N.B. currently there are very limited number of fix-up types actually use
8106 this field, so the impact on the performance should be minimal . */
8107 if (fixP->tc_fix_data.inst != NULL)
8108 free (fixP->tc_fix_data.inst);
8109
8110 return;
8111 }
8112
8113 /* Translate internal representation of relocation info to BFD target
8114 format. */
8115
8116 arelent *
8117 tc_gen_reloc (asection * section, fixS * fixp)
8118 {
8119 arelent *reloc;
8120 bfd_reloc_code_real_type code;
8121
8122 reloc = XNEW (arelent);
8123
8124 reloc->sym_ptr_ptr = XNEW (asymbol *);
8125 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8126 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8127
8128 if (fixp->fx_pcrel)
8129 {
8130 if (section->use_rela_p)
8131 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8132 else
8133 fixp->fx_offset = reloc->address;
8134 }
8135 reloc->addend = fixp->fx_offset;
8136
8137 code = fixp->fx_r_type;
8138 switch (code)
8139 {
8140 case BFD_RELOC_16:
8141 if (fixp->fx_pcrel)
8142 code = BFD_RELOC_16_PCREL;
8143 break;
8144
8145 case BFD_RELOC_32:
8146 if (fixp->fx_pcrel)
8147 code = BFD_RELOC_32_PCREL;
8148 break;
8149
8150 case BFD_RELOC_64:
8151 if (fixp->fx_pcrel)
8152 code = BFD_RELOC_64_PCREL;
8153 break;
8154
8155 default:
8156 break;
8157 }
8158
8159 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8160 if (reloc->howto == NULL)
8161 {
8162 as_bad_where (fixp->fx_file, fixp->fx_line,
8163 _
8164 ("cannot represent %s relocation in this object file format"),
8165 bfd_get_reloc_code_name (code));
8166 return NULL;
8167 }
8168
8169 return reloc;
8170 }
8171
8172 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8173
8174 void
8175 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8176 {
8177 bfd_reloc_code_real_type type;
8178 int pcrel = 0;
8179
8180 /* Pick a reloc.
8181 FIXME: @@ Should look at CPU word size. */
8182 switch (size)
8183 {
8184 case 1:
8185 type = BFD_RELOC_8;
8186 break;
8187 case 2:
8188 type = BFD_RELOC_16;
8189 break;
8190 case 4:
8191 type = BFD_RELOC_32;
8192 break;
8193 case 8:
8194 type = BFD_RELOC_64;
8195 break;
8196 default:
8197 as_bad (_("cannot do %u-byte relocation"), size);
8198 type = BFD_RELOC_UNUSED;
8199 break;
8200 }
8201
8202 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8203 }
8204
/* Decide whether the fixup FIXP must be kept as a relocation in the
   object file (return 1), may be resolved locally (return 0), or should
   fall through to the generic policy.  Must be kept in sync with the
   handling in md_apply_fix above.  */

int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  /* Fall back to the target-independent policy.  */
  return generic_force_reloc (fixp);
}
8300
8301 #ifdef OBJ_ELF
8302
/* Implement md_after_parse_args.  This is the earliest time we need to decide
   ABI.  If no -mabi specified, the ABI will be decided by target triplet.  */

void
aarch64_after_parse_args (void)
{
  /* An explicit -mabi option takes precedence.  */
  if (aarch64_abi != AARCH64_ABI_NONE)
    return;

  /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32.  */
  /* NOTE(review): the magic 7 is strlen ("aarch64"), i.e. this assumes
     default_arch always begins with exactly that prefix -- confirm against
     gas/configure.tgt if other prefixes are possible.  */
  if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
    aarch64_abi = AARCH64_ABI_ILP32;
  else
    aarch64_abi = AARCH64_ABI_LP64;
}
8318
8319 const char *
8320 elf64_aarch64_target_format (void)
8321 {
8322 if (strcmp (TARGET_OS, "cloudabi") == 0)
8323 {
8324 /* FIXME: What to do for ilp32_p ? */
8325 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
8326 }
8327 if (target_big_endian)
8328 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8329 else
8330 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8331 }
8332
/* Per-symbol hook run while writing out the symbol table; defers
   entirely to the generic ELF frobbing (which may set *PUNTP to
   discard the symbol).  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8338 #endif
8339
8340 /* MD interface: Finalization. */
8341
8342 /* A good place to do this, although this was probably not intended
8343 for this kind of use. We need to dump the literal pool before
8344 references are made to a null symbol pointer. */
8345
8346 void
8347 aarch64_cleanup (void)
8348 {
8349 literal_pool *pool;
8350
8351 for (pool = list_of_pools; pool; pool = pool->next)
8352 {
8353 /* Put it at the end of the relevant section. */
8354 subseg_set (pool->section, pool->sub_section);
8355 s_ltorg (0);
8356 }
8357 }
8358
8359 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain (e.g. bss-style
     sections).  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* SYM is the last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary: scan forward over empty frags
	 to decide whether a later mapping symbol makes it redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8423 #endif
8424
/* Adjust the symbol table.  Called late, once all symbols exist, to
   prune redundant mapping symbols and apply generic ELF fixups.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8437
/* Insert VALUE into TABLE under KEY.  An insertion failure (duplicate
   key or allocation failure inside the hash module) indicates an
   inconsistency in the assembler's own tables, so report it as a fatal
   internal error instead of printing to stdout and continuing with an
   incomplete table.  */
static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *hash_err;

  hash_err = hash_insert (table, key, value);
  if (hash_err)
    as_fatal (_("Internal Error: Can't hash %s"), key);
}
8447
8448 static void
8449 fill_instruction_hash_table (void)
8450 {
8451 aarch64_opcode *opcode = aarch64_opcode_table;
8452
8453 while (opcode->name != NULL)
8454 {
8455 templates *templ, *new_templ;
8456 templ = hash_find (aarch64_ops_hsh, opcode->name);
8457
8458 new_templ = XNEW (templates);
8459 new_templ->opcode = opcode;
8460 new_templ->next = NULL;
8461
8462 if (!templ)
8463 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8464 else
8465 {
8466 new_templ->next = templ->next;
8467 templ->next = new_templ;
8468 }
8469 ++opcode;
8470 }
8471 }
8472
/* Copy at most NUM characters of the NUL-terminated string SRC into DST,
   upper-casing each one, and NUL-terminate DST.  DST must have room for
   NUM + 1 characters.  Use size_t for the counter so it has the same
   type as NUM (the previous "unsigned int" counter could never reach a
   NUM greater than UINT_MAX).  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t i;
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
8481
8482 /* Assume STR point to a lower-case string, allocate, convert and return
8483 the corresponding upper-case string. */
8484 static inline const char*
8485 get_upper_str (const char *str)
8486 {
8487 char *ret;
8488 size_t len = strlen (str);
8489 ret = XNEWVEC (char, len + 1);
8490 convert_to_upper (ret, str, len);
8491 return ret;
8492 }
8493
8494 /* MD interface: Initialization. */
8495
8496 void
8497 md_begin (void)
8498 {
8499 unsigned mach;
8500 unsigned int i;
8501
8502 if ((aarch64_ops_hsh = hash_new ()) == NULL
8503 || (aarch64_cond_hsh = hash_new ()) == NULL
8504 || (aarch64_shift_hsh = hash_new ()) == NULL
8505 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8506 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8507 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8508 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8509 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8510 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8511 || (aarch64_sys_regs_sr_hsh = hash_new ()) == NULL
8512 || (aarch64_reg_hsh = hash_new ()) == NULL
8513 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8514 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8515 || (aarch64_pldop_hsh = hash_new ()) == NULL
8516 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8517 as_fatal (_("virtual memory exhausted"));
8518
8519 fill_instruction_hash_table ();
8520
8521 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8522 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8523 (void *) (aarch64_sys_regs + i));
8524
8525 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8526 checked_hash_insert (aarch64_pstatefield_hsh,
8527 aarch64_pstatefields[i].name,
8528 (void *) (aarch64_pstatefields + i));
8529
8530 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8531 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8532 aarch64_sys_regs_ic[i].name,
8533 (void *) (aarch64_sys_regs_ic + i));
8534
8535 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8536 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8537 aarch64_sys_regs_dc[i].name,
8538 (void *) (aarch64_sys_regs_dc + i));
8539
8540 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8541 checked_hash_insert (aarch64_sys_regs_at_hsh,
8542 aarch64_sys_regs_at[i].name,
8543 (void *) (aarch64_sys_regs_at + i));
8544
8545 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8546 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8547 aarch64_sys_regs_tlbi[i].name,
8548 (void *) (aarch64_sys_regs_tlbi + i));
8549
8550 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
8551 checked_hash_insert (aarch64_sys_regs_sr_hsh,
8552 aarch64_sys_regs_sr[i].name,
8553 (void *) (aarch64_sys_regs_sr + i));
8554
8555 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8556 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8557 (void *) (reg_names + i));
8558
8559 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8560 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8561 (void *) (nzcv_names + i));
8562
8563 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8564 {
8565 const char *name = aarch64_operand_modifiers[i].name;
8566 checked_hash_insert (aarch64_shift_hsh, name,
8567 (void *) (aarch64_operand_modifiers + i));
8568 /* Also hash the name in the upper case. */
8569 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8570 (void *) (aarch64_operand_modifiers + i));
8571 }
8572
8573 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8574 {
8575 unsigned int j;
8576 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8577 the same condition code. */
8578 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8579 {
8580 const char *name = aarch64_conds[i].names[j];
8581 if (name == NULL)
8582 break;
8583 checked_hash_insert (aarch64_cond_hsh, name,
8584 (void *) (aarch64_conds + i));
8585 /* Also hash the name in the upper case. */
8586 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8587 (void *) (aarch64_conds + i));
8588 }
8589 }
8590
8591 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8592 {
8593 const char *name = aarch64_barrier_options[i].name;
8594 /* Skip xx00 - the unallocated values of option. */
8595 if ((i & 0x3) == 0)
8596 continue;
8597 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8598 (void *) (aarch64_barrier_options + i));
8599 /* Also hash the name in the upper case. */
8600 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8601 (void *) (aarch64_barrier_options + i));
8602 }
8603
8604 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8605 {
8606 const char* name = aarch64_prfops[i].name;
8607 /* Skip the unallocated hint encodings. */
8608 if (name == NULL)
8609 continue;
8610 checked_hash_insert (aarch64_pldop_hsh, name,
8611 (void *) (aarch64_prfops + i));
8612 /* Also hash the name in the upper case. */
8613 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8614 (void *) (aarch64_prfops + i));
8615 }
8616
8617 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8618 {
8619 const char* name = aarch64_hint_options[i].name;
8620
8621 checked_hash_insert (aarch64_hint_opt_hsh, name,
8622 (void *) (aarch64_hint_options + i));
8623 /* Also hash the name in the upper case. */
8624 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8625 (void *) (aarch64_hint_options + i));
8626 }
8627
8628 /* Set the cpu variant based on the command-line options. */
8629 if (!mcpu_cpu_opt)
8630 mcpu_cpu_opt = march_cpu_opt;
8631
8632 if (!mcpu_cpu_opt)
8633 mcpu_cpu_opt = &cpu_default;
8634
8635 cpu_variant = *mcpu_cpu_opt;
8636
8637 /* Record the CPU type. */
8638 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8639
8640 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8641 }
8642
/* Command line processing.  */

/* Short options: "m" takes an argument and is dispatched through the
   aarch64_opts / aarch64_long_opts tables below.  */
const char *md_shortopts = "m:";

/* Endianness selection options.  Only the option(s) meaningful for the
   configured target are defined; md_parse_option and md_show_usage test
   these macros with #ifdef.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
8657
/* Long options (--EB / --EL) recognized by getopt_long.  */
struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8669
/* A simple boolean -m<option> flag: when OPTION is matched,
   md_parse_option stores VALUE into *VAR.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
8678
/* Table of the simple -m flag options handled by md_parse_option.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8692
/* Associates a -mcpu= / .cpu name with the feature set it implies and a
   human-readable canonical name.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
8701
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  The "all" entry is skipped by the .cpu directive
   parser (see s_aarch64_cpu) and is only reachable via -mcpu=all.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				 "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
8756
/* Associates a -march= / .arch name with the feature set it implies.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};
8762
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  The "all" entry is skipped by the .arch directive
   parser (see s_aarch64_arch) and is only reachable via -march=all.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {NULL, AARCH64_ARCH_NONE}
};
8775
/* ISA extensions.  An extension name enables the feature bits in VALUE
   and transitively pulls in the bits in REQUIRE (see
   aarch64_feature_enable_set / aarch64_feature_disable_set).  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};
8783
/* Table of extension names accepted after '+' in -mcpu/-march strings
   and in the .arch_extension directive, with their feature bits and
   dependencies.  Terminated by a NULL name.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  /* "crypto" is an umbrella extension: it enables AES and SHA2 too.  */
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO
					 | AARCH64_FEATURE_AES
					 | AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  /* "sha3" implies SHA2 support as well.  */
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2
					 | AARCH64_FEATURE_SHA3, 0),
			AARCH64_ARCH_NONE},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
8844
/* An option of the form -m<prefix><argument> (e.g. -mcpu=, -march=,
   -mabi=): md_parse_option matches on OPTION as a prefix and hands the
   remainder of the argument to FUNC.  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt); /* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
8852
8853 /* Transitive closure of features depending on set. */
8854 static aarch64_feature_set
8855 aarch64_feature_disable_set (aarch64_feature_set set)
8856 {
8857 const struct aarch64_option_cpu_value_table *opt;
8858 aarch64_feature_set prev = 0;
8859
8860 while (prev != set) {
8861 prev = set;
8862 for (opt = aarch64_features; opt->name != NULL; opt++)
8863 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8864 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8865 }
8866 return set;
8867 }
8868
8869 /* Transitive closure of dependencies of set. */
8870 static aarch64_feature_set
8871 aarch64_feature_enable_set (aarch64_feature_set set)
8872 {
8873 const struct aarch64_option_cpu_value_table *opt;
8874 aarch64_feature_set prev = 0;
8875
8876 while (prev != set) {
8877 prev = set;
8878 for (opt = aarch64_features; opt->name != NULL; opt++)
8879 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8880 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8881 }
8882 return set;
8883 }
8884
8885 static int
8886 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8887 bfd_boolean ext_only)
8888 {
8889 /* We insist on extensions being added before being removed. We achieve
8890 this by using the ADDING_VALUE variable to indicate whether we are
8891 adding an extension (1) or removing it (0) and only allowing it to
8892 change in the order -1 -> 1 -> 0. */
8893 int adding_value = -1;
8894 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8895
8896 /* Copy the feature set, so that we can modify it. */
8897 *ext_set = **opt_p;
8898 *opt_p = ext_set;
8899
8900 while (str != NULL && *str != 0)
8901 {
8902 const struct aarch64_option_cpu_value_table *opt;
8903 const char *ext = NULL;
8904 int optlen;
8905
8906 if (!ext_only)
8907 {
8908 if (*str != '+')
8909 {
8910 as_bad (_("invalid architectural extension"));
8911 return 0;
8912 }
8913
8914 ext = strchr (++str, '+');
8915 }
8916
8917 if (ext != NULL)
8918 optlen = ext - str;
8919 else
8920 optlen = strlen (str);
8921
8922 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
8923 {
8924 if (adding_value != 0)
8925 adding_value = 0;
8926 optlen -= 2;
8927 str += 2;
8928 }
8929 else if (optlen > 0)
8930 {
8931 if (adding_value == -1)
8932 adding_value = 1;
8933 else if (adding_value != 1)
8934 {
8935 as_bad (_("must specify extensions to add before specifying "
8936 "those to remove"));
8937 return FALSE;
8938 }
8939 }
8940
8941 if (optlen == 0)
8942 {
8943 as_bad (_("missing architectural extension"));
8944 return 0;
8945 }
8946
8947 gas_assert (adding_value != -1);
8948
8949 for (opt = aarch64_features; opt->name != NULL; opt++)
8950 if (strncmp (opt->name, str, optlen) == 0)
8951 {
8952 aarch64_feature_set set;
8953
8954 /* Add or remove the extension. */
8955 if (adding_value)
8956 {
8957 set = aarch64_feature_enable_set (opt->value);
8958 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
8959 }
8960 else
8961 {
8962 set = aarch64_feature_disable_set (opt->value);
8963 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
8964 }
8965 break;
8966 }
8967
8968 if (opt->name == NULL)
8969 {
8970 as_bad (_("unknown architectural extension `%s'"), str);
8971 return 0;
8972 }
8973
8974 str = ext;
8975 };
8976
8977 return 1;
8978 }
8979
8980 static int
8981 aarch64_parse_cpu (const char *str)
8982 {
8983 const struct aarch64_cpu_option_table *opt;
8984 const char *ext = strchr (str, '+');
8985 size_t optlen;
8986
8987 if (ext != NULL)
8988 optlen = ext - str;
8989 else
8990 optlen = strlen (str);
8991
8992 if (optlen == 0)
8993 {
8994 as_bad (_("missing cpu name `%s'"), str);
8995 return 0;
8996 }
8997
8998 for (opt = aarch64_cpus; opt->name != NULL; opt++)
8999 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9000 {
9001 mcpu_cpu_opt = &opt->value;
9002 if (ext != NULL)
9003 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
9004
9005 return 1;
9006 }
9007
9008 as_bad (_("unknown cpu `%s'"), str);
9009 return 0;
9010 }
9011
9012 static int
9013 aarch64_parse_arch (const char *str)
9014 {
9015 const struct aarch64_arch_option_table *opt;
9016 const char *ext = strchr (str, '+');
9017 size_t optlen;
9018
9019 if (ext != NULL)
9020 optlen = ext - str;
9021 else
9022 optlen = strlen (str);
9023
9024 if (optlen == 0)
9025 {
9026 as_bad (_("missing architecture name `%s'"), str);
9027 return 0;
9028 }
9029
9030 for (opt = aarch64_archs; opt->name != NULL; opt++)
9031 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9032 {
9033 march_cpu_opt = &opt->value;
9034 if (ext != NULL)
9035 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
9036
9037 return 1;
9038 }
9039
9040 as_bad (_("unknown architecture `%s'\n"), str);
9041 return 0;
9042 }
9043
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name as given to -mabi=.  */
  enum aarch64_abi_type value;	/* Corresponding internal ABI code.  */
};

/* Recognized values for the -mabi= option.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
9055
9056 static int
9057 aarch64_parse_abi (const char *str)
9058 {
9059 unsigned int i;
9060
9061 if (str[0] == '\0')
9062 {
9063 as_bad (_("missing abi name `%s'"), str);
9064 return 0;
9065 }
9066
9067 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9068 if (strcmp (str, aarch64_abis[i].name) == 0)
9069 {
9070 aarch64_abi = aarch64_abis[i].value;
9071 return 1;
9072 }
9073
9074 as_bad (_("unknown abi `%s'\n"), str);
9075 return 0;
9076 }
9077
/* Table of argument-taking -m options; md_parse_option matches the
   prefix and passes the remainder to the entry's parser function.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
9089
/* gas hook: process one target-specific command-line option.  C is the
   option character; ARG is its argument, or NULL if none was given.
   Returns non-zero if the option was recognized.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple flag options (e.g. -mbig-endian).  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then try the prefix-matched options (e.g. -mcpu=...).  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text following the
		 matched prefix (the "-1" accounts for the option
		 character C already being stripped from ARG).  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
9157
9158 void
9159 md_show_usage (FILE * fp)
9160 {
9161 struct aarch64_option_table *opt;
9162 struct aarch64_long_option_table *lopt;
9163
9164 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9165
9166 for (opt = aarch64_opts; opt->option != NULL; opt++)
9167 if (opt->help != NULL)
9168 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9169
9170 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9171 if (lopt->help != NULL)
9172 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
9173
9174 #ifdef OPTION_EB
9175 fprintf (fp, _("\
9176 -EB assemble code for a big-endian cpu\n"));
9177 #endif
9178
9179 #ifdef OPTION_EL
9180 fprintf (fp, _("\
9181 -EL assemble code for a little-endian cpu\n"));
9182 #endif
9183 }
9184
9185 /* Parse a .cpu directive. */
9186
9187 static void
9188 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9189 {
9190 const struct aarch64_cpu_option_table *opt;
9191 char saved_char;
9192 char *name;
9193 char *ext;
9194 size_t optlen;
9195
9196 name = input_line_pointer;
9197 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9198 input_line_pointer++;
9199 saved_char = *input_line_pointer;
9200 *input_line_pointer = 0;
9201
9202 ext = strchr (name, '+');
9203
9204 if (ext != NULL)
9205 optlen = ext - name;
9206 else
9207 optlen = strlen (name);
9208
9209 /* Skip the first "all" entry. */
9210 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9211 if (strlen (opt->name) == optlen
9212 && strncmp (name, opt->name, optlen) == 0)
9213 {
9214 mcpu_cpu_opt = &opt->value;
9215 if (ext != NULL)
9216 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9217 return;
9218
9219 cpu_variant = *mcpu_cpu_opt;
9220
9221 *input_line_pointer = saved_char;
9222 demand_empty_rest_of_line ();
9223 return;
9224 }
9225 as_bad (_("unknown cpu `%s'"), name);
9226 *input_line_pointer = saved_char;
9227 ignore_rest_of_line ();
9228 }
9229
9230
9231 /* Parse a .arch directive. */
9232
9233 static void
9234 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9235 {
9236 const struct aarch64_arch_option_table *opt;
9237 char saved_char;
9238 char *name;
9239 char *ext;
9240 size_t optlen;
9241
9242 name = input_line_pointer;
9243 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9244 input_line_pointer++;
9245 saved_char = *input_line_pointer;
9246 *input_line_pointer = 0;
9247
9248 ext = strchr (name, '+');
9249
9250 if (ext != NULL)
9251 optlen = ext - name;
9252 else
9253 optlen = strlen (name);
9254
9255 /* Skip the first "all" entry. */
9256 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9257 if (strlen (opt->name) == optlen
9258 && strncmp (name, opt->name, optlen) == 0)
9259 {
9260 mcpu_cpu_opt = &opt->value;
9261 if (ext != NULL)
9262 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9263 return;
9264
9265 cpu_variant = *mcpu_cpu_opt;
9266
9267 *input_line_pointer = saved_char;
9268 demand_empty_rest_of_line ();
9269 return;
9270 }
9271
9272 as_bad (_("unknown architecture `%s'\n"), name);
9273 *input_line_pointer = saved_char;
9274 ignore_rest_of_line ();
9275 }
9276
9277 /* Parse a .arch_extension directive. */
9278
9279 static void
9280 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9281 {
9282 char saved_char;
9283 char *ext = input_line_pointer;;
9284
9285 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9286 input_line_pointer++;
9287 saved_char = *input_line_pointer;
9288 *input_line_pointer = 0;
9289
9290 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9291 return;
9292
9293 cpu_variant = *mcpu_cpu_opt;
9294
9295 *input_line_pointer = saved_char;
9296 demand_empty_rest_of_line ();
9297 }
9298
/* Copy symbol information.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  /* Propagate the AArch64-specific symbol flags from SRC to DEST.  */
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}