1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2017 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_NONE = 0,
66 AARCH64_ABI_LP64 = 1,
67 AARCH64_ABI_ILP32 = 2
68 };
69
70 #ifndef DEFAULT_ARCH
71 #define DEFAULT_ARCH "aarch64"
72 #endif
73
74 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
75 static const char *default_arch = DEFAULT_ARCH;
76
77 /* AArch64 ABI for the output file. */
78 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
79
80 /* When non-zero, program to a 32-bit model, in which the C data types
81 int, long and all pointer types are 32-bit objects (ILP32); or to a
82 64-bit model, in which the C int type is 32-bits but the C long type
83 and all pointer types are 64-bit objects (LP64). */
84 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
85 #endif
86
87 enum vector_el_type
88 {
89 NT_invtype = -1,
90 NT_b,
91 NT_h,
92 NT_s,
93 NT_d,
94 NT_q,
95 NT_zero,
96 NT_merge
97 };
98
99 /* Bits for DEFINED field in vector_type_el. */
100 #define NTA_HASTYPE 1
101 #define NTA_HASINDEX 2
102 #define NTA_HASVARWIDTH 4
103
104 struct vector_type_el
105 {
106 enum vector_el_type type;
107 unsigned char defined;
108 unsigned width;
109 int64_t index;
110 };
111
112 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
113
114 struct reloc
115 {
116 bfd_reloc_code_real_type type;
117 expressionS exp;
118 int pc_rel;
119 enum aarch64_opnd opnd;
120 uint32_t flags;
121 unsigned need_libopcodes_p : 1;
122 };
123
124 struct aarch64_instruction
125 {
126 /* libopcodes structure for instruction intermediate representation. */
127 aarch64_inst base;
128 /* Record assembly errors found during the parsing. */
129 struct
130 {
131 enum aarch64_operand_error_kind kind;
132 const char *error;
133 } parsing_error;
134 /* The condition that appears in the assembly line. */
135 int cond;
136 /* Relocation information (including the GAS internal fixup). */
137 struct reloc reloc;
138 /* Need to generate an immediate in the literal pool. */
139 unsigned gen_lit_pool : 1;
140 };
141
142 typedef struct aarch64_instruction aarch64_instruction;
143
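/* The instruction currently being assembled; the parsers below fill in
   its operands and record any parsing errors here.  */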
144 static aarch64_instruction inst;
145
146 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
147 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
148
149 /* Diagnostics inline function utilities.
150
151 These are lightweight utilities which should only be called by parse_operands
152 and other parsers. GAS processes each assembly line by parsing it against
153 instruction template(s); in the case of multiple templates (for the same
154 mnemonic name), those templates are tried one by one until one succeeds or
155 all fail. An assembly line may fail a few templates before being
156 successfully parsed; an error saved here in most cases is not a user error
157 but an error indicating the current template is not the right template.
158 Therefore it is very important that errors can be saved at a low cost during
159 the parsing; we don't want to slow down the whole parsing by recording
160 non-user errors in detail.
161
162 Remember that the objective is to help GAS pick up the most appropriate
163 error message in the case of multiple templates, e.g. FMOV which has 8
164 templates. */
165
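/* The helpers below implement this scheme: clear_error resets the record
   for the current line, the set_*_error variants let the template being
   tried note its complaint cheaply, and error_p, get_error_kind and
   get_error_message let the caller examine whatever error is left once
   all templates have been tried.  */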
166 static inline void
167 clear_error (void)
168 {
169 inst.parsing_error.kind = AARCH64_OPDE_NIL;
170 inst.parsing_error.error = NULL;
171 }
172
173 static inline bfd_boolean
174 error_p (void)
175 {
176 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
177 }
178
179 static inline const char *
180 get_error_message (void)
181 {
182 return inst.parsing_error.error;
183 }
184
185 static inline enum aarch64_operand_error_kind
186 get_error_kind (void)
187 {
188 return inst.parsing_error.kind;
189 }
190
191 static inline void
192 set_error (enum aarch64_operand_error_kind kind, const char *error)
193 {
194 inst.parsing_error.kind = kind;
195 inst.parsing_error.error = error;
196 }
197
198 static inline void
199 set_recoverable_error (const char *error)
200 {
201 set_error (AARCH64_OPDE_RECOVERABLE, error);
202 }
203
204 /* Use the DESC field of the corresponding aarch64_operand entry to compose
205 the error message. */
206 static inline void
207 set_default_error (void)
208 {
209 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
210 }
211
212 static inline void
213 set_syntax_error (const char *error)
214 {
215 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
216 }
217
218 static inline void
219 set_first_syntax_error (const char *error)
220 {
221 if (! error_p ())
222 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
223 }
224
225 static inline void
226 set_fatal_syntax_error (const char *error)
227 {
228 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
229 }
230 \f
231 /* Number of littlenums required to hold an extended precision number. */
232 #define MAX_LITTLENUMS 6
233
234 /* Return value for certain parsers when the parsing fails; those parsers
235 return the information of the parsed result, e.g. register number, on
236 success. */
237 #define PARSE_FAIL -1
238
239 /* This is an invalid condition code that means no conditional field is
240 present. */
241 #define COND_ALWAYS 0x10
242
243 typedef struct
244 {
245 const char *template;
246 unsigned long value;
247 } asm_barrier_opt;
248
249 typedef struct
250 {
251 const char *template;
252 uint32_t value;
253 } asm_nzcv;
254
255 struct reloc_entry
256 {
257 char *name;
258 bfd_reloc_code_real_type reloc;
259 };
260
261 /* Macros to define the register types and masks for the purpose
262 of parsing. */
263
264 #undef AARCH64_REG_TYPES
265 #define AARCH64_REG_TYPES \
266 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
267 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
268 BASIC_REG_TYPE(SP_32) /* wsp */ \
269 BASIC_REG_TYPE(SP_64) /* sp */ \
270 BASIC_REG_TYPE(Z_32) /* wzr */ \
271 BASIC_REG_TYPE(Z_64) /* xzr */ \
272 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
273 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
274 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
275 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
276 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
277 BASIC_REG_TYPE(VN) /* v[0-31] */ \
278 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
279 BASIC_REG_TYPE(PN) /* p[0-15] */ \
280 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
281 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
282 /* Typecheck: same, plus SVE registers. */ \
283 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
284 | REG_TYPE(ZN)) \
285 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
286 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
287 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
288 /* Typecheck: same, plus SVE registers. */ \
289 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
290 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
291 | REG_TYPE(ZN)) \
292 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
293 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
294 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
295 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
296 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
298 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
299 /* Typecheck: any [BHSDQ]P FP. */ \
300 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
301 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
302 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
303 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
305 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
306 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
307 /* Typecheck: as above, but also Zn and Pn. This should only be \
308 used for SVE instructions, since Zn and Pn are valid symbols \
309 in other contexts. */ \
310 MULTI_REG_TYPE(R_Z_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
311 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
312 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
313 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
314 | REG_TYPE(ZN) | REG_TYPE(PN)) \
315 /* Any integer register; used for error messages only. */ \
316 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
317 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
318 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
319 /* Pseudo type to mark the end of the enumerator sequence. */ \
320 BASIC_REG_TYPE(MAX)
321
322 #undef BASIC_REG_TYPE
323 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
324 #undef MULTI_REG_TYPE
325 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
326
327 /* Register type enumerators. */
328 typedef enum aarch64_reg_type_
329 {
330 /* A list of REG_TYPE_*. */
331 AARCH64_REG_TYPES
332 } aarch64_reg_type;
333
334 #undef BASIC_REG_TYPE
335 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
336 #undef REG_TYPE
337 #define REG_TYPE(T) (1 << REG_TYPE_##T)
338 #undef MULTI_REG_TYPE
339 #define MULTI_REG_TYPE(T,V) V,
340
341 /* Structure for a hash table entry for a register. */
342 typedef struct
343 {
344 const char *name;
345 unsigned char number;
346 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
347 unsigned char builtin;
348 } reg_entry;
349
350 /* Values indexed by aarch64_reg_type to assist the type checking. */
351 static const unsigned reg_type_masks[] =
352 {
353 AARCH64_REG_TYPES
354 };
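/* For example, after the expansions above reg_type_masks[REG_TYPE_R_64]
   is simply (1 << REG_TYPE_R_64), while reg_type_masks[REG_TYPE_R64_SP]
   is REG_TYPE (R_64) | REG_TYPE (SP_64), i.e. a mask accepting either an
   X register or SP.  */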
355
356 #undef BASIC_REG_TYPE
357 #undef REG_TYPE
358 #undef MULTI_REG_TYPE
359 #undef AARCH64_REG_TYPES
360
361 /* Diagnostics used when we don't get a register of the expected type.
362 Note: this has to be synchronized with the aarch64_reg_type definitions
363 above. */
364 static const char *
365 get_reg_expected_msg (aarch64_reg_type reg_type)
366 {
367 const char *msg;
368
369 switch (reg_type)
370 {
371 case REG_TYPE_R_32:
372 msg = N_("integer 32-bit register expected");
373 break;
374 case REG_TYPE_R_64:
375 msg = N_("integer 64-bit register expected");
376 break;
377 case REG_TYPE_R_N:
378 msg = N_("integer register expected");
379 break;
380 case REG_TYPE_R64_SP:
381 msg = N_("64-bit integer or SP register expected");
382 break;
383 case REG_TYPE_SVE_BASE:
384 msg = N_("base register expected");
385 break;
386 case REG_TYPE_R_Z:
387 msg = N_("integer or zero register expected");
388 break;
389 case REG_TYPE_SVE_OFFSET:
390 msg = N_("offset register expected");
391 break;
392 case REG_TYPE_R_SP:
393 msg = N_("integer or SP register expected");
394 break;
395 case REG_TYPE_R_Z_SP:
396 msg = N_("integer, zero or SP register expected");
397 break;
398 case REG_TYPE_FP_B:
399 msg = N_("8-bit SIMD scalar register expected");
400 break;
401 case REG_TYPE_FP_H:
402 msg = N_("16-bit SIMD scalar or floating-point half precision "
403 "register expected");
404 break;
405 case REG_TYPE_FP_S:
406 msg = N_("32-bit SIMD scalar or floating-point single precision "
407 "register expected");
408 break;
409 case REG_TYPE_FP_D:
410 msg = N_("64-bit SIMD scalar or floating-point double precision "
411 "register expected");
412 break;
413 case REG_TYPE_FP_Q:
414 msg = N_("128-bit SIMD scalar or floating-point quad precision "
415 "register expected");
416 break;
417 case REG_TYPE_R_Z_BHSDQ_V:
418 case REG_TYPE_R_Z_BHSDQ_VZP:
419 msg = N_("register expected");
420 break;
421 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
422 msg = N_("SIMD scalar or floating-point register expected");
423 break;
424 case REG_TYPE_VN: /* any V reg */
425 msg = N_("vector register expected");
426 break;
427 case REG_TYPE_ZN:
428 msg = N_("SVE vector register expected");
429 break;
430 case REG_TYPE_PN:
431 msg = N_("SVE predicate register expected");
432 break;
433 default:
434 as_fatal (_("invalid register type %d"), reg_type);
435 }
436 return msg;
437 }
438
439 /* Some well known registers that we refer to directly elsewhere. */
440 #define REG_SP 31
441
442 /* Instructions take 4 bytes in the object file. */
443 #define INSN_SIZE 4
444
445 static struct hash_control *aarch64_ops_hsh;
446 static struct hash_control *aarch64_cond_hsh;
447 static struct hash_control *aarch64_shift_hsh;
448 static struct hash_control *aarch64_sys_regs_hsh;
449 static struct hash_control *aarch64_pstatefield_hsh;
450 static struct hash_control *aarch64_sys_regs_ic_hsh;
451 static struct hash_control *aarch64_sys_regs_dc_hsh;
452 static struct hash_control *aarch64_sys_regs_at_hsh;
453 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
454 static struct hash_control *aarch64_reg_hsh;
455 static struct hash_control *aarch64_barrier_opt_hsh;
456 static struct hash_control *aarch64_nzcv_hsh;
457 static struct hash_control *aarch64_pldop_hsh;
458 static struct hash_control *aarch64_hint_opt_hsh;
459
460 /* Stuff needed to resolve the label ambiguity
461 As:
462 ...
463 label: <insn>
464 may differ from:
465 ...
466 label:
467 <insn> */
468
469 static symbolS *last_label_seen;
470
471 /* Literal pool structure. Held on a per-section
472 and per-sub-section basis. */
473
474 #define MAX_LITERAL_POOL_SIZE 1024
475 typedef struct literal_expression
476 {
477 expressionS exp;
478 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
479 LITTLENUM_TYPE * bignum;
480 } literal_expression;
481
482 typedef struct literal_pool
483 {
484 literal_expression literals[MAX_LITERAL_POOL_SIZE];
485 unsigned int next_free_entry;
486 unsigned int id;
487 symbolS *symbol;
488 segT section;
489 subsegT sub_section;
490 int size;
491 struct literal_pool *next;
492 } literal_pool;
493
494 /* Pointer to a linked list of literal pools. */
495 static literal_pool *list_of_pools = NULL;
496 \f
497 /* Pure syntax. */
498
499 /* This array holds the chars that always start a comment. If the
500 pre-processor is disabled, these aren't very useful. */
501 const char comment_chars[] = "";
502
503 /* This array holds the chars that only start a comment at the beginning of
504 a line. If the line seems to have the form '# 123 filename'
505 .line and .file directives will appear in the pre-processed output. */
506 /* Note that input_file.c hand checks for '#' at the beginning of the
507 first line of the input file. This is because the compiler outputs
508 #NO_APP at the beginning of its output. */
509 /* Also note that comments like this one will always work. */
510 const char line_comment_chars[] = "#";
511
512 const char line_separator_chars[] = ";";
513
514 /* Chars that can be used to separate the mantissa
515 from the exponent in floating point numbers. */
516 const char EXP_CHARS[] = "eE";
517
518 /* Chars that mean this number is a floating point constant. */
519 /* As in 0f12.456 */
520 /* or 0d1.2345e12 */
521
522 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
523
524 /* Prefix character that indicates the start of an immediate value. */
525 #define is_immediate_prefix(C) ((C) == '#')
526
527 /* Separator character handling. */
528
529 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
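/* Note that this skips at most one space character; by the time these
   parsers run, the input scrubber has already collapsed runs of
   whitespace into single spaces.  */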
530
531 static inline bfd_boolean
532 skip_past_char (char **str, char c)
533 {
534 if (**str == c)
535 {
536 (*str)++;
537 return TRUE;
538 }
539 else
540 return FALSE;
541 }
542
543 #define skip_past_comma(str) skip_past_char (str, ',')
544
545 /* Arithmetic expressions (possibly involving symbols). */
546
547 static bfd_boolean in_my_get_expression_p = FALSE;
548
549 /* Third argument to my_get_expression. */
550 #define GE_NO_PREFIX 0
551 #define GE_OPT_PREFIX 1
552
553 /* Return TRUE if the string pointed to by *STR is successfully parsed
554 as a valid expression; *EP will be filled with the information of
555 such an expression. Otherwise return FALSE. */
556
557 static bfd_boolean
558 my_get_expression (expressionS * ep, char **str, int prefix_mode,
559 int reject_absent)
560 {
561 char *save_in;
562 segT seg;
563 int prefix_present_p = 0;
564
565 switch (prefix_mode)
566 {
567 case GE_NO_PREFIX:
568 break;
569 case GE_OPT_PREFIX:
570 if (is_immediate_prefix (**str))
571 {
572 (*str)++;
573 prefix_present_p = 1;
574 }
575 break;
576 default:
577 abort ();
578 }
579
580 memset (ep, 0, sizeof (expressionS));
581
582 save_in = input_line_pointer;
583 input_line_pointer = *str;
584 in_my_get_expression_p = TRUE;
585 seg = expression (ep);
586 in_my_get_expression_p = FALSE;
587
588 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
589 {
590 /* We found a bad expression in md_operand(). */
591 *str = input_line_pointer;
592 input_line_pointer = save_in;
593 if (prefix_present_p && ! error_p ())
594 set_fatal_syntax_error (_("bad expression"));
595 else
596 set_first_syntax_error (_("bad expression"));
597 return FALSE;
598 }
599
600 #ifdef OBJ_AOUT
601 if (seg != absolute_section
602 && seg != text_section
603 && seg != data_section
604 && seg != bss_section && seg != undefined_section)
605 {
606 set_syntax_error (_("bad segment"));
607 *str = input_line_pointer;
608 input_line_pointer = save_in;
609 return FALSE;
610 }
611 #else
612 (void) seg;
613 #endif
614
615 *str = input_line_pointer;
616 input_line_pointer = save_in;
617 return TRUE;
618 }
619
620 /* Turn a string in input_line_pointer into a floating point constant
621 of type TYPE, and store the appropriate bytes in *LITP. The number
622 of LITTLENUMS emitted is stored in *SIZEP. An error message is
623 returned, or NULL on OK. */
624
625 const char *
626 md_atof (int type, char *litP, int *sizeP)
627 {
628 return ieee_md_atof (type, litP, sizeP, target_big_endian);
629 }
630
631 /* We handle all bad expressions here, so that we can report the faulty
632 instruction in the error message. */
633 void
634 md_operand (expressionS * exp)
635 {
636 if (in_my_get_expression_p)
637 exp->X_op = O_illegal;
638 }
639
640 /* Immediate values. */
641
642 /* Errors may be set multiple times during parsing or bit encoding
643 (particularly in the Neon bits), but usually the earliest error which is set
644 will be the most meaningful. Avoid overwriting it with later (cascading)
645 errors by calling this function. */
646
647 static void
648 first_error (const char *error)
649 {
650 if (! error_p ())
651 set_syntax_error (error);
652 }
653
654 /* Similar to first_error, but this function accepts a formatted error
655 message. */
656 static void
657 first_error_fmt (const char *format, ...)
658 {
659 va_list args;
660 enum
661 { size = 100 };
662 /* N.B. this single buffer will not cause error messages for different
663 instructions to pollute each other; this is because at the end of
664 processing of each assembly line, the error message, if any, will be
665 collected by as_bad. */
666 static char buffer[size];
667
668 if (! error_p ())
669 {
670 int ret ATTRIBUTE_UNUSED;
671 va_start (args, format);
672 ret = vsnprintf (buffer, size, format, args);
673 know (ret <= size - 1 && ret >= 0);
674 va_end (args);
675 set_syntax_error (buffer);
676 }
677 }
678
679 /* Register parsing. */
680
681 /* Generic register parser which is called by other specialized
682 register parsers.
683 CCP points to what should be the beginning of a register name.
684 If it is indeed a valid register name, advance CCP over it and
685 return the reg_entry structure; otherwise return NULL.
686 It does not issue diagnostics. */
687
688 static reg_entry *
689 parse_reg (char **ccp)
690 {
691 char *start = *ccp;
692 char *p;
693 reg_entry *reg;
694
695 #ifdef REGISTER_PREFIX
696 if (*start != REGISTER_PREFIX)
697 return NULL;
698 start++;
699 #endif
700
701 p = start;
702 if (!ISALPHA (*p) || !is_name_beginner (*p))
703 return NULL;
704
705 do
706 p++;
707 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
708
709 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
710
711 if (!reg)
712 return NULL;
713
714 *ccp = p;
715 return reg;
716 }
717
718 /* Return TRUE if REG->TYPE is compatible with the required register
719 type TYPE; otherwise return FALSE. */
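/* For example, a plain X register (REG_TYPE_R_64) is accepted when TYPE
   is REG_TYPE_R64_SP or REG_TYPE_R_Z_SP, whereas an SVE Z register
   (REG_TYPE_ZN) is not.  */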
720 static bfd_boolean
721 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
722 {
723 return (reg_type_masks[type] & (1 << reg->type)) != 0;
724 }
725
726 /* Try to parse a base or offset register. Allow SVE base and offset
727 registers if REG_TYPE includes SVE registers. Return the register
728 entry on success, setting *QUALIFIER to the register qualifier.
729 Return null otherwise.
730
731 Note that this function does not issue any diagnostics. */
732
733 static const reg_entry *
734 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
735 aarch64_opnd_qualifier_t *qualifier)
736 {
737 char *str = *ccp;
738 const reg_entry *reg = parse_reg (&str);
739
740 if (reg == NULL)
741 return NULL;
742
743 switch (reg->type)
744 {
745 case REG_TYPE_R_32:
746 case REG_TYPE_SP_32:
747 case REG_TYPE_Z_32:
748 *qualifier = AARCH64_OPND_QLF_W;
749 break;
750
751 case REG_TYPE_R_64:
752 case REG_TYPE_SP_64:
753 case REG_TYPE_Z_64:
754 *qualifier = AARCH64_OPND_QLF_X;
755 break;
756
757 case REG_TYPE_ZN:
758 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
759 || str[0] != '.')
760 return NULL;
761 switch (TOLOWER (str[1]))
762 {
763 case 's':
764 *qualifier = AARCH64_OPND_QLF_S_S;
765 break;
766 case 'd':
767 *qualifier = AARCH64_OPND_QLF_S_D;
768 break;
769 default:
770 return NULL;
771 }
772 str += 2;
773 break;
774
775 default:
776 return NULL;
777 }
778
779 *ccp = str;
780
781 return reg;
782 }
783
784 /* Try to parse a base or offset register. Return the register entry
785 on success, setting *QUALIFIER to the register qualifier. Return null
786 otherwise.
787
788 Note that this function does not issue any diagnostics. */
789
790 static const reg_entry *
791 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
792 {
793 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
794 }
795
796 /* Parse the qualifier of a vector register or vector element of type
797 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
798 succeeds; otherwise return FALSE.
799
800 Accept only one occurrence of:
801 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
802 b h s d q */
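/* For example, parsing ".4s" leaves *PARSED_TYPE with type NT_s and
   width 4, while ".d" (no element count) gives type NT_d and width 0.  */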
803 static bfd_boolean
804 parse_vector_type_for_operand (aarch64_reg_type reg_type,
805 struct vector_type_el *parsed_type, char **str)
806 {
807 char *ptr = *str;
808 unsigned width;
809 unsigned element_size;
810 enum vector_el_type type;
811
812 /* skip '.' */
813 gas_assert (*ptr == '.');
814 ptr++;
815
816 if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
817 {
818 width = 0;
819 goto elt_size;
820 }
821 width = strtoul (ptr, &ptr, 10);
822 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
823 {
824 first_error_fmt (_("bad size %d in vector width specifier"), width);
825 return FALSE;
826 }
827
828 elt_size:
829 switch (TOLOWER (*ptr))
830 {
831 case 'b':
832 type = NT_b;
833 element_size = 8;
834 break;
835 case 'h':
836 type = NT_h;
837 element_size = 16;
838 break;
839 case 's':
840 type = NT_s;
841 element_size = 32;
842 break;
843 case 'd':
844 type = NT_d;
845 element_size = 64;
846 break;
847 case 'q':
848 if (reg_type == REG_TYPE_ZN || width == 1)
849 {
850 type = NT_q;
851 element_size = 128;
852 break;
853 }
854 /* fall through. */
855 default:
856 if (*ptr != '\0')
857 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
858 else
859 first_error (_("missing element size"));
860 return FALSE;
861 }
862 if (width != 0 && width * element_size != 64
863 && width * element_size != 128
864 && !(width == 2 && element_size == 16)
865 && !(width == 4 && element_size == 8))
866 {
867 first_error_fmt (_
868 ("invalid element size %d and vector size combination %c"),
869 width, *ptr);
870 return FALSE;
871 }
872 ptr++;
873
874 parsed_type->type = type;
875 parsed_type->width = width;
876
877 *str = ptr;
878
879 return TRUE;
880 }
881
882 /* *STR contains an SVE zero/merge predication suffix. Parse it into
883 *PARSED_TYPE and point *STR at the end of the suffix. */
884
885 static bfd_boolean
886 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
887 {
888 char *ptr = *str;
889
890 /* Skip '/'. */
891 gas_assert (*ptr == '/');
892 ptr++;
893 switch (TOLOWER (*ptr))
894 {
895 case 'z':
896 parsed_type->type = NT_zero;
897 break;
898 case 'm':
899 parsed_type->type = NT_merge;
900 break;
901 default:
902 if (*ptr != '\0' && *ptr != ',')
903 first_error_fmt (_("unexpected character `%c' in predication type"),
904 *ptr);
905 else
906 first_error (_("missing predication type"));
907 return FALSE;
908 }
909 parsed_type->width = 0;
910 *str = ptr + 1;
911 return TRUE;
912 }
913
914 /* Parse a register of the type TYPE.
915
916 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
917 name or the parsed register is not of TYPE.
918
919 Otherwise return the register number, and optionally fill in the actual
920 type of the register in *RTYPE when multiple alternatives were given, and
921 return the register shape and element index information in *TYPEINFO.
922
923 IN_REG_LIST should be set to TRUE if the caller is parsing a register
924 list. */
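/* For example, parsing "v0.4s[2]" returns register number 0 and fills in
   *TYPEINFO with type NT_s, width 4 and index 2, with both NTA_HASTYPE
   and NTA_HASINDEX set in the DEFINED field.  */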
925
926 static int
927 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
928 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
929 {
930 char *str = *ccp;
931 const reg_entry *reg = parse_reg (&str);
932 struct vector_type_el atype;
933 struct vector_type_el parsetype;
934 bfd_boolean is_typed_vecreg = FALSE;
935
936 atype.defined = 0;
937 atype.type = NT_invtype;
938 atype.width = -1;
939 atype.index = 0;
940
941 if (reg == NULL)
942 {
943 if (typeinfo)
944 *typeinfo = atype;
945 set_default_error ();
946 return PARSE_FAIL;
947 }
948
949 if (! aarch64_check_reg_type (reg, type))
950 {
951 DEBUG_TRACE ("reg type check failed");
952 set_default_error ();
953 return PARSE_FAIL;
954 }
955 type = reg->type;
956
957 if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
958 && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
959 {
960 if (*str == '.')
961 {
962 if (!parse_vector_type_for_operand (type, &parsetype, &str))
963 return PARSE_FAIL;
964 }
965 else
966 {
967 if (!parse_predication_for_operand (&parsetype, &str))
968 return PARSE_FAIL;
969 }
970
971 /* Register is of the form Vn.[bhsdq]. */
972 is_typed_vecreg = TRUE;
973
974 if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
975 {
976 /* The width is always variable; we don't allow an integer width
977 to be specified. */
978 gas_assert (parsetype.width == 0);
979 atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
980 }
981 else if (parsetype.width == 0)
982 /* Expect index. In the new scheme we cannot have
983 Vn.[bhsdq] represent a scalar. Therefore any
984 Vn.[bhsdq] should have an index following it.
985 Except in reglists of course. */
986 atype.defined |= NTA_HASINDEX;
987 else
988 atype.defined |= NTA_HASTYPE;
989
990 atype.type = parsetype.type;
991 atype.width = parsetype.width;
992 }
993
994 if (skip_past_char (&str, '['))
995 {
996 expressionS exp;
997
998 /* Reject Sn[index] syntax. */
999 if (!is_typed_vecreg)
1000 {
1001 first_error (_("this type of register can't be indexed"));
1002 return PARSE_FAIL;
1003 }
1004
1005 if (in_reg_list)
1006 {
1007 first_error (_("index not allowed inside register list"));
1008 return PARSE_FAIL;
1009 }
1010
1011 atype.defined |= NTA_HASINDEX;
1012
1013 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1014
1015 if (exp.X_op != O_constant)
1016 {
1017 first_error (_("constant expression required"));
1018 return PARSE_FAIL;
1019 }
1020
1021 if (! skip_past_char (&str, ']'))
1022 return PARSE_FAIL;
1023
1024 atype.index = exp.X_add_number;
1025 }
1026 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
1027 {
1028 /* Indexed vector register expected. */
1029 first_error (_("indexed vector register expected"));
1030 return PARSE_FAIL;
1031 }
1032
1033 /* A vector reg Vn should be typed or indexed. */
1034 if (type == REG_TYPE_VN && atype.defined == 0)
1035 {
1036 first_error (_("invalid use of vector register"));
1037 }
1038
1039 if (typeinfo)
1040 *typeinfo = atype;
1041
1042 if (rtype)
1043 *rtype = type;
1044
1045 *ccp = str;
1046
1047 return reg->number;
1048 }
1049
1050 /* Parse register.
1051
1052 Return the register number on success; return PARSE_FAIL otherwise.
1053
1054 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1055 the register (e.g. NEON double or quad reg when either has been requested).
1056
1057 If this is a NEON vector register with additional type information, fill
1058 in the struct pointed to by VECTYPE (if non-NULL).
1059
1060 This parser does not handle register lists. */
1061
1062 static int
1063 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1064 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1065 {
1066 struct vector_type_el atype;
1067 char *str = *ccp;
1068 int reg = parse_typed_reg (&str, type, rtype, &atype,
1069 /*in_reg_list= */ FALSE);
1070
1071 if (reg == PARSE_FAIL)
1072 return PARSE_FAIL;
1073
1074 if (vectype)
1075 *vectype = atype;
1076
1077 *ccp = str;
1078
1079 return reg;
1080 }
1081
1082 static inline bfd_boolean
1083 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1084 {
1085 return
1086 e1.type == e2.type
1087 && e1.defined == e2.defined
1088 && e1.width == e2.width && e1.index == e2.index;
1089 }
1090
1091 /* This function parses a list of vector registers of type TYPE.
1092 On success, it returns the parsed register list information in the
1093 following encoded format:
1094
1095 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1096 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg - 1
1097
1098 The information of the register shape and/or index is returned in
1099 *VECTYPE.
1100
1101 It returns PARSE_FAIL if the register list is invalid.
1102
1103 The list contains one to four registers.
1104 Each register can be one of:
1105 <Vt>.<T>[<index>]
1106 <Vt>.<T>
1107 All <T> should be identical.
1108 All <index> should be identical.
1109 There are restrictions on <Vt> numbers which are checked later
1110 (by reg_list_valid_p). */
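/* For example, "{v4.4s, v5.4s}" (equivalently "{v4.4s-v5.4s}") is encoded
   as 0x291: register number 4 in bits 2-6, register number 5 in bits 7-11
   and num_of_reg - 1 (i.e. 1) in bits 0-1.  */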
1111
1112 static int
1113 parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1114 struct vector_type_el *vectype)
1115 {
1116 char *str = *ccp;
1117 int nb_regs;
1118 struct vector_type_el typeinfo, typeinfo_first;
1119 int val, val_range;
1120 int in_range;
1121 int ret_val;
1122 int i;
1123 bfd_boolean error = FALSE;
1124 bfd_boolean expect_index = FALSE;
1125
1126 if (*str != '{')
1127 {
1128 set_syntax_error (_("expecting {"));
1129 return PARSE_FAIL;
1130 }
1131 str++;
1132
1133 nb_regs = 0;
1134 typeinfo_first.defined = 0;
1135 typeinfo_first.type = NT_invtype;
1136 typeinfo_first.width = -1;
1137 typeinfo_first.index = 0;
1138 ret_val = 0;
1139 val = -1;
1140 val_range = -1;
1141 in_range = 0;
1142 do
1143 {
1144 if (in_range)
1145 {
1146 str++; /* skip over '-' */
1147 val_range = val;
1148 }
1149 val = parse_typed_reg (&str, type, NULL, &typeinfo,
1150 /*in_reg_list= */ TRUE);
1151 if (val == PARSE_FAIL)
1152 {
1153 set_first_syntax_error (_("invalid vector register in list"));
1154 error = TRUE;
1155 continue;
1156 }
1157 /* reject [bhsd]n */
1158 if (type == REG_TYPE_VN && typeinfo.defined == 0)
1159 {
1160 set_first_syntax_error (_("invalid scalar register in list"));
1161 error = TRUE;
1162 continue;
1163 }
1164
1165 if (typeinfo.defined & NTA_HASINDEX)
1166 expect_index = TRUE;
1167
1168 if (in_range)
1169 {
1170 if (val < val_range)
1171 {
1172 set_first_syntax_error
1173 (_("invalid range in vector register list"));
1174 error = TRUE;
1175 }
1176 val_range++;
1177 }
1178 else
1179 {
1180 val_range = val;
1181 if (nb_regs == 0)
1182 typeinfo_first = typeinfo;
1183 else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1184 {
1185 set_first_syntax_error
1186 (_("type mismatch in vector register list"));
1187 error = TRUE;
1188 }
1189 }
1190 if (! error)
1191 for (i = val_range; i <= val; i++)
1192 {
1193 ret_val |= i << (5 * nb_regs);
1194 nb_regs++;
1195 }
1196 in_range = 0;
1197 }
1198 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1199
1200 skip_whitespace (str);
1201 if (*str != '}')
1202 {
1203 set_first_syntax_error (_("end of vector register list not found"));
1204 error = TRUE;
1205 }
1206 str++;
1207
1208 skip_whitespace (str);
1209
1210 if (expect_index)
1211 {
1212 if (skip_past_char (&str, '['))
1213 {
1214 expressionS exp;
1215
1216 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1217 if (exp.X_op != O_constant)
1218 {
1219 set_first_syntax_error (_("constant expression required."));
1220 error = TRUE;
1221 }
1222 if (! skip_past_char (&str, ']'))
1223 error = TRUE;
1224 else
1225 typeinfo_first.index = exp.X_add_number;
1226 }
1227 else
1228 {
1229 set_first_syntax_error (_("expected index"));
1230 error = TRUE;
1231 }
1232 }
1233
1234 if (nb_regs > 4)
1235 {
1236 set_first_syntax_error (_("too many registers in vector register list"));
1237 error = TRUE;
1238 }
1239 else if (nb_regs == 0)
1240 {
1241 set_first_syntax_error (_("empty vector register list"));
1242 error = TRUE;
1243 }
1244
1245 *ccp = str;
1246 if (! error)
1247 *vectype = typeinfo_first;
1248
1249 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1250 }
1251
1252 /* Directives: register aliases. */
1253
1254 static reg_entry *
1255 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1256 {
1257 reg_entry *new;
1258 const char *name;
1259
1260 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1261 {
1262 if (new->builtin)
1263 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1264 str);
1265
1266 /* Only warn about a redefinition if it's not defined as the
1267 same register. */
1268 else if (new->number != number || new->type != type)
1269 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1270
1271 return NULL;
1272 }
1273
1274 name = xstrdup (str);
1275 new = XNEW (reg_entry);
1276
1277 new->name = name;
1278 new->number = number;
1279 new->type = type;
1280 new->builtin = FALSE;
1281
1282 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1283 abort ();
1284
1285 return new;
1286 }
1287
1288 /* Look for the .req directive. This is of the form:
1289
1290 new_register_name .req existing_register_name
1291
1292 If we find one, or if it looks sufficiently like one that we want to
1293 handle any error here, return TRUE. Otherwise return FALSE. */
1294
1295 static bfd_boolean
1296 create_register_alias (char *newname, char *p)
1297 {
1298 const reg_entry *old;
1299 char *oldname, *nbuf;
1300 size_t nlen;
1301
1302 /* The input scrubber ensures that whitespace after the mnemonic is
1303 collapsed to single spaces. */
1304 oldname = p;
1305 if (strncmp (oldname, " .req ", 6) != 0)
1306 return FALSE;
1307
1308 oldname += 6;
1309 if (*oldname == '\0')
1310 return FALSE;
1311
1312 old = hash_find (aarch64_reg_hsh, oldname);
1313 if (!old)
1314 {
1315 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1316 return TRUE;
1317 }
1318
1319 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1320 the desired alias name, and p points to its end. If not, then
1321 the desired alias name is in the global original_case_string. */
1322 #ifdef TC_CASE_SENSITIVE
1323 nlen = p - newname;
1324 #else
1325 newname = original_case_string;
1326 nlen = strlen (newname);
1327 #endif
1328
1329 nbuf = xmemdup0 (newname, nlen);
1330
1331 /* Create aliases under the new name as stated; an all-lowercase
1332 version of the new name; and an all-uppercase version of the new
1333 name. */
1334 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1335 {
1336 for (p = nbuf; *p; p++)
1337 *p = TOUPPER (*p);
1338
1339 if (strncmp (nbuf, newname, nlen))
1340 {
1341 /* If this attempt to create an additional alias fails, do not bother
1342 trying to create the all-lower case alias. We will fail and issue
1343 a second, duplicate error message. This situation arises when the
1344 programmer does something like:
1345 foo .req r0
1346 Foo .req r1
1347 The second .req creates the "Foo" alias but then fails to create
1348 the artificial FOO alias because it has already been created by the
1349 first .req. */
1350 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1351 {
1352 free (nbuf);
1353 return TRUE;
1354 }
1355 }
1356
1357 for (p = nbuf; *p; p++)
1358 *p = TOLOWER (*p);
1359
1360 if (strncmp (nbuf, newname, nlen))
1361 insert_reg_alias (nbuf, old->number, old->type);
1362 }
1363
1364 free (nbuf);
1365 return TRUE;
1366 }
1367
1368 /* Should never be called, as .req goes between the alias and the
1369 register name, not at the beginning of the line. */
1370 static void
1371 s_req (int a ATTRIBUTE_UNUSED)
1372 {
1373 as_bad (_("invalid syntax for .req directive"));
1374 }
1375
1376 /* The .unreq directive deletes an alias which was previously defined
1377 by .req. For example:
1378
1379 my_alias .req r11
1380 .unreq my_alias */
1381
1382 static void
1383 s_unreq (int a ATTRIBUTE_UNUSED)
1384 {
1385 char *name;
1386 char saved_char;
1387
1388 name = input_line_pointer;
1389
1390 while (*input_line_pointer != 0
1391 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1392 ++input_line_pointer;
1393
1394 saved_char = *input_line_pointer;
1395 *input_line_pointer = 0;
1396
1397 if (!*name)
1398 as_bad (_("invalid syntax for .unreq directive"));
1399 else
1400 {
1401 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1402
1403 if (!reg)
1404 as_bad (_("unknown register alias '%s'"), name);
1405 else if (reg->builtin)
1406 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1407 name);
1408 else
1409 {
1410 char *p;
1411 char *nbuf;
1412
1413 hash_delete (aarch64_reg_hsh, name, FALSE);
1414 free ((char *) reg->name);
1415 free (reg);
1416
1417 /* Also locate the all upper case and all lower case versions.
1418 Do not complain if we cannot find one or the other as it
1419 was probably deleted above. */
1420
1421 nbuf = strdup (name);
1422 for (p = nbuf; *p; p++)
1423 *p = TOUPPER (*p);
1424 reg = hash_find (aarch64_reg_hsh, nbuf);
1425 if (reg)
1426 {
1427 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1428 free ((char *) reg->name);
1429 free (reg);
1430 }
1431
1432 for (p = nbuf; *p; p++)
1433 *p = TOLOWER (*p);
1434 reg = hash_find (aarch64_reg_hsh, nbuf);
1435 if (reg)
1436 {
1437 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1438 free ((char *) reg->name);
1439 free (reg);
1440 }
1441
1442 free (nbuf);
1443 }
1444 }
1445
1446 *input_line_pointer = saved_char;
1447 demand_empty_rest_of_line ();
1448 }
1449
1450 /* Directives: Instruction set selection. */
1451
1452 #ifdef OBJ_ELF
1453 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1454 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1455 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1456 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1457
1458 /* Create a new mapping symbol for the transition to STATE. */
1459
1460 static void
1461 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1462 {
1463 symbolS *symbolP;
1464 const char *symname;
1465 int type;
1466
1467 switch (state)
1468 {
1469 case MAP_DATA:
1470 symname = "$d";
1471 type = BSF_NO_FLAGS;
1472 break;
1473 case MAP_INSN:
1474 symname = "$x";
1475 type = BSF_NO_FLAGS;
1476 break;
1477 default:
1478 abort ();
1479 }
1480
1481 symbolP = symbol_new (symname, now_seg, value, frag);
1482 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1483
1484 /* Save the mapping symbols for future reference. Also check that
1485 we do not place two mapping symbols at the same offset within a
1486 frag. We'll handle overlap between frags in
1487 check_mapping_symbols.
1488
1489 If .fill or other data filling directive generates zero sized data,
1490 the mapping symbol for the following code will have the same value
1491 as the one generated for the data filling directive. In this case,
1492 we replace the old symbol with the new one at the same address. */
1493 if (value == 0)
1494 {
1495 if (frag->tc_frag_data.first_map != NULL)
1496 {
1497 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1498 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1499 &symbol_lastP);
1500 }
1501 frag->tc_frag_data.first_map = symbolP;
1502 }
1503 if (frag->tc_frag_data.last_map != NULL)
1504 {
1505 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1506 S_GET_VALUE (symbolP));
1507 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1508 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1509 &symbol_lastP);
1510 }
1511 frag->tc_frag_data.last_map = symbolP;
1512 }
1513
1514 /* We must sometimes convert a region marked as code to data during
1515 code alignment, if an odd number of bytes have to be padded. The
1516 code mapping symbol is pushed to an aligned address. */
1517
1518 static void
1519 insert_data_mapping_symbol (enum mstate state,
1520 valueT value, fragS * frag, offsetT bytes)
1521 {
1522 /* If there was already a mapping symbol, remove it. */
1523 if (frag->tc_frag_data.last_map != NULL
1524 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1525 frag->fr_address + value)
1526 {
1527 symbolS *symp = frag->tc_frag_data.last_map;
1528
1529 if (value == 0)
1530 {
1531 know (frag->tc_frag_data.first_map == symp);
1532 frag->tc_frag_data.first_map = NULL;
1533 }
1534 frag->tc_frag_data.last_map = NULL;
1535 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1536 }
1537
1538 make_mapping_symbol (MAP_DATA, value, frag);
1539 make_mapping_symbol (state, value + bytes, frag);
1540 }
1541
1542 static void mapping_state_2 (enum mstate state, int max_chars);
1543
1544 /* Set the mapping state to STATE. Only call this when about to
1545 emit some STATE bytes to the file. */
1546
1547 void
1548 mapping_state (enum mstate state)
1549 {
1550 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1551
1552 if (state == MAP_INSN)
1553 /* AArch64 instructions require 4-byte alignment. When emitting
1554 instructions into any section, record the appropriate section
1555 alignment. */
1556 record_alignment (now_seg, 2);
1557
1558 if (mapstate == state)
1559 /* The mapping symbol has already been emitted.
1560 There is nothing else to do. */
1561 return;
1562
1563 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1564 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1565 /* Only emit MAP_DATA within an executable section; otherwise just
1566 return and leave the mapping state unchanged until a later transition. */
1567 return;
1568 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1569 {
1570 /* Only add the symbol if the offset is > 0:
1571 if we're at the first frag, check its size > 0;
1572 if we're not at the first frag, then for sure
1573 the offset is > 0. */
1574 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1575 const int add_symbol = (frag_now != frag_first)
1576 || (frag_now_fix () > 0);
1577
1578 if (add_symbol)
1579 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1580 }
1581 #undef TRANSITION
1582
1583 mapping_state_2 (state, 0);
1584 }
1585
1586 /* Same as mapping_state, but MAX_CHARS bytes have already been
1587 allocated. Put the mapping symbol that far back. */
1588
1589 static void
1590 mapping_state_2 (enum mstate state, int max_chars)
1591 {
1592 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1593
1594 if (!SEG_NORMAL (now_seg))
1595 return;
1596
1597 if (mapstate == state)
1598 /* The mapping symbol has already been emitted.
1599 There is nothing else to do. */
1600 return;
1601
1602 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1603 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1604 }
1605 #else
1606 #define mapping_state(x) /* nothing */
1607 #define mapping_state_2(x, y) /* nothing */
1608 #endif
1609
1610 /* Directives: sectioning and alignment. */
1611
1612 static void
1613 s_bss (int ignore ATTRIBUTE_UNUSED)
1614 {
1615 /* We don't support putting frags in the BSS segment; we fake it by
1616 marking in_bss, then looking at s_skip for clues. */
1617 subseg_set (bss_section, 0);
1618 demand_empty_rest_of_line ();
1619 mapping_state (MAP_DATA);
1620 }
1621
1622 static void
1623 s_even (int ignore ATTRIBUTE_UNUSED)
1624 {
1625 /* Never make frag if expect extra pass. */
1626 if (!need_pass_2)
1627 frag_align (1, 0, 0);
1628
1629 record_alignment (now_seg, 1);
1630
1631 demand_empty_rest_of_line ();
1632 }
1633
1634 /* Directives: Literal pools. */
1635
1636 static literal_pool *
1637 find_literal_pool (int size)
1638 {
1639 literal_pool *pool;
1640
1641 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1642 {
1643 if (pool->section == now_seg
1644 && pool->sub_section == now_subseg && pool->size == size)
1645 break;
1646 }
1647
1648 return pool;
1649 }
1650
1651 static literal_pool *
1652 find_or_make_literal_pool (int size)
1653 {
1654 /* Next literal pool ID number. */
1655 static unsigned int latest_pool_num = 1;
1656 literal_pool *pool;
1657
1658 pool = find_literal_pool (size);
1659
1660 if (pool == NULL)
1661 {
1662 /* Create a new pool. */
1663 pool = XNEW (literal_pool);
1664 if (!pool)
1665 return NULL;
1666
1667 /* Currently we always put the literal pool in the current text
1668 section. If we were generating "small" model code where we
1669 knew that all code and initialised data was within 1MB then
1670 we could output literals to mergeable, read-only data
1671 sections. */
1672
1673 pool->next_free_entry = 0;
1674 pool->section = now_seg;
1675 pool->sub_section = now_subseg;
1676 pool->size = size;
1677 pool->next = list_of_pools;
1678 pool->symbol = NULL;
1679
1680 /* Add it to the list. */
1681 list_of_pools = pool;
1682 }
1683
1684 /* New pools, and emptied pools, will have a NULL symbol. */
1685 if (pool->symbol == NULL)
1686 {
1687 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1688 (valueT) 0, &zero_address_frag);
1689 pool->id = latest_pool_num++;
1690 }
1691
1692 /* Done. */
1693 return pool;
1694 }
1695
1696 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1697 Return TRUE on success, otherwise return FALSE. */
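/* On success *EXP is rewritten into an O_symbol expression referring to
   the pool's symbol at an offset of the entry index times SIZE, so the
   caller ends up referencing the pool slot rather than the raw value.  */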
1698 static bfd_boolean
1699 add_to_lit_pool (expressionS *exp, int size)
1700 {
1701 literal_pool *pool;
1702 unsigned int entry;
1703
1704 pool = find_or_make_literal_pool (size);
1705
1706 /* Check if this literal value is already in the pool. */
1707 for (entry = 0; entry < pool->next_free_entry; entry++)
1708 {
1709 expressionS * litexp = & pool->literals[entry].exp;
1710
1711 if ((litexp->X_op == exp->X_op)
1712 && (exp->X_op == O_constant)
1713 && (litexp->X_add_number == exp->X_add_number)
1714 && (litexp->X_unsigned == exp->X_unsigned))
1715 break;
1716
1717 if ((litexp->X_op == exp->X_op)
1718 && (exp->X_op == O_symbol)
1719 && (litexp->X_add_number == exp->X_add_number)
1720 && (litexp->X_add_symbol == exp->X_add_symbol)
1721 && (litexp->X_op_symbol == exp->X_op_symbol))
1722 break;
1723 }
1724
1725 /* Do we need to create a new entry? */
1726 if (entry == pool->next_free_entry)
1727 {
1728 if (entry >= MAX_LITERAL_POOL_SIZE)
1729 {
1730 set_syntax_error (_("literal pool overflow"));
1731 return FALSE;
1732 }
1733
1734 pool->literals[entry].exp = *exp;
1735 pool->next_free_entry += 1;
1736 if (exp->X_op == O_big)
1737 {
1738 /* PR 16688: Bignums are held in a single global array. We must
1739 copy and preserve that value now, before it is overwritten. */
1740 pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
1741 exp->X_add_number);
1742 memcpy (pool->literals[entry].bignum, generic_bignum,
1743 CHARS_PER_LITTLENUM * exp->X_add_number);
1744 }
1745 else
1746 pool->literals[entry].bignum = NULL;
1747 }
1748
1749 exp->X_op = O_symbol;
1750 exp->X_add_number = ((int) entry) * size;
1751 exp->X_add_symbol = pool->symbol;
1752
1753 return TRUE;
1754 }
1755
1756 /* Can't use symbol_new here, so have to create a symbol and then at
1757 a later date assign it a value. That's what these functions do. */
1758
1759 static void
1760 symbol_locate (symbolS * symbolP,
1761 const char *name,/* It is copied, the caller can modify. */
1762 segT segment, /* Segment identifier (SEG_<something>). */
1763 valueT valu, /* Symbol value. */
1764 fragS * frag) /* Associated fragment. */
1765 {
1766 size_t name_length;
1767 char *preserved_copy_of_name;
1768
1769 name_length = strlen (name) + 1; /* +1 for \0. */
1770 obstack_grow (&notes, name, name_length);
1771 preserved_copy_of_name = obstack_finish (&notes);
1772
1773 #ifdef tc_canonicalize_symbol_name
1774 preserved_copy_of_name =
1775 tc_canonicalize_symbol_name (preserved_copy_of_name);
1776 #endif
1777
1778 S_SET_NAME (symbolP, preserved_copy_of_name);
1779
1780 S_SET_SEGMENT (symbolP, segment);
1781 S_SET_VALUE (symbolP, valu);
1782 symbol_clear_list_pointers (symbolP);
1783
1784 symbol_set_frag (symbolP, frag);
1785
1786 /* Link to end of symbol chain. */
1787 {
1788 extern int symbol_table_frozen;
1789
1790 if (symbol_table_frozen)
1791 abort ();
1792 }
1793
1794 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1795
1796 obj_symbol_new_hook (symbolP);
1797
1798 #ifdef tc_symbol_new_hook
1799 tc_symbol_new_hook (symbolP);
1800 #endif
1801
1802 #ifdef DEBUG_SYMS
1803 verify_symbol_chain (symbol_rootP, symbol_lastP);
1804 #endif /* DEBUG_SYMS */
1805 }
1806
1807
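/* Implement the .ltorg and .pool directives: dump every pending literal
   pool (4, 8 and 16 byte entry sizes) at the current location and mark
   the pools as empty again.  */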
1808 static void
1809 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1810 {
1811 unsigned int entry;
1812 literal_pool *pool;
1813 char sym_name[20];
1814 int align;
1815
1816 for (align = 2; align <= 4; align++)
1817 {
1818 int size = 1 << align;
1819
1820 pool = find_literal_pool (size);
1821 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1822 continue;
1823
1824 /* Align the pool to the size of its entries.
1825 Only make a frag if we have to. */
1826 if (!need_pass_2)
1827 frag_align (align, 0, 0);
1828
1829 mapping_state (MAP_DATA);
1830
1831 record_alignment (now_seg, align);
1832
1833 sprintf (sym_name, "$$lit_\002%x", pool->id);
1834
1835 symbol_locate (pool->symbol, sym_name, now_seg,
1836 (valueT) frag_now_fix (), frag_now);
1837 symbol_table_insert (pool->symbol);
1838
1839 for (entry = 0; entry < pool->next_free_entry; entry++)
1840 {
1841 expressionS * exp = & pool->literals[entry].exp;
1842
1843 if (exp->X_op == O_big)
1844 {
1845 /* PR 16688: Restore the global bignum value. */
1846 gas_assert (pool->literals[entry].bignum != NULL);
1847 memcpy (generic_bignum, pool->literals[entry].bignum,
1848 CHARS_PER_LITTLENUM * exp->X_add_number);
1849 }
1850
1851 /* First output the expression in the instruction to the pool. */
1852 emit_expr (exp, size); /* .word|.xword */
1853
1854 if (exp->X_op == O_big)
1855 {
1856 free (pool->literals[entry].bignum);
1857 pool->literals[entry].bignum = NULL;
1858 }
1859 }
1860
1861 /* Mark the pool as empty. */
1862 pool->next_free_entry = 0;
1863 pool->symbol = NULL;
1864 }
1865 }
1866
1867 #ifdef OBJ_ELF
1868 /* Forward declarations for functions below, in the MD interface
1869 section. */
1870 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1871 static struct reloc_table_entry * find_reloc_table_entry (char **);
1872
1873 /* Directives: Data. */
1874 /* N.B. the support for relocation suffix in this directive needs to be
1875 implemented properly. */
1876
1877 static void
1878 s_aarch64_elf_cons (int nbytes)
1879 {
1880 expressionS exp;
1881
1882 #ifdef md_flush_pending_output
1883 md_flush_pending_output ();
1884 #endif
1885
1886 if (is_it_end_of_statement ())
1887 {
1888 demand_empty_rest_of_line ();
1889 return;
1890 }
1891
1892 #ifdef md_cons_align
1893 md_cons_align (nbytes);
1894 #endif
1895
1896 mapping_state (MAP_DATA);
1897 do
1898 {
1899 struct reloc_table_entry *reloc;
1900
1901 expression (&exp);
1902
1903 if (exp.X_op != O_symbol)
1904 emit_expr (&exp, (unsigned int) nbytes);
1905 else
1906 {
1907 skip_past_char (&input_line_pointer, '#');
1908 if (skip_past_char (&input_line_pointer, ':'))
1909 {
1910 reloc = find_reloc_table_entry (&input_line_pointer);
1911 if (reloc == NULL)
1912 as_bad (_("unrecognized relocation suffix"));
1913 else
1914 as_bad (_("unimplemented relocation suffix"));
1915 ignore_rest_of_line ();
1916 return;
1917 }
1918 else
1919 emit_expr (&exp, (unsigned int) nbytes);
1920 }
1921 }
1922 while (*input_line_pointer++ == ',');
1923
1924 /* Put terminator back into stream. */
1925 input_line_pointer--;
1926 demand_empty_rest_of_line ();
1927 }
1928
1929 #endif /* OBJ_ELF */
1930
1931 /* Output a 32-bit word, but mark as an instruction. */
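/* For example, ".inst 0xd503201f" emits the 4-byte encoding of a NOP and,
   for ELF targets, records it under the MAP_INSN mapping state rather
   than MAP_DATA.  */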
1932
1933 static void
1934 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1935 {
1936 expressionS exp;
1937
1938 #ifdef md_flush_pending_output
1939 md_flush_pending_output ();
1940 #endif
1941
1942 if (is_it_end_of_statement ())
1943 {
1944 demand_empty_rest_of_line ();
1945 return;
1946 }
1947
1948 /* Sections are assumed to start aligned. In an executable section, there is no
1949 MAP_DATA symbol pending. So we only align the address during
1950 MAP_DATA --> MAP_INSN transition.
1951 For other sections, this is not guaranteed. */
1952 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1953 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1954 frag_align_code (2, 0);
1955
1956 #ifdef OBJ_ELF
1957 mapping_state (MAP_INSN);
1958 #endif
1959
1960 do
1961 {
1962 expression (&exp);
1963 if (exp.X_op != O_constant)
1964 {
1965 as_bad (_("constant expression required"));
1966 ignore_rest_of_line ();
1967 return;
1968 }
1969
1970 if (target_big_endian)
1971 {
1972 unsigned int val = exp.X_add_number;
1973 exp.X_add_number = SWAP_32 (val);
1974 }
1975 emit_expr (&exp, 4);
1976 }
1977 while (*input_line_pointer++ == ',');
1978
1979 /* Put terminator back into stream. */
1980 input_line_pointer--;
1981 demand_empty_rest_of_line ();
1982 }
1983
1984 #ifdef OBJ_ELF
1985 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
1986
1987 static void
1988 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
1989 {
1990 expressionS exp;
1991
1992 expression (&exp);
1993 frag_grow (4);
1994 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1995 BFD_RELOC_AARCH64_TLSDESC_ADD);
1996
1997 demand_empty_rest_of_line ();
1998 }
1999
2000 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2001
2002 static void
2003 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2004 {
2005 expressionS exp;
2006
2007 /* Since we're just labelling the code, there's no need to define a
2008 mapping symbol. */
2009 expression (&exp);
2010 /* Make sure there is enough room in this frag for the following
2011 blr. This trick only works if the blr follows immediately after
2012 the .tlsdesc directive. */
2013 frag_grow (4);
2014 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2015 BFD_RELOC_AARCH64_TLSDESC_CALL);
2016
2017 demand_empty_rest_of_line ();
2018 }
2019
2020 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2021
2022 static void
2023 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2024 {
2025 expressionS exp;
2026
2027 expression (&exp);
2028 frag_grow (4);
2029 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2030 BFD_RELOC_AARCH64_TLSDESC_LDR);
2031
2032 demand_empty_rest_of_line ();
2033 }
2034 #endif /* OBJ_ELF */
2035
2036 static void s_aarch64_arch (int);
2037 static void s_aarch64_cpu (int);
2038 static void s_aarch64_arch_extension (int);
2039
2040 /* This table describes all the machine specific pseudo-ops the assembler
2041 has to support. The fields are:
2042 pseudo-op name without dot
2043 function to call to execute this pseudo-op
2044 Integer arg to pass to the function. */
2045
2046 const pseudo_typeS md_pseudo_table[] = {
2047 /* Never called because '.req' does not start a line. */
2048 {"req", s_req, 0},
2049 {"unreq", s_unreq, 0},
2050 {"bss", s_bss, 0},
2051 {"even", s_even, 0},
2052 {"ltorg", s_ltorg, 0},
2053 {"pool", s_ltorg, 0},
2054 {"cpu", s_aarch64_cpu, 0},
2055 {"arch", s_aarch64_arch, 0},
2056 {"arch_extension", s_aarch64_arch_extension, 0},
2057 {"inst", s_aarch64_inst, 0},
2058 #ifdef OBJ_ELF
2059 {"tlsdescadd", s_tlsdescadd, 0},
2060 {"tlsdesccall", s_tlsdesccall, 0},
2061 {"tlsdescldr", s_tlsdescldr, 0},
2062 {"word", s_aarch64_elf_cons, 4},
2063 {"long", s_aarch64_elf_cons, 4},
2064 {"xword", s_aarch64_elf_cons, 8},
2065 {"dword", s_aarch64_elf_cons, 8},
2066 #endif
2067 {0, 0, 0}
2068 };
2069 \f
2070
2071 /* Check whether STR points to a register name followed by a comma or the
2072 end of line; REG_TYPE indicates which register types are checked
2073 against. Return TRUE if STR is such a register name; otherwise return
2074 FALSE. This function is not meant to produce diagnostics of its own, but
2075 the register parser aarch64_reg_parse, which it calls, may produce them;
2076 we therefore call clear_error to discard any diagnostics that
2077 aarch64_reg_parse may have generated.
2078 Also, the function returns FALSE immediately if there is any user error
2079 already present on entry; this prevents the existing diagnostics
2080 state from being spoiled.
2081 The function currently serves parse_constant_immediate and
2082 parse_big_immediate only. */
2083 static bfd_boolean
2084 reg_name_p (char *str, aarch64_reg_type reg_type)
2085 {
2086 int reg;
2087
2088 /* Prevent the diagnostics state from being spoiled. */
2089 if (error_p ())
2090 return FALSE;
2091
2092 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2093
2094 /* Clear the parsing error that may be set by the reg parser. */
2095 clear_error ();
2096
2097 if (reg == PARSE_FAIL)
2098 return FALSE;
2099
2100 skip_whitespace (str);
2101 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2102 return TRUE;
2103
2104 return FALSE;
2105 }
2106
2107 /* Parser functions used exclusively in instruction operands. */
2108
2109 /* Parse an immediate expression which may not be constant.
2110
2111 To prevent the expression parser from pushing a register name
2112 into the symbol table as an undefined symbol, a check is first made
2113 to find out whether STR is a register of type REG_TYPE followed
2114 by a comma or the end of line. Return FALSE if STR is such a register name. */
2115
2116 static bfd_boolean
2117 parse_immediate_expression (char **str, expressionS *exp,
2118 aarch64_reg_type reg_type)
2119 {
2120 if (reg_name_p (*str, reg_type))
2121 {
2122 set_recoverable_error (_("immediate operand required"));
2123 return FALSE;
2124 }
2125
2126 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2127
2128 if (exp->X_op == O_absent)
2129 {
2130 set_fatal_syntax_error (_("missing immediate expression"));
2131 return FALSE;
2132 }
2133
2134 return TRUE;
2135 }
2136
2137 /* Constant immediate-value read function for use in insn parsing.
2138 STR points to the beginning of the immediate (with the optional
2139 leading #); *VAL receives the value. REG_TYPE says which register
2140 names should be treated as registers rather than as symbolic immediates.
2141
2142 Return TRUE on success; otherwise return FALSE. */
2143
2144 static bfd_boolean
2145 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2146 {
2147 expressionS exp;
2148
2149 if (! parse_immediate_expression (str, &exp, reg_type))
2150 return FALSE;
2151
2152 if (exp.X_op != O_constant)
2153 {
2154 set_syntax_error (_("constant expression required"));
2155 return FALSE;
2156 }
2157
2158 *val = exp.X_add_number;
2159 return TRUE;
2160 }
2161
2162 static uint32_t
2163 encode_imm_float_bits (uint32_t imm)
2164 {
2165 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2166 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2167 }
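/* Worked example for encode_imm_float_bits: single-precision 1.0f is
   0x3f800000; bits [25:19] give 0x70 and bit [31] gives 0, so the result
   is 0x70, the usual 8-bit encoding of 1.0.  */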
2168
2169 /* Return TRUE if the single-precision floating-point value encoded in IMM
2170 can be expressed in the AArch64 8-bit signed floating-point format with
2171 3-bit exponent and normalized 4 bits of precision; in other words, the
2172 floating-point value must be expressible as
2173 (+/-) n / 16 * power (2, r)
2174 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2175
2176 static bfd_boolean
2177 aarch64_imm_float_p (uint32_t imm)
2178 {
2179 /* If a single-precision floating-point value has the following bit
2180 pattern, it can be expressed in the AArch64 8-bit floating-point
2181 format:
2182
2183 3 32222222 2221111111111
2184 1 09876543 21098765432109876543210
2185 n Eeeeeexx xxxx0000000000000000000
2186
2187 where n, e and each x are either 0 or 1 independently, with
2188 E == ~ e. */
2189
2190 uint32_t pattern;
2191
2192 /* Prepare the pattern for 'Eeeeee'. */
2193 if (((imm >> 30) & 0x1) == 0)
2194 pattern = 0x3e000000;
2195 else
2196 pattern = 0x40000000;
2197
2198 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2199 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2200 }
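/* Worked example for aarch64_imm_float_p: 0x3f800000 (1.0f) passes the
   check above, since its low 19 bits are zero and bits [29:25] are the
   inverse of bit [30]; 0x3dcccccd (approximately 0.1f) fails because its
   low 19 bits are non-zero.  */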
2201
2202 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2203 as an IEEE float without any loss of precision. Store the value in
2204 *FPWORD if so. */
2205
2206 static bfd_boolean
2207 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2208 {
2209 /* If a double-precision floating-point value has the following bit
2210 pattern, it can be expressed in a float:
2211
2212 6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2213 3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2214 n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2215
2216 -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
2217 if Eeee_eeee != 1111_1111
2218
2219 where n, e, s and S are either 0 or 1 independently and where ~ is the
2220 inverse of E. */
2221
2222 uint32_t pattern;
2223 uint32_t high32 = imm >> 32;
2224 uint32_t low32 = imm;
2225
2226 /* Lower 29 bits need to be 0s. */
2227 if ((imm & 0x1fffffff) != 0)
2228 return FALSE;
2229
2230 /* Prepare the pattern for 'Eeeeeeeee'. */
2231 if (((high32 >> 30) & 0x1) == 0)
2232 pattern = 0x38000000;
2233 else
2234 pattern = 0x40000000;
2235
2236 /* Check E~~~. */
2237 if ((high32 & 0x78000000) != pattern)
2238 return FALSE;
2239
2240 /* Check Eeee_eeee != 1111_1111. */
2241 if ((high32 & 0x7ff00000) == 0x47f00000)
2242 return FALSE;
2243
2244 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */
2245 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */
2246 | (low32 >> 29)); /* 3 S bits. */
2247 return TRUE;
2248 }
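/* Worked example for can_convert_double_to_float: the IEEE double 1.0
   (0x3ff0000000000000) has 29 zero low bits and an in-range exponent, so
   the function succeeds and stores 0x3f800000 (the IEEE single 1.0) in
   *FPWORD.  A double whose trailing 29 mantissa bits are non-zero, such
   as 0.1, is rejected.  */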
2249
2250 /* Return true if we should treat OPERAND as a double-precision
2251 floating-point operand rather than a single-precision one. */
2252 static bfd_boolean
2253 double_precision_operand_p (const aarch64_opnd_info *operand)
2254 {
2255 /* Check for unsuffixed SVE registers, which are allowed
2256 for LDR and STR but not in instructions that require an
2257 immediate. We get better error messages if we arbitrarily
2258 pick one size, parse the immediate normally, and then
2259 report the match failure in the normal way. */
2260 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2261 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2262 }
2263
2264 /* Parse a floating-point immediate. Return TRUE on success and return the
2265 value in *IMMED in the format of IEEE754 single-precision encoding.
2266 *CCP points to the start of the string; DP_P is TRUE when the immediate
2267 is expected to be in double-precision (N.B. this only matters when
2268 hexadecimal representation is involved). REG_TYPE says which register
2269 names should be treated as registers rather than as symbolic immediates.
2270
2271 This routine accepts any IEEE float; it is up to the callers to reject
2272 invalid ones. */
2273
2274 static bfd_boolean
2275 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
2276 aarch64_reg_type reg_type)
2277 {
2278 char *str = *ccp;
2279 char *fpnum;
2280 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2281 int found_fpchar = 0;
2282 int64_t val = 0;
2283 unsigned fpword = 0;
2284 bfd_boolean hex_p = FALSE;
2285
2286 skip_past_char (&str, '#');
2287
2288 fpnum = str;
2289 skip_whitespace (fpnum);
2290
2291 if (strncmp (fpnum, "0x", 2) == 0)
2292 {
2293 /* Support the hexadecimal representation of the IEEE754 encoding.
2294 Double-precision is expected when DP_P is TRUE, otherwise the
2295 representation should be in single-precision. */
2296 if (! parse_constant_immediate (&str, &val, reg_type))
2297 goto invalid_fp;
2298
2299 if (dp_p)
2300 {
2301 if (!can_convert_double_to_float (val, &fpword))
2302 goto invalid_fp;
2303 }
2304 else if ((uint64_t) val > 0xffffffff)
2305 goto invalid_fp;
2306 else
2307 fpword = val;
2308
2309 hex_p = TRUE;
2310 }
2311 else
2312 {
2313 if (reg_name_p (str, reg_type))
2314 {
2315 set_recoverable_error (_("immediate operand required"));
2316 return FALSE;
2317 }
2318
2319 /* We must not accidentally parse an integer as a floating-point number.
2320 Make sure that the value we parse is not an integer by checking for
2321 the special characters '.', 'e' or 'E'. */
2322 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2323 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2324 {
2325 found_fpchar = 1;
2326 break;
2327 }
2328
2329 if (!found_fpchar)
2330 return FALSE;
2331 }
2332
2333 if (! hex_p)
2334 {
2335 int i;
2336
2337 if ((str = atof_ieee (str, 's', words)) == NULL)
2338 goto invalid_fp;
2339
2340 /* Our FP word must be 32 bits (single-precision FP). */
2341 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2342 {
2343 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2344 fpword |= words[i];
2345 }
2346 }
2347
2348 *immed = fpword;
2349 *ccp = str;
2350 return TRUE;
2351
2352 invalid_fp:
2353 set_fatal_syntax_error (_("invalid floating-point constant"));
2354 return FALSE;
2355 }
2356
2357 /* Less-generic immediate-value read function with the possibility of loading
2358 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2359 instructions.
2360
2361 To prevent the expression parser from pushing a register name into the
2362 symbol table as an undefined symbol, a check is first made to find
2363 out whether STR is a register of type REG_TYPE followed by a comma or
2364 the end of line. Return FALSE if STR is such a register. */
2365
2366 static bfd_boolean
2367 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2368 {
2369 char *ptr = *str;
2370
2371 if (reg_name_p (ptr, reg_type))
2372 {
2373 set_syntax_error (_("immediate operand required"));
2374 return FALSE;
2375 }
2376
2377 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2378
2379 if (inst.reloc.exp.X_op == O_constant)
2380 *imm = inst.reloc.exp.X_add_number;
2381
2382 *str = ptr;
2383
2384 return TRUE;
2385 }
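/* Illustrative caller context for parse_big_immediate (an example, not
   from this file): an AdvSIMD modified-immediate instruction such as

       movi v0.2d, #0xff00ff00ff00ff00

   needs the full 64-bit value, which would not fit in a plain int.  */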
2386
2387 /* Record in RELOC that the operand described by *OPERAND needs a GAS
2388 internal fixup. If NEED_LIBOPCODES_P is non-zero, the fixup will need
2389 assistance from libopcodes. */
2390
2391 static inline void
2392 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2393 const aarch64_opnd_info *operand,
2394 int need_libopcodes_p)
2395 {
2396 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2397 reloc->opnd = operand->type;
2398 if (need_libopcodes_p)
2399 reloc->need_libopcodes_p = 1;
2400 }
2401
2402 /* Return TRUE if the instruction needs to be fixed up later internally by
2403 GAS; otherwise return FALSE. */
2404
2405 static inline bfd_boolean
2406 aarch64_gas_internal_fixup_p (void)
2407 {
2408 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2409 }
2410
2411 /* Assign the immediate value to the relevant field in *OPERAND if
2412 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2413 needs an internal fixup in a later stage.
2414 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2415 IMM.VALUE that may get assigned with the constant. */
2416 static inline void
2417 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2418 aarch64_opnd_info *operand,
2419 int addr_off_p,
2420 int need_libopcodes_p,
2421 int skip_p)
2422 {
2423 if (reloc->exp.X_op == O_constant)
2424 {
2425 if (addr_off_p)
2426 operand->addr.offset.imm = reloc->exp.X_add_number;
2427 else
2428 operand->imm.value = reloc->exp.X_add_number;
2429 reloc->type = BFD_RELOC_UNUSED;
2430 }
2431 else
2432 {
2433 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2434 /* Tell libopcodes to ignore this operand or not. This is helpful
2435 when one of the operands needs to be fixed up later but we need
2436 libopcodes to check the other operands. */
2437 operand->skip = skip_p;
2438 }
2439 }
2440
2441 /* Relocation modifiers. Each entry in the table contains the textual
2442 name for the relocation which may be placed before a symbol used as
2443 a load/store offset or an ADD immediate. It must be surrounded by a
2444 leading and trailing colon, for example:
2445
2446 ldr x0, [x1, #:rello:varsym]
2447 add x0, x1, #:rello:varsym */
2448
2449 struct reloc_table_entry
2450 {
2451 const char *name;
2452 int pc_rel;
2453 bfd_reloc_code_real_type adr_type;
2454 bfd_reloc_code_real_type adrp_type;
2455 bfd_reloc_code_real_type movw_type;
2456 bfd_reloc_code_real_type add_type;
2457 bfd_reloc_code_real_type ldst_type;
2458 bfd_reloc_code_real_type ld_literal_type;
2459 };
2460
2461 static struct reloc_table_entry reloc_table[] = {
2462 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2463 {"lo12", 0,
2464 0, /* adr_type */
2465 0,
2466 0,
2467 BFD_RELOC_AARCH64_ADD_LO12,
2468 BFD_RELOC_AARCH64_LDST_LO12,
2469 0},
2470
2471 /* Higher 21 bits of pc-relative page offset: ADRP */
2472 {"pg_hi21", 1,
2473 0, /* adr_type */
2474 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2475 0,
2476 0,
2477 0,
2478 0},
2479
2480 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2481 {"pg_hi21_nc", 1,
2482 0, /* adr_type */
2483 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2484 0,
2485 0,
2486 0,
2487 0},
2488
2489 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2490 {"abs_g0", 0,
2491 0, /* adr_type */
2492 0,
2493 BFD_RELOC_AARCH64_MOVW_G0,
2494 0,
2495 0,
2496 0},
2497
2498 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2499 {"abs_g0_s", 0,
2500 0, /* adr_type */
2501 0,
2502 BFD_RELOC_AARCH64_MOVW_G0_S,
2503 0,
2504 0,
2505 0},
2506
2507 /* Less significant bits 0-15 of address/value: MOVK, no check */
2508 {"abs_g0_nc", 0,
2509 0, /* adr_type */
2510 0,
2511 BFD_RELOC_AARCH64_MOVW_G0_NC,
2512 0,
2513 0,
2514 0},
2515
2516 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2517 {"abs_g1", 0,
2518 0, /* adr_type */
2519 0,
2520 BFD_RELOC_AARCH64_MOVW_G1,
2521 0,
2522 0,
2523 0},
2524
2525 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2526 {"abs_g1_s", 0,
2527 0, /* adr_type */
2528 0,
2529 BFD_RELOC_AARCH64_MOVW_G1_S,
2530 0,
2531 0,
2532 0},
2533
2534 /* Less significant bits 16-31 of address/value: MOVK, no check */
2535 {"abs_g1_nc", 0,
2536 0, /* adr_type */
2537 0,
2538 BFD_RELOC_AARCH64_MOVW_G1_NC,
2539 0,
2540 0,
2541 0},
2542
2543 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2544 {"abs_g2", 0,
2545 0, /* adr_type */
2546 0,
2547 BFD_RELOC_AARCH64_MOVW_G2,
2548 0,
2549 0,
2550 0},
2551
2552 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2553 {"abs_g2_s", 0,
2554 0, /* adr_type */
2555 0,
2556 BFD_RELOC_AARCH64_MOVW_G2_S,
2557 0,
2558 0,
2559 0},
2560
2561 /* Less significant bits 32-47 of address/value: MOVK, no check */
2562 {"abs_g2_nc", 0,
2563 0, /* adr_type */
2564 0,
2565 BFD_RELOC_AARCH64_MOVW_G2_NC,
2566 0,
2567 0,
2568 0},
2569
2570 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2571 {"abs_g3", 0,
2572 0, /* adr_type */
2573 0,
2574 BFD_RELOC_AARCH64_MOVW_G3,
2575 0,
2576 0,
2577 0},
2578
2579 /* Get to the page containing GOT entry for a symbol. */
2580 {"got", 1,
2581 0, /* adr_type */
2582 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2583 0,
2584 0,
2585 0,
2586 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2587
2588 /* 12 bit offset into the page containing GOT entry for that symbol. */
2589 {"got_lo12", 0,
2590 0, /* adr_type */
2591 0,
2592 0,
2593 0,
2594 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2595 0},
2596
2597 /* Bits 0-15 of address/value: MOVK, no check. */
2598 {"gotoff_g0_nc", 0,
2599 0, /* adr_type */
2600 0,
2601 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2602 0,
2603 0,
2604 0},
2605
2606 /* Most significant bits 16-31 of address/value: MOVZ. */
2607 {"gotoff_g1", 0,
2608 0, /* adr_type */
2609 0,
2610 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2611 0,
2612 0,
2613 0},
2614
2615 /* 15 bit offset into the page containing GOT entry for that symbol. */
2616 {"gotoff_lo15", 0,
2617 0, /* adr_type */
2618 0,
2619 0,
2620 0,
2621 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2622 0},
2623
2624 /* Get to the page containing GOT TLS entry for a symbol */
2625 {"gottprel_g0_nc", 0,
2626 0, /* adr_type */
2627 0,
2628 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2629 0,
2630 0,
2631 0},
2632
2633 /* Get to the page containing GOT TLS entry for a symbol */
2634 {"gottprel_g1", 0,
2635 0, /* adr_type */
2636 0,
2637 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2638 0,
2639 0,
2640 0},
2641
2642 /* Get to the page containing GOT TLS entry for a symbol */
2643 {"tlsgd", 0,
2644 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2645 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2646 0,
2647 0,
2648 0,
2649 0},
2650
2651 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2652 {"tlsgd_lo12", 0,
2653 0, /* adr_type */
2654 0,
2655 0,
2656 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2657 0,
2658 0},
2659
2660 /* Lower 16 bits of address/value: MOVK. */
2661 {"tlsgd_g0_nc", 0,
2662 0, /* adr_type */
2663 0,
2664 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2665 0,
2666 0,
2667 0},
2668
2669 /* Most significant bits 16-31 of address/value: MOVZ. */
2670 {"tlsgd_g1", 0,
2671 0, /* adr_type */
2672 0,
2673 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2674 0,
2675 0,
2676 0},
2677
2678 /* Get to the page containing GOT TLS entry for a symbol */
2679 {"tlsdesc", 0,
2680 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2681 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2682 0,
2683 0,
2684 0,
2685 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2686
2687 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2688 {"tlsdesc_lo12", 0,
2689 0, /* adr_type */
2690 0,
2691 0,
2692 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2693 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2694 0},
2695
2696 /* Get to the page containing the GOT TLS entry for a symbol.
2697 As with GD, we allocate two consecutive GOT slots
2698 for the module index and module offset; the only difference
2699 from GD is that the module offset should be initialized to
2700 zero without any outstanding runtime relocation. */
2701 {"tlsldm", 0,
2702 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2703 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2704 0,
2705 0,
2706 0,
2707 0},
2708
2709 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2710 {"tlsldm_lo12_nc", 0,
2711 0, /* adr_type */
2712 0,
2713 0,
2714 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2715 0,
2716 0},
2717
2718 /* 12 bit offset into the module TLS base address. */
2719 {"dtprel_lo12", 0,
2720 0, /* adr_type */
2721 0,
2722 0,
2723 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2724 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2725 0},
2726
2727 /* Same as dtprel_lo12, no overflow check. */
2728 {"dtprel_lo12_nc", 0,
2729 0, /* adr_type */
2730 0,
2731 0,
2732 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2733 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2734 0},
2735
2736 /* bits[23:12] of offset to the module TLS base address. */
2737 {"dtprel_hi12", 0,
2738 0, /* adr_type */
2739 0,
2740 0,
2741 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2742 0,
2743 0},
2744
2745 /* bits[15:0] of offset to the module TLS base address. */
2746 {"dtprel_g0", 0,
2747 0, /* adr_type */
2748 0,
2749 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2750 0,
2751 0,
2752 0},
2753
2754 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2755 {"dtprel_g0_nc", 0,
2756 0, /* adr_type */
2757 0,
2758 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2759 0,
2760 0,
2761 0},
2762
2763 /* bits[31:16] of offset to the module TLS base address. */
2764 {"dtprel_g1", 0,
2765 0, /* adr_type */
2766 0,
2767 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2768 0,
2769 0,
2770 0},
2771
2772 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2773 {"dtprel_g1_nc", 0,
2774 0, /* adr_type */
2775 0,
2776 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2777 0,
2778 0,
2779 0},
2780
2781 /* bits[47:32] of offset to the module TLS base address. */
2782 {"dtprel_g2", 0,
2783 0, /* adr_type */
2784 0,
2785 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2786 0,
2787 0,
2788 0},
2789
2790 /* Lower 16 bit offset into GOT entry for a symbol */
2791 {"tlsdesc_off_g0_nc", 0,
2792 0, /* adr_type */
2793 0,
2794 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2795 0,
2796 0,
2797 0},
2798
2799 /* Higher 16 bit offset into GOT entry for a symbol */
2800 {"tlsdesc_off_g1", 0,
2801 0, /* adr_type */
2802 0,
2803 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2804 0,
2805 0,
2806 0},
2807
2808 /* Get to the page containing GOT TLS entry for a symbol */
2809 {"gottprel", 0,
2810 0, /* adr_type */
2811 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2812 0,
2813 0,
2814 0,
2815 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2816
2817 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2818 {"gottprel_lo12", 0,
2819 0, /* adr_type */
2820 0,
2821 0,
2822 0,
2823 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2824 0},
2825
2826 /* Get tp offset for a symbol. */
2827 {"tprel", 0,
2828 0, /* adr_type */
2829 0,
2830 0,
2831 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2832 0,
2833 0},
2834
2835 /* Get tp offset for a symbol. */
2836 {"tprel_lo12", 0,
2837 0, /* adr_type */
2838 0,
2839 0,
2840 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2841 0,
2842 0},
2843
2844 /* Get tp offset for a symbol. */
2845 {"tprel_hi12", 0,
2846 0, /* adr_type */
2847 0,
2848 0,
2849 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2850 0,
2851 0},
2852
2853 /* Get tp offset for a symbol. */
2854 {"tprel_lo12_nc", 0,
2855 0, /* adr_type */
2856 0,
2857 0,
2858 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2859 0,
2860 0},
2861
2862 /* Most significant bits 32-47 of address/value: MOVZ. */
2863 {"tprel_g2", 0,
2864 0, /* adr_type */
2865 0,
2866 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2867 0,
2868 0,
2869 0},
2870
2871 /* Most significant bits 16-31 of address/value: MOVZ. */
2872 {"tprel_g1", 0,
2873 0, /* adr_type */
2874 0,
2875 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2876 0,
2877 0,
2878 0},
2879
2880 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2881 {"tprel_g1_nc", 0,
2882 0, /* adr_type */
2883 0,
2884 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2885 0,
2886 0,
2887 0},
2888
2889 /* Most significant bits 0-15 of address/value: MOVZ. */
2890 {"tprel_g0", 0,
2891 0, /* adr_type */
2892 0,
2893 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2894 0,
2895 0,
2896 0},
2897
2898 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2899 {"tprel_g0_nc", 0,
2900 0, /* adr_type */
2901 0,
2902 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2903 0,
2904 0,
2905 0},
2906
2907 /* 15-bit offset from GOT entry to base address of GOT table. */
2908 {"gotpage_lo15", 0,
2909 0,
2910 0,
2911 0,
2912 0,
2913 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2914 0},
2915
2916 /* 14-bit offset from GOT entry to base address of GOT table. */
2917 {"gotpage_lo14", 0,
2918 0,
2919 0,
2920 0,
2921 0,
2922 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2923 0},
2924 };
2925
2926 /* Given the address of a pointer pointing to the textual name of a
2927 relocation as may appear in assembler source, attempt to find its
2928 details in reloc_table. The pointer will be updated to the character
2929 after the trailing colon. Return NULL on failure;
2930 otherwise return the matching reloc_table_entry. */
2931
2932 static struct reloc_table_entry *
2933 find_reloc_table_entry (char **str)
2934 {
2935 unsigned int i;
2936 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2937 {
2938 int length = strlen (reloc_table[i].name);
2939
2940 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2941 && (*str)[length] == ':')
2942 {
2943 *str += (length + 1);
2944 return &reloc_table[i];
2945 }
2946 }
2947
2948 return NULL;
2949 }
2950
2951 /* Mode argument to parse_shift and parse_shifter_operand. */
2952 enum parse_shift_mode
2953 {
2954 SHIFTED_NONE, /* no shifter allowed */
2955 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2956 "#imm{,lsl #n}" */
2957 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2958 "#imm" */
2959 SHIFTED_LSL, /* bare "lsl #n" */
2960 SHIFTED_MUL, /* bare "mul #n" */
2961 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2962 SHIFTED_MUL_VL, /* "mul vl" */
2963 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2964 };
2965
2966 /* Parse a <shift> operator on an AArch64 data processing instruction.
2967 Return TRUE on success; otherwise return FALSE. */
2968 static bfd_boolean
2969 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2970 {
2971 const struct aarch64_name_value_pair *shift_op;
2972 enum aarch64_modifier_kind kind;
2973 expressionS exp;
2974 int exp_has_prefix;
2975 char *s = *str;
2976 char *p = s;
2977
2978 for (p = *str; ISALPHA (*p); p++)
2979 ;
2980
2981 if (p == *str)
2982 {
2983 set_syntax_error (_("shift expression expected"));
2984 return FALSE;
2985 }
2986
2987 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2988
2989 if (shift_op == NULL)
2990 {
2991 set_syntax_error (_("shift operator expected"));
2992 return FALSE;
2993 }
2994
2995 kind = aarch64_get_operand_modifier (shift_op);
2996
2997 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2998 {
2999 set_syntax_error (_("invalid use of 'MSL'"));
3000 return FALSE;
3001 }
3002
3003 if (kind == AARCH64_MOD_MUL
3004 && mode != SHIFTED_MUL
3005 && mode != SHIFTED_MUL_VL)
3006 {
3007 set_syntax_error (_("invalid use of 'MUL'"));
3008 return FALSE;
3009 }
3010
3011 switch (mode)
3012 {
3013 case SHIFTED_LOGIC_IMM:
3014 if (aarch64_extend_operator_p (kind))
3015 {
3016 set_syntax_error (_("extending shift is not permitted"));
3017 return FALSE;
3018 }
3019 break;
3020
3021 case SHIFTED_ARITH_IMM:
3022 if (kind == AARCH64_MOD_ROR)
3023 {
3024 set_syntax_error (_("'ROR' shift is not permitted"));
3025 return FALSE;
3026 }
3027 break;
3028
3029 case SHIFTED_LSL:
3030 if (kind != AARCH64_MOD_LSL)
3031 {
3032 set_syntax_error (_("only 'LSL' shift is permitted"));
3033 return FALSE;
3034 }
3035 break;
3036
3037 case SHIFTED_MUL:
3038 if (kind != AARCH64_MOD_MUL)
3039 {
3040 set_syntax_error (_("only 'MUL' is permitted"));
3041 return FALSE;
3042 }
3043 break;
3044
3045 case SHIFTED_MUL_VL:
3046 /* "MUL VL" consists of two separate tokens. Require the first
3047 token to be "MUL" and look for a following "VL". */
3048 if (kind == AARCH64_MOD_MUL)
3049 {
3050 skip_whitespace (p);
3051 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3052 {
3053 p += 2;
3054 kind = AARCH64_MOD_MUL_VL;
3055 break;
3056 }
3057 }
3058 set_syntax_error (_("only 'MUL VL' is permitted"));
3059 return FALSE;
3060
3061 case SHIFTED_REG_OFFSET:
3062 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3063 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3064 {
3065 set_fatal_syntax_error
3066 (_("invalid shift for the register offset addressing mode"));
3067 return FALSE;
3068 }
3069 break;
3070
3071 case SHIFTED_LSL_MSL:
3072 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3073 {
3074 set_syntax_error (_("invalid shift operator"));
3075 return FALSE;
3076 }
3077 break;
3078
3079 default:
3080 abort ();
3081 }
3082
3083 /* Whitespace can appear here if the next thing is a bare digit. */
3084 skip_whitespace (p);
3085
3086 /* Parse shift amount. */
3087 exp_has_prefix = 0;
3088 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3089 exp.X_op = O_absent;
3090 else
3091 {
3092 if (is_immediate_prefix (*p))
3093 {
3094 p++;
3095 exp_has_prefix = 1;
3096 }
3097 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3098 }
3099 if (kind == AARCH64_MOD_MUL_VL)
3100 /* For consistency, give MUL VL the same shift amount as an implicit
3101 MUL #1. */
3102 operand->shifter.amount = 1;
3103 else if (exp.X_op == O_absent)
3104 {
3105 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3106 {
3107 set_syntax_error (_("missing shift amount"));
3108 return FALSE;
3109 }
3110 operand->shifter.amount = 0;
3111 }
3112 else if (exp.X_op != O_constant)
3113 {
3114 set_syntax_error (_("constant shift amount required"));
3115 return FALSE;
3116 }
3117 /* For parsing purposes, MUL #n has no inherent range. The range
3118 depends on the operand and will be checked by operand-specific
3119 routines. */
3120 else if (kind != AARCH64_MOD_MUL
3121 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3122 {
3123 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3124 return FALSE;
3125 }
3126 else
3127 {
3128 operand->shifter.amount = exp.X_add_number;
3129 operand->shifter.amount_present = 1;
3130 }
3131
3132 operand->shifter.operator_present = 1;
3133 operand->shifter.kind = kind;
3134
3135 *str = p;
3136 return TRUE;
3137 }
3138
3139 /* Parse a <shifter_operand> for a data processing instruction:
3140
3141 #<immediate>
3142 #<immediate>, LSL #imm
3143
3144 Validation of immediate operands is deferred to md_apply_fix.
3145
3146 Return TRUE on success; otherwise return FALSE. */
3147
3148 static bfd_boolean
3149 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3150 enum parse_shift_mode mode)
3151 {
3152 char *p;
3153
3154 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3155 return FALSE;
3156
3157 p = *str;
3158
3159 /* Accept an immediate expression. */
3160 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3161 return FALSE;
3162
3163 /* Accept optional LSL for arithmetic immediate values. */
3164 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3165 if (! parse_shift (&p, operand, SHIFTED_LSL))
3166 return FALSE;
3167
3168 /* Do not accept any shifter for logical immediate values. */
3169 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3170 && parse_shift (&p, operand, mode))
3171 {
3172 set_syntax_error (_("unexpected shift operator"));
3173 return FALSE;
3174 }
3175
3176 *str = p;
3177 return TRUE;
3178 }
3179
3180 /* Parse a <shifter_operand> for a data processing instruction:
3181
3182 <Rm>
3183 <Rm>, <shift>
3184 #<immediate>
3185 #<immediate>, LSL #imm
3186
3187 where <shift> is handled by parse_shift above, and the last two
3188 cases are handled by the function above.
3189
3190 Validation of immediate operands is deferred to md_apply_fix.
3191
3192 Return TRUE on success; otherwise return FALSE. */
3193
3194 static bfd_boolean
3195 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3196 enum parse_shift_mode mode)
3197 {
3198 const reg_entry *reg;
3199 aarch64_opnd_qualifier_t qualifier;
3200 enum aarch64_operand_class opd_class
3201 = aarch64_get_operand_class (operand->type);
3202
3203 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3204 if (reg)
3205 {
3206 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3207 {
3208 set_syntax_error (_("unexpected register in the immediate operand"));
3209 return FALSE;
3210 }
3211
3212 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3213 {
3214 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3215 return FALSE;
3216 }
3217
3218 operand->reg.regno = reg->number;
3219 operand->qualifier = qualifier;
3220
3221 /* Accept optional shift operation on register. */
3222 if (! skip_past_comma (str))
3223 return TRUE;
3224
3225 if (! parse_shift (str, operand, mode))
3226 return FALSE;
3227
3228 return TRUE;
3229 }
3230 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3231 {
3232 set_syntax_error
3233 (_("integer register expected in the extended/shifted operand "
3234 "register"));
3235 return FALSE;
3236 }
3237
3238 /* We have a shifted immediate variable. */
3239 return parse_shifter_operand_imm (str, operand, mode);
3240 }
3241
3242 /* Return TRUE on success; return FALSE otherwise. */
3243
3244 static bfd_boolean
3245 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3246 enum parse_shift_mode mode)
3247 {
3248 char *p = *str;
3249
3250 /* Determine if we have the sequence of characters #: or just :
3251 coming next. If we do, then we check for a :rello: relocation
3252 modifier. If we don't, punt the whole lot to
3253 parse_shifter_operand. */
3254
3255 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3256 {
3257 struct reloc_table_entry *entry;
3258
3259 if (p[0] == '#')
3260 p += 2;
3261 else
3262 p++;
3263 *str = p;
3264
3265 /* Try to parse a relocation. Anything else is an error. */
3266 if (!(entry = find_reloc_table_entry (str)))
3267 {
3268 set_syntax_error (_("unknown relocation modifier"));
3269 return FALSE;
3270 }
3271
3272 if (entry->add_type == 0)
3273 {
3274 set_syntax_error
3275 (_("this relocation modifier is not allowed on this instruction"));
3276 return FALSE;
3277 }
3278
3279 /* Save str before we decompose it. */
3280 p = *str;
3281
3282 /* Next, we parse the expression. */
3283 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3284 return FALSE;
3285
3286 /* Record the relocation type (use the ADD variant here). */
3287 inst.reloc.type = entry->add_type;
3288 inst.reloc.pc_rel = entry->pc_rel;
3289
3290 /* If str is empty, we've reached the end; stop here. */
3291 if (**str == '\0')
3292 return TRUE;
3293
3294 /* Otherwise, we have a shifted reloc modifier, so rewind to
3295 recover the variable name and continue parsing for the shifter. */
3296 *str = p;
3297 return parse_shifter_operand_imm (str, operand, mode);
3298 }
3299
3300 return parse_shifter_operand (str, operand, mode);
3301 }
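/* Illustrative input for the rewind path above (the symbol name is a
   placeholder): the local-exec TLS sequence typically reads

       add  x0, x0, #:tprel_hi12:var, lsl #12
       add  x0, x0, #:tprel_lo12_nc:var

   where the first ADD carries both a relocation modifier and an explicit
   shift, so after recording the relocation the operand is re-parsed with
   parse_shifter_operand_imm to pick up the "lsl #12".  */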
3302
3303 /* Parse all forms of an address expression. Information is written
3304 to *OPERAND and/or inst.reloc.
3305
3306 The A64 instruction set has the following addressing modes:
3307
3308 Offset
3309 [base] // in SIMD ld/st structure
3310 [base{,#0}] // in ld/st exclusive
3311 [base{,#imm}]
3312 [base,Xm{,LSL #imm}]
3313 [base,Xm,SXTX {#imm}]
3314 [base,Wm,(S|U)XTW {#imm}]
3315 Pre-indexed
3316 [base,#imm]!
3317 Post-indexed
3318 [base],#imm
3319 [base],Xm // in SIMD ld/st structure
3320 PC-relative (literal)
3321 label
3322 SVE:
3323 [base,#imm,MUL VL]
3324 [base,Zm.D{,LSL #imm}]
3325 [base,Zm.S,(S|U)XTW {#imm}]
3326 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3327 [Zn.S,#imm]
3328 [Zn.D,#imm]
3329 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3330 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3331 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3332
3333 (As a convenience, the notation "=immediate" is permitted in conjunction
3334 with the pc-relative literal load instructions to automatically place an
3335 immediate value or symbolic address in a nearby literal pool and generate
3336 a hidden label which references it.)
3337
3338 Upon a successful parsing, the address structure in *OPERAND will be
3339 filled in the following way:
3340
3341 .base_regno = <base>
3342 .offset.is_reg // 1 if the offset is a register
3343 .offset.imm = <imm>
3344 .offset.regno = <Rm>
3345
3346 For different addressing modes defined in the A64 ISA:
3347
3348 Offset
3349 .pcrel=0; .preind=1; .postind=0; .writeback=0
3350 Pre-indexed
3351 .pcrel=0; .preind=1; .postind=0; .writeback=1
3352 Post-indexed
3353 .pcrel=0; .preind=0; .postind=1; .writeback=1
3354 PC-relative (literal)
3355 .pcrel=1; .preind=1; .postind=0; .writeback=0
3356
3357 The shift/extension information, if any, will be stored in .shifter.
3358 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3359 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3360 corresponding register.
3361
3362 BASE_TYPE says which types of base register should be accepted and
3363 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3364 is the type of shifter that is allowed for immediate offsets,
3365 or SHIFTED_NONE if none.
3366
3367 In all other respects, it is the caller's responsibility to check
3368 for addressing modes not supported by the instruction, and to set
3369 inst.reloc.type. */
3370
3371 static bfd_boolean
3372 parse_address_main (char **str, aarch64_opnd_info *operand,
3373 aarch64_opnd_qualifier_t *base_qualifier,
3374 aarch64_opnd_qualifier_t *offset_qualifier,
3375 aarch64_reg_type base_type, aarch64_reg_type offset_type,
3376 enum parse_shift_mode imm_shift_mode)
3377 {
3378 char *p = *str;
3379 const reg_entry *reg;
3380 expressionS *exp = &inst.reloc.exp;
3381
3382 *base_qualifier = AARCH64_OPND_QLF_NIL;
3383 *offset_qualifier = AARCH64_OPND_QLF_NIL;
3384 if (! skip_past_char (&p, '['))
3385 {
3386 /* =immediate or label. */
3387 operand->addr.pcrel = 1;
3388 operand->addr.preind = 1;
3389
3390 /* #:<reloc_op>:<symbol> */
3391 skip_past_char (&p, '#');
3392 if (skip_past_char (&p, ':'))
3393 {
3394 bfd_reloc_code_real_type ty;
3395 struct reloc_table_entry *entry;
3396
3397 /* Try to parse a relocation modifier. Anything else is
3398 an error. */
3399 entry = find_reloc_table_entry (&p);
3400 if (! entry)
3401 {
3402 set_syntax_error (_("unknown relocation modifier"));
3403 return FALSE;
3404 }
3405
3406 switch (operand->type)
3407 {
3408 case AARCH64_OPND_ADDR_PCREL21:
3409 /* adr */
3410 ty = entry->adr_type;
3411 break;
3412
3413 default:
3414 ty = entry->ld_literal_type;
3415 break;
3416 }
3417
3418 if (ty == 0)
3419 {
3420 set_syntax_error
3421 (_("this relocation modifier is not allowed on this "
3422 "instruction"));
3423 return FALSE;
3424 }
3425
3426 /* #:<reloc_op>: */
3427 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3428 {
3429 set_syntax_error (_("invalid relocation expression"));
3430 return FALSE;
3431 }
3432
3433 /* #:<reloc_op>:<expr> */
3434 /* Record the relocation type. */
3435 inst.reloc.type = ty;
3436 inst.reloc.pc_rel = entry->pc_rel;
3437 }
3438 else
3439 {
3440
3441 if (skip_past_char (&p, '='))
3442 /* =immediate; need to generate the literal in the literal pool. */
3443 inst.gen_lit_pool = 1;
3444
3445 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3446 {
3447 set_syntax_error (_("invalid address"));
3448 return FALSE;
3449 }
3450 }
3451
3452 *str = p;
3453 return TRUE;
3454 }
3455
3456 /* [ */
3457
3458 reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3459 if (!reg || !aarch64_check_reg_type (reg, base_type))
3460 {
3461 set_syntax_error (_(get_reg_expected_msg (base_type)));
3462 return FALSE;
3463 }
3464 operand->addr.base_regno = reg->number;
3465
3466 /* [Xn */
3467 if (skip_past_comma (&p))
3468 {
3469 /* [Xn, */
3470 operand->addr.preind = 1;
3471
3472 reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3473 if (reg)
3474 {
3475 if (!aarch64_check_reg_type (reg, offset_type))
3476 {
3477 set_syntax_error (_(get_reg_expected_msg (offset_type)));
3478 return FALSE;
3479 }
3480
3481 /* [Xn,Rm */
3482 operand->addr.offset.regno = reg->number;
3483 operand->addr.offset.is_reg = 1;
3484 /* Shifted index. */
3485 if (skip_past_comma (&p))
3486 {
3487 /* [Xn,Rm, */
3488 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3489 /* Use the diagnostics set in parse_shift, so do not set a new
3490 error message here. */
3491 return FALSE;
3492 }
3493 /* We only accept:
3494 [base,Xm{,LSL #imm}]
3495 [base,Xm,SXTX {#imm}]
3496 [base,Wm,(S|U)XTW {#imm}] */
3497 if (operand->shifter.kind == AARCH64_MOD_NONE
3498 || operand->shifter.kind == AARCH64_MOD_LSL
3499 || operand->shifter.kind == AARCH64_MOD_SXTX)
3500 {
3501 if (*offset_qualifier == AARCH64_OPND_QLF_W)
3502 {
3503 set_syntax_error (_("invalid use of 32-bit register offset"));
3504 return FALSE;
3505 }
3506 if (aarch64_get_qualifier_esize (*base_qualifier)
3507 != aarch64_get_qualifier_esize (*offset_qualifier))
3508 {
3509 set_syntax_error (_("offset has different size from base"));
3510 return FALSE;
3511 }
3512 }
3513 else if (*offset_qualifier == AARCH64_OPND_QLF_X)
3514 {
3515 set_syntax_error (_("invalid use of 64-bit register offset"));
3516 return FALSE;
3517 }
3518 }
3519 else
3520 {
3521 /* [Xn,#:<reloc_op>:<symbol> */
3522 skip_past_char (&p, '#');
3523 if (skip_past_char (&p, ':'))
3524 {
3525 struct reloc_table_entry *entry;
3526
3527 /* Try to parse a relocation modifier. Anything else is
3528 an error. */
3529 if (!(entry = find_reloc_table_entry (&p)))
3530 {
3531 set_syntax_error (_("unknown relocation modifier"));
3532 return FALSE;
3533 }
3534
3535 if (entry->ldst_type == 0)
3536 {
3537 set_syntax_error
3538 (_("this relocation modifier is not allowed on this "
3539 "instruction"));
3540 return FALSE;
3541 }
3542
3543 /* [Xn,#:<reloc_op>: */
3544 /* We now have the group relocation table entry corresponding to
3545 the name in the assembler source. Next, we parse the
3546 expression. */
3547 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3548 {
3549 set_syntax_error (_("invalid relocation expression"));
3550 return FALSE;
3551 }
3552
3553 /* [Xn,#:<reloc_op>:<expr> */
3554 /* Record the load/store relocation type. */
3555 inst.reloc.type = entry->ldst_type;
3556 inst.reloc.pc_rel = entry->pc_rel;
3557 }
3558 else
3559 {
3560 if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3561 {
3562 set_syntax_error (_("invalid expression in the address"));
3563 return FALSE;
3564 }
3565 /* [Xn,<expr> */
3566 if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
3567 /* [Xn,<expr>,<shifter> */
3568 if (! parse_shift (&p, operand, imm_shift_mode))
3569 return FALSE;
3570 }
3571 }
3572 }
3573
3574 if (! skip_past_char (&p, ']'))
3575 {
3576 set_syntax_error (_("']' expected"));
3577 return FALSE;
3578 }
3579
3580 if (skip_past_char (&p, '!'))
3581 {
3582 if (operand->addr.preind && operand->addr.offset.is_reg)
3583 {
3584 set_syntax_error (_("register offset not allowed in pre-indexed "
3585 "addressing mode"));
3586 return FALSE;
3587 }
3588 /* [Xn]! */
3589 operand->addr.writeback = 1;
3590 }
3591 else if (skip_past_comma (&p))
3592 {
3593 /* [Xn], */
3594 operand->addr.postind = 1;
3595 operand->addr.writeback = 1;
3596
3597 if (operand->addr.preind)
3598 {
3599 set_syntax_error (_("cannot combine pre- and post-indexing"));
3600 return FALSE;
3601 }
3602
3603 reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
3604 if (reg)
3605 {
3606 /* [Xn],Xm */
3607 if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3608 {
3609 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3610 return FALSE;
3611 }
3612
3613 operand->addr.offset.regno = reg->number;
3614 operand->addr.offset.is_reg = 1;
3615 }
3616 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3617 {
3618 /* [Xn],#expr */
3619 set_syntax_error (_("invalid expression in the address"));
3620 return FALSE;
3621 }
3622 }
3623
3624 /* If at this point neither .preind nor .postind is set, we have a
3625 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3626 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3627 {
3628 if (operand->addr.writeback)
3629 {
3630 /* Reject [Rn]! */
3631 set_syntax_error (_("missing offset in the pre-indexed address"));
3632 return FALSE;
3633 }
3634 operand->addr.preind = 1;
3635 inst.reloc.exp.X_op = O_constant;
3636 inst.reloc.exp.X_add_number = 0;
3637 }
3638
3639 *str = p;
3640 return TRUE;
3641 }
3642
3643 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3644 on success. */
3645 static bfd_boolean
3646 parse_address (char **str, aarch64_opnd_info *operand)
3647 {
3648 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3649 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3650 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3651 }
3652
3653 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3654 The arguments have the same meaning as for parse_address_main.
3655 Return TRUE on success. */
3656 static bfd_boolean
3657 parse_sve_address (char **str, aarch64_opnd_info *operand,
3658 aarch64_opnd_qualifier_t *base_qualifier,
3659 aarch64_opnd_qualifier_t *offset_qualifier)
3660 {
3661 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3662 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3663 SHIFTED_MUL_VL);
3664 }
3665
3666 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3667 Return TRUE on success; otherwise return FALSE. */
3668 static bfd_boolean
3669 parse_half (char **str, int *internal_fixup_p)
3670 {
3671 char *p = *str;
3672
3673 skip_past_char (&p, '#');
3674
3675 gas_assert (internal_fixup_p);
3676 *internal_fixup_p = 0;
3677
3678 if (*p == ':')
3679 {
3680 struct reloc_table_entry *entry;
3681
3682 /* Try to parse a relocation. Anything else is an error. */
3683 ++p;
3684 if (!(entry = find_reloc_table_entry (&p)))
3685 {
3686 set_syntax_error (_("unknown relocation modifier"));
3687 return FALSE;
3688 }
3689
3690 if (entry->movw_type == 0)
3691 {
3692 set_syntax_error
3693 (_("this relocation modifier is not allowed on this instruction"));
3694 return FALSE;
3695 }
3696
3697 inst.reloc.type = entry->movw_type;
3698 }
3699 else
3700 *internal_fixup_p = 1;
3701
3702 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3703 return FALSE;
3704
3705 *str = p;
3706 return TRUE;
3707 }
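/* For example (the symbol name is a placeholder), both of these forms are
   handled by parse_half:

       movz x0, #:abs_g1:sym    // relocation modifier; uses entry->movw_type
       movk x0, #0x1234         // plain immediate; *INTERNAL_FIXUP_P is set */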
3708
3709 /* Parse an operand for an ADRP instruction:
3710 ADRP <Xd>, <label>
3711 Return TRUE on success; otherwise return FALSE. */
3712
3713 static bfd_boolean
3714 parse_adrp (char **str)
3715 {
3716 char *p;
3717
3718 p = *str;
3719 if (*p == ':')
3720 {
3721 struct reloc_table_entry *entry;
3722
3723 /* Try to parse a relocation. Anything else is an error. */
3724 ++p;
3725 if (!(entry = find_reloc_table_entry (&p)))
3726 {
3727 set_syntax_error (_("unknown relocation modifier"));
3728 return FALSE;
3729 }
3730
3731 if (entry->adrp_type == 0)
3732 {
3733 set_syntax_error
3734 (_("this relocation modifier is not allowed on this instruction"));
3735 return FALSE;
3736 }
3737
3738 inst.reloc.type = entry->adrp_type;
3739 }
3740 else
3741 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3742
3743 inst.reloc.pc_rel = 1;
3744
3745 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3746 return FALSE;
3747
3748 *str = p;
3749 return TRUE;
3750 }
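/* For example (illustrative symbol name):

       adrp x0, sym        // defaults to BFD_RELOC_AARCH64_ADR_HI21_PCREL
       adrp x0, :got:sym   // uses the table entry's adrp_type, here
                           // BFD_RELOC_AARCH64_ADR_GOT_PAGE */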
3751
3752 /* Miscellaneous. */
3753
3754 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3755 of SIZE tokens in which index I gives the token for field value I,
3756 or is null if field value I is invalid. REG_TYPE says which register
3757 names should be treated as registers rather than as symbolic immediates.
3758
3759 Return true on success, moving *STR past the operand and storing the
3760 field value in *VAL. */
3761
3762 static int
3763 parse_enum_string (char **str, int64_t *val, const char *const *array,
3764 size_t size, aarch64_reg_type reg_type)
3765 {
3766 expressionS exp;
3767 char *p, *q;
3768 size_t i;
3769
3770 /* Match C-like tokens. */
3771 p = q = *str;
3772 while (ISALNUM (*q))
3773 q++;
3774
3775 for (i = 0; i < size; ++i)
3776 if (array[i]
3777 && strncasecmp (array[i], p, q - p) == 0
3778 && array[i][q - p] == 0)
3779 {
3780 *val = i;
3781 *str = q;
3782 return TRUE;
3783 }
3784
3785 if (!parse_immediate_expression (&p, &exp, reg_type))
3786 return FALSE;
3787
3788 if (exp.X_op == O_constant
3789 && (uint64_t) exp.X_add_number < size)
3790 {
3791 *val = exp.X_add_number;
3792 *str = p;
3793 return TRUE;
3794 }
3795
3796 /* Use the default error for this operand. */
3797 return FALSE;
3798 }
3799
3800 /* Parse an option for a preload instruction. Returns the encoding for the
3801 option, or PARSE_FAIL. */
3802
3803 static int
3804 parse_pldop (char **str)
3805 {
3806 char *p, *q;
3807 const struct aarch64_name_value_pair *o;
3808
3809 p = q = *str;
3810 while (ISALNUM (*q))
3811 q++;
3812
3813 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3814 if (!o)
3815 return PARSE_FAIL;
3816
3817 *str = q;
3818 return o->value;
3819 }
3820
3821 /* Parse an option for a barrier instruction. Returns the encoding for the
3822 option, or PARSE_FAIL. */
3823
3824 static int
3825 parse_barrier (char **str)
3826 {
3827 char *p, *q;
3828 const asm_barrier_opt *o;
3829
3830 p = q = *str;
3831 while (ISALPHA (*q))
3832 q++;
3833
3834 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3835 if (!o)
3836 return PARSE_FAIL;
3837
3838 *str = q;
3839 return o->value;
3840 }
3841
3842 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3843 and return 0 if successful. Otherwise return PARSE_FAIL. */
3844
3845 static int
3846 parse_barrier_psb (char **str,
3847 const struct aarch64_name_value_pair ** hint_opt)
3848 {
3849 char *p, *q;
3850 const struct aarch64_name_value_pair *o;
3851
3852 p = q = *str;
3853 while (ISALPHA (*q))
3854 q++;
3855
3856 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3857 if (!o)
3858 {
3859 set_fatal_syntax_error
3860 ( _("unknown or missing option to PSB"));
3861 return PARSE_FAIL;
3862 }
3863
3864 if (o->value != 0x11)
3865 {
3866 /* PSB only accepts option name 'CSYNC'. */
3867 set_syntax_error
3868 (_("the specified option is not accepted for PSB"));
3869 return PARSE_FAIL;
3870 }
3871
3872 *str = q;
3873 *hint_opt = o;
3874 return 0;
3875 }
3876
3877 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3878 Returns the encoding for the register or field, or PARSE_FAIL.
3879
3880 If IMPLE_DEFINED_P is non-zero, the function will also try to parse an
3881 implementation-defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3882
3883 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3884 field, otherwise as a system register.
3885 */
3886
3887 static int
3888 parse_sys_reg (char **str, struct hash_control *sys_regs,
3889 int imple_defined_p, int pstatefield_p)
3890 {
3891 char *p, *q;
3892 char buf[32];
3893 const aarch64_sys_reg *o;
3894 int value;
3895
3896 p = buf;
3897 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3898 if (p < buf + 31)
3899 *p++ = TOLOWER (*q);
3900 *p = '\0';
3901 /* Assert that BUF is large enough. */
3902 gas_assert (p - buf == q - *str);
3903
3904 o = hash_find (sys_regs, buf);
3905 if (!o)
3906 {
3907 if (!imple_defined_p)
3908 return PARSE_FAIL;
3909 else
3910 {
3911 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3912 unsigned int op0, op1, cn, cm, op2;
3913
3914 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3915 != 5)
3916 return PARSE_FAIL;
3917 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3918 return PARSE_FAIL;
3919 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3920 }
3921 }
3922 else
3923 {
3924 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
3925 as_bad (_("selected processor does not support PSTATE field "
3926 "name '%s'"), buf);
3927 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
3928 as_bad (_("selected processor does not support system register "
3929 "name '%s'"), buf);
3930 if (aarch64_sys_reg_deprecated_p (o))
3931 as_warn (_("system register name '%s' is deprecated and may be "
3932 "removed in a future release"), buf);
3933 value = o->value;
3934 }
3935
3936 *str = q;
3937 return value;
3938 }
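/* Worked example for the implementation-defined form above: the generic
   name "s3_0_c13_c0_2" (an arbitrary example) gives op0=3, op1=0, Cn=13,
   Cm=0, op2=2, so the returned value is
   (3 << 14) | (0 << 11) | (13 << 7) | (0 << 3) | 2 = 0xc682.  */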
3939
3940 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3941 for the option, or NULL. */
3942
3943 static const aarch64_sys_ins_reg *
3944 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3945 {
3946 char *p, *q;
3947 char buf[32];
3948 const aarch64_sys_ins_reg *o;
3949
3950 p = buf;
3951 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3952 if (p < buf + 31)
3953 *p++ = TOLOWER (*q);
3954 *p = '\0';
3955
3956 o = hash_find (sys_ins_regs, buf);
3957 if (!o)
3958 return NULL;
3959
3960 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
3961 as_bad (_("selected processor does not support system register "
3962 "name '%s'"), buf);
3963
3964 *str = q;
3965 return o;
3966 }
3967 \f
3968 #define po_char_or_fail(chr) do { \
3969 if (! skip_past_char (&str, chr)) \
3970 goto failure; \
3971 } while (0)
3972
3973 #define po_reg_or_fail(regtype) do { \
3974 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3975 if (val == PARSE_FAIL) \
3976 { \
3977 set_default_error (); \
3978 goto failure; \
3979 } \
3980 } while (0)
3981
3982 #define po_int_reg_or_fail(reg_type) do { \
3983 reg = aarch64_reg_parse_32_64 (&str, &qualifier); \
3984 if (!reg || !aarch64_check_reg_type (reg, reg_type)) \
3985 { \
3986 set_default_error (); \
3987 goto failure; \
3988 } \
3989 info->reg.regno = reg->number; \
3990 info->qualifier = qualifier; \
3991 } while (0)
3992
3993 #define po_imm_nc_or_fail() do { \
3994 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
3995 goto failure; \
3996 } while (0)
3997
3998 #define po_imm_or_fail(min, max) do { \
3999 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4000 goto failure; \
4001 if (val < min || val > max) \
4002 { \
4003 set_fatal_syntax_error (_("immediate value out of range "\
4004 #min " to "#max)); \
4005 goto failure; \
4006 } \
4007 } while (0)
4008
4009 #define po_enum_or_fail(array) do { \
4010 if (!parse_enum_string (&str, &val, array, \
4011 ARRAY_SIZE (array), imm_reg_type)) \
4012 goto failure; \
4013 } while (0)
4014
4015 #define po_misc_or_fail(expr) do { \
4016 if (!expr) \
4017 goto failure; \
4018 } while (0)
4019 \f
4020 /* encode the 12-bit imm field of Add/sub immediate */
4021 static inline uint32_t
4022 encode_addsub_imm (uint32_t imm)
4023 {
4024 return imm << 10;
4025 }
4026
4027 /* encode the shift amount field of Add/sub immediate */
4028 static inline uint32_t
4029 encode_addsub_imm_shift_amount (uint32_t cnt)
4030 {
4031 return cnt << 22;
4032 }
4033
4034
4035 /* encode the imm field of Adr instruction */
4036 static inline uint32_t
4037 encode_adr_imm (uint32_t imm)
4038 {
4039 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
4040 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
4041 }
4042
4043 /* encode the immediate field of Move wide immediate */
4044 static inline uint32_t
4045 encode_movw_imm (uint32_t imm)
4046 {
4047 return imm << 5;
4048 }
4049
4050 /* encode the 26-bit offset of unconditional branch */
4051 static inline uint32_t
4052 encode_branch_ofs_26 (uint32_t ofs)
4053 {
4054 return ofs & ((1 << 26) - 1);
4055 }
4056
4057 /* encode the 19-bit offset of conditional branch and compare & branch */
4058 static inline uint32_t
4059 encode_cond_branch_ofs_19 (uint32_t ofs)
4060 {
4061 return (ofs & ((1 << 19) - 1)) << 5;
4062 }
4063
4064 /* encode the 19-bit offset of ld literal */
4065 static inline uint32_t
4066 encode_ld_lit_ofs_19 (uint32_t ofs)
4067 {
4068 return (ofs & ((1 << 19) - 1)) << 5;
4069 }
4070
4071 /* Encode the 14-bit offset of test & branch. */
4072 static inline uint32_t
4073 encode_tst_branch_ofs_14 (uint32_t ofs)
4074 {
4075 return (ofs & ((1 << 14) - 1)) << 5;
4076 }
4077
4078 /* Encode the 16-bit imm field of svc/hvc/smc. */
4079 static inline uint32_t
4080 encode_svc_imm (uint32_t imm)
4081 {
4082 return imm << 5;
4083 }
4084
4085 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
4086 static inline uint32_t
4087 reencode_addsub_switch_add_sub (uint32_t opcode)
4088 {
4089 return opcode ^ (1 << 30);
4090 }
4091
4092 static inline uint32_t
4093 reencode_movzn_to_movz (uint32_t opcode)
4094 {
4095 return opcode | (1 << 30);
4096 }
4097
4098 static inline uint32_t
4099 reencode_movzn_to_movn (uint32_t opcode)
4100 {
4101 return opcode & ~(1 << 30);
4102 }
4103
4104 /* Overall per-instruction processing. */
4105
4106 /* We need to be able to fix up arbitrary expressions in some statements.
4107 This is so that we can handle symbols that are an arbitrary distance from
4108 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4109 which returns part of an address in a form which will be valid for
4110 a data instruction. We do this by pushing the expression into a symbol
4111 in the expr_section, and creating a fix for that. */
4112
4113 static fixS *
4114 fix_new_aarch64 (fragS * frag,
4115 int where,
4116 short int size, expressionS * exp, int pc_rel, int reloc)
4117 {
4118 fixS *new_fix;
4119
4120 switch (exp->X_op)
4121 {
4122 case O_constant:
4123 case O_symbol:
4124 case O_add:
4125 case O_subtract:
4126 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4127 break;
4128
4129 default:
4130 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4131 pc_rel, reloc);
4132 break;
4133 }
4134 return new_fix;
4135 }
4136 \f
4137 /* Diagnostics on operand errors.  */
4138
4139 /* By default, output a verbose error message.
4140 Disable the verbose error message with -mno-verbose-error. */
4141 static int verbose_error_p = 1;
4142
4143 #ifdef DEBUG_AARCH64
4144 /* N.B. this is only for debugging purposes. */
4145 const char* operand_mismatch_kind_names[] =
4146 {
4147 "AARCH64_OPDE_NIL",
4148 "AARCH64_OPDE_RECOVERABLE",
4149 "AARCH64_OPDE_SYNTAX_ERROR",
4150 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
4151 "AARCH64_OPDE_INVALID_VARIANT",
4152 "AARCH64_OPDE_OUT_OF_RANGE",
4153 "AARCH64_OPDE_UNALIGNED",
4154 "AARCH64_OPDE_REG_LIST",
4155 "AARCH64_OPDE_OTHER_ERROR",
4156 };
4157 #endif /* DEBUG_AARCH64 */
4158
4159 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4160
4161 When multiple errors of different kinds are found in the same assembly
4162 line, only the error of the highest severity will be picked up for
4163 issuing the diagnostics. */
4164
4165 static inline bfd_boolean
4166 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
4167 enum aarch64_operand_error_kind rhs)
4168 {
4169 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
4170 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
4171 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
4172 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
4173 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
4174 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
4175 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
4176 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
4177 return lhs > rhs;
4178 }
4179
4180 /* Helper routine to get the mnemonic name from the assembly instruction
4181 line; should only be called for diagnostic purposes, as a string copy
4182 operation is involved, which may affect the runtime performance if
4183 used elsewhere. */
4184
4185 static const char*
4186 get_mnemonic_name (const char *str)
4187 {
4188 static char mnemonic[32];
4189 char *ptr;
4190
4191 /* Get the first 31 bytes and assume that the full name is included. */
4192 strncpy (mnemonic, str, 31);
4193 mnemonic[31] = '\0';
4194
4195 /* Scan up to the end of the mnemonic, which must end in white space,
4196 '.', or end of string. */
4197 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
4198 ;
4199
4200 *ptr = '\0';
4201
4202 /* Append '...' to the truncated long name. */
4203 if (ptr - mnemonic == 31)
4204 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
4205
4206 return mnemonic;
4207 }
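/* For example, given the line "ldnp h0,h1,[x0,#6]!" this returns the
   string "ldnp".  */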
4208
4209 static void
4210 reset_aarch64_instruction (aarch64_instruction *instruction)
4211 {
4212 memset (instruction, '\0', sizeof (aarch64_instruction));
4213 instruction->reloc.type = BFD_RELOC_UNUSED;
4214 }
4215
4216 /* Data structures storing one user error in the assembly code related to
4217 operands. */
4218
4219 struct operand_error_record
4220 {
4221 const aarch64_opcode *opcode;
4222 aarch64_operand_error detail;
4223 struct operand_error_record *next;
4224 };
4225
4226 typedef struct operand_error_record operand_error_record;
4227
4228 struct operand_errors
4229 {
4230 operand_error_record *head;
4231 operand_error_record *tail;
4232 };
4233
4234 typedef struct operand_errors operand_errors;
4235
4236 /* Top-level data structure reporting user errors for the current line of
4237 the assembly code.
4238 The way md_assemble works is that all opcodes sharing the same mnemonic
4239 name are iterated to find a match to the assembly line. In this data
4240 structure, each such opcode will have one operand_error_record
4241 allocated and inserted. In other words, excessive errors related to
4242 a single opcode are disregarded. */
4243 operand_errors operand_error_report;
4244
4245 /* Free record nodes. */
4246 static operand_error_record *free_opnd_error_record_nodes = NULL;
4247
4248 /* Initialize the data structure that stores the operand mismatch
4249 information on assembling one line of the assembly code. */
4250 static void
4251 init_operand_error_report (void)
4252 {
4253 if (operand_error_report.head != NULL)
4254 {
4255 gas_assert (operand_error_report.tail != NULL);
4256 operand_error_report.tail->next = free_opnd_error_record_nodes;
4257 free_opnd_error_record_nodes = operand_error_report.head;
4258 operand_error_report.head = NULL;
4259 operand_error_report.tail = NULL;
4260 return;
4261 }
4262 gas_assert (operand_error_report.tail == NULL);
4263 }
4264
4265 /* Return TRUE if some operand error has been recorded during the
4266 parsing of the current assembly line using the opcode *OPCODE;
4267 otherwise return FALSE. */
4268 static inline bfd_boolean
4269 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4270 {
4271 operand_error_record *record = operand_error_report.head;
4272 return record && record->opcode == opcode;
4273 }
4274
4275 /* Add the error record *NEW_RECORD to operand_error_report. The record's
4276 OPCODE field is initialized with OPCODE.
4277 N.B. there is only one record for each opcode, i.e. at most one error is
4278 recorded for each instruction template. */
4279
4280 static void
4281 add_operand_error_record (const operand_error_record* new_record)
4282 {
4283 const aarch64_opcode *opcode = new_record->opcode;
4284 operand_error_record* record = operand_error_report.head;
4285
4286 /* The record may have been created for this opcode. If not, we need
4287 to prepare one. */
4288 if (! opcode_has_operand_error_p (opcode))
4289 {
4290 /* Get one empty record. */
4291 if (free_opnd_error_record_nodes == NULL)
4292 {
4293 record = XNEW (operand_error_record);
4294 }
4295 else
4296 {
4297 record = free_opnd_error_record_nodes;
4298 free_opnd_error_record_nodes = record->next;
4299 }
4300 record->opcode = opcode;
4301 /* Insert at the head. */
4302 record->next = operand_error_report.head;
4303 operand_error_report.head = record;
4304 if (operand_error_report.tail == NULL)
4305 operand_error_report.tail = record;
4306 }
4307 else if (record->detail.kind != AARCH64_OPDE_NIL
4308 && record->detail.index <= new_record->detail.index
4309 && operand_error_higher_severity_p (record->detail.kind,
4310 new_record->detail.kind))
4311 {
4312 /* In the case of multiple errors found on operands related to a
4313 single opcode, only record the error of the leftmost operand and
4314 only if the error is of higher severity. */
4315 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
4316 " the existing error %s on operand %d",
4317 operand_mismatch_kind_names[new_record->detail.kind],
4318 new_record->detail.index,
4319 operand_mismatch_kind_names[record->detail.kind],
4320 record->detail.index);
4321 return;
4322 }
4323
4324 record->detail = new_record->detail;
4325 }
4326
4327 static inline void
4328 record_operand_error_info (const aarch64_opcode *opcode,
4329 aarch64_operand_error *error_info)
4330 {
4331 operand_error_record record;
4332 record.opcode = opcode;
4333 record.detail = *error_info;
4334 add_operand_error_record (&record);
4335 }
4336
4337 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4338 error message *ERROR, for operand IDX (counting from 0). */
4339
4340 static void
4341 record_operand_error (const aarch64_opcode *opcode, int idx,
4342 enum aarch64_operand_error_kind kind,
4343 const char* error)
4344 {
4345 aarch64_operand_error info;
4346 memset(&info, 0, sizeof (info));
4347 info.index = idx;
4348 info.kind = kind;
4349 info.error = error;
4350 record_operand_error_info (opcode, &info);
4351 }
4352
4353 static void
4354 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4355 enum aarch64_operand_error_kind kind,
4356 const char* error, const int *extra_data)
4357 {
4358 aarch64_operand_error info;
4359 info.index = idx;
4360 info.kind = kind;
4361 info.error = error;
4362 info.data[0] = extra_data[0];
4363 info.data[1] = extra_data[1];
4364 info.data[2] = extra_data[2];
4365 record_operand_error_info (opcode, &info);
4366 }
4367
4368 static void
4369 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4370 const char* error, int lower_bound,
4371 int upper_bound)
4372 {
4373 int data[3] = {lower_bound, upper_bound, 0};
4374 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4375 error, data);
4376 }
4377
4378 /* Remove the operand error record for *OPCODE. */
4379 static void ATTRIBUTE_UNUSED
4380 remove_operand_error_record (const aarch64_opcode *opcode)
4381 {
4382 if (opcode_has_operand_error_p (opcode))
4383 {
4384 operand_error_record* record = operand_error_report.head;
4385 gas_assert (record != NULL && operand_error_report.tail != NULL);
4386 operand_error_report.head = record->next;
4387 record->next = free_opnd_error_record_nodes;
4388 free_opnd_error_record_nodes = record;
4389 if (operand_error_report.head == NULL)
4390 {
4391 gas_assert (operand_error_report.tail == record);
4392 operand_error_report.tail = NULL;
4393 }
4394 }
4395 }
4396
4397 /* Given the instruction in *INSTR, return the index of the best matched
4398 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4399
4400 Return -1 if there is no qualifier sequence; if multiple matches are
4401 found, return the first one. */
4402
4403 static int
4404 find_best_match (const aarch64_inst *instr,
4405 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4406 {
4407 int i, num_opnds, max_num_matched, idx;
4408
4409 num_opnds = aarch64_num_of_operands (instr->opcode);
4410 if (num_opnds == 0)
4411 {
4412 DEBUG_TRACE ("no operand");
4413 return -1;
4414 }
4415
4416 max_num_matched = 0;
4417 idx = 0;
4418
4419 /* For each pattern. */
4420 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4421 {
4422 int j, num_matched;
4423 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4424
4425 /* Most opcodes have far fewer patterns in the list. */
4426 if (empty_qualifier_sequence_p (qualifiers))
4427 {
4428 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4429 break;
4430 }
4431
4432 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4433 if (*qualifiers == instr->operands[j].qualifier)
4434 ++num_matched;
4435
4436 if (num_matched > max_num_matched)
4437 {
4438 max_num_matched = num_matched;
4439 idx = i;
4440 }
4441 }
4442
4443 DEBUG_TRACE ("return with %d", idx);
4444 return idx;
4445 }
4446
4447 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4448 corresponding operands in *INSTR. */
4449
4450 static inline void
4451 assign_qualifier_sequence (aarch64_inst *instr,
4452 const aarch64_opnd_qualifier_t *qualifiers)
4453 {
4454 int i = 0;
4455 int num_opnds = aarch64_num_of_operands (instr->opcode);
4456 gas_assert (num_opnds);
4457 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4458 instr->operands[i].qualifier = *qualifiers;
4459 }
4460
4461 /* Print operands for diagnostic purposes. */
4462
4463 static void
4464 print_operands (char *buf, const aarch64_opcode *opcode,
4465 const aarch64_opnd_info *opnds)
4466 {
4467 int i;
4468
4469 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4470 {
4471 char str[128];
4472
4473 /* We rely primarily on the opcode operand info; however, we also look
4474 into inst->operands to support the disassembling of the optional
4475 operand.
4476 The two operand codes should be the same in all cases, apart from
4477 when the operand can be optional. */
4478 if (opcode->operands[i] == AARCH64_OPND_NIL
4479 || opnds[i].type == AARCH64_OPND_NIL)
4480 break;
4481
4482 /* Generate the operand string in STR. */
4483 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL);
4484
4485 /* Delimiter. */
4486 if (str[0] != '\0')
4487 strcat (buf, i == 0 ? " " : ", ");
4488
4489 /* Append the operand string. */
4490 strcat (buf, str);
4491 }
4492 }
4493
4494 /* Send to stderr a string as information. */
4495
4496 static void
4497 output_info (const char *format, ...)
4498 {
4499 const char *file;
4500 unsigned int line;
4501 va_list args;
4502
4503 file = as_where (&line);
4504 if (file)
4505 {
4506 if (line != 0)
4507 fprintf (stderr, "%s:%u: ", file, line);
4508 else
4509 fprintf (stderr, "%s: ", file);
4510 }
4511 fprintf (stderr, _("Info: "));
4512 va_start (args, format);
4513 vfprintf (stderr, format, args);
4514 va_end (args);
4515 (void) putc ('\n', stderr);
4516 }
4517
4518 /* Output one operand error record. */
4519
4520 static void
4521 output_operand_error_record (const operand_error_record *record, char *str)
4522 {
4523 const aarch64_operand_error *detail = &record->detail;
4524 int idx = detail->index;
4525 const aarch64_opcode *opcode = record->opcode;
4526 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4527 : AARCH64_OPND_NIL);
4528
4529 switch (detail->kind)
4530 {
4531 case AARCH64_OPDE_NIL:
4532 gas_assert (0);
4533 break;
4534
4535 case AARCH64_OPDE_SYNTAX_ERROR:
4536 case AARCH64_OPDE_RECOVERABLE:
4537 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4538 case AARCH64_OPDE_OTHER_ERROR:
4539 /* Use the prepared error message if there is one; otherwise use the
4540 operand description string to describe the error. */
4541 if (detail->error != NULL)
4542 {
4543 if (idx < 0)
4544 as_bad (_("%s -- `%s'"), detail->error, str);
4545 else
4546 as_bad (_("%s at operand %d -- `%s'"),
4547 detail->error, idx + 1, str);
4548 }
4549 else
4550 {
4551 gas_assert (idx >= 0);
4552 as_bad (_("operand %d must be %s -- `%s'"), idx + 1,
4553 aarch64_get_operand_desc (opd_code), str);
4554 }
4555 break;
4556
4557 case AARCH64_OPDE_INVALID_VARIANT:
4558 as_bad (_("operand mismatch -- `%s'"), str);
4559 if (verbose_error_p)
4560 {
4561 /* We will try to correct the erroneous instruction and also provide
4562 more information e.g. all other valid variants.
4563
4564 The string representation of the corrected instruction and other
4565 valid variants are generated by
4566
4567 1) obtaining the intermediate representation of the erroneous
4568 instruction;
4569 2) manipulating the IR, e.g. replacing the operand qualifier;
4570 3) printing out the instruction by calling the printer functions
4571 shared with the disassembler.
4572
4573 The limitation of this method is that the exact input assembly
4574 line cannot be accurately reproduced in some cases, for example an
4575 optional operand present in the actual assembly line will be
4576 omitted in the output; likewise for the optional syntax rules,
4577 e.g. the # before the immediate. Another limitation is that the
4578 assembly symbols and relocation operations in the assembly line
4579 currently cannot be printed out in the error report. Last but not
4580 least, when there is other error(s) co-exist with this error, the
4581 'corrected' instruction may be still incorrect, e.g. given
4582 'ldnp h0,h1,[x0,#6]!'
4583 this diagnosis will provide the version:
4584 'ldnp s0,s1,[x0,#6]!'
4585 which is still not right. */
4586 size_t len = strlen (get_mnemonic_name (str));
4587 int i, qlf_idx;
4588 bfd_boolean result;
4589 char buf[2048];
4590 aarch64_inst *inst_base = &inst.base;
4591 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4592
4593 /* Init inst. */
4594 reset_aarch64_instruction (&inst);
4595 inst_base->opcode = opcode;
4596
4597 /* Reset the error report so that there is no side effect on the
4598 following operand parsing. */
4599 init_operand_error_report ();
4600
4601 /* Fill inst. */
4602 result = parse_operands (str + len, opcode)
4603 && programmer_friendly_fixup (&inst);
4604 gas_assert (result);
4605 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4606 NULL, NULL);
4607 gas_assert (!result);
4608
4609 /* Find the most matched qualifier sequence. */
4610 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4611 gas_assert (qlf_idx > -1);
4612
4613 /* Assign the qualifiers. */
4614 assign_qualifier_sequence (inst_base,
4615 opcode->qualifiers_list[qlf_idx]);
4616
4617 /* Print the hint. */
4618 output_info (_(" did you mean this?"));
4619 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4620 print_operands (buf, opcode, inst_base->operands);
4621 output_info (_(" %s"), buf);
4622
4623 /* Print out other variant(s) if there is any. */
4624 if (qlf_idx != 0 ||
4625 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4626 output_info (_(" other valid variant(s):"));
4627
4628 /* For each pattern. */
4629 qualifiers_list = opcode->qualifiers_list;
4630 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4631 {
4632 /* Most opcodes have far fewer patterns in the list.
4633 The first NIL qualifier indicates the end of the list. */
4634 if (empty_qualifier_sequence_p (*qualifiers_list))
4635 break;
4636
4637 if (i != qlf_idx)
4638 {
4639 /* Mnemonic name. */
4640 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4641
4642 /* Assign the qualifiers. */
4643 assign_qualifier_sequence (inst_base, *qualifiers_list);
4644
4645 /* Print instruction. */
4646 print_operands (buf, opcode, inst_base->operands);
4647
4648 output_info (_(" %s"), buf);
4649 }
4650 }
4651 }
4652 break;
4653
4654 case AARCH64_OPDE_UNTIED_OPERAND:
4655 as_bad (_("operand %d must be the same register as operand 1 -- `%s'"),
4656 detail->index + 1, str);
4657 break;
4658
4659 case AARCH64_OPDE_OUT_OF_RANGE:
4660 if (detail->data[0] != detail->data[1])
4661 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4662 detail->error ? detail->error : _("immediate value"),
4663 detail->data[0], detail->data[1], idx + 1, str);
4664 else
4665 as_bad (_("%s must be %d at operand %d -- `%s'"),
4666 detail->error ? detail->error : _("immediate value"),
4667 detail->data[0], idx + 1, str);
4668 break;
4669
4670 case AARCH64_OPDE_REG_LIST:
4671 if (detail->data[0] == 1)
4672 as_bad (_("invalid number of registers in the list; "
4673 "only 1 register is expected at operand %d -- `%s'"),
4674 idx + 1, str);
4675 else
4676 as_bad (_("invalid number of registers in the list; "
4677 "%d registers are expected at operand %d -- `%s'"),
4678 detail->data[0], idx + 1, str);
4679 break;
4680
4681 case AARCH64_OPDE_UNALIGNED:
4682 as_bad (_("immediate value must be a multiple of "
4683 "%d at operand %d -- `%s'"),
4684 detail->data[0], idx + 1, str);
4685 break;
4686
4687 default:
4688 gas_assert (0);
4689 break;
4690 }
4691 }
4692
4693 /* Process and output the error message about the operand mismatch.
4694 
4695 When this function is called, the operand error information has
4696 been collected for an assembly line and there will be multiple
4697 errors in the case of multiple instruction templates; output the
4698 error message that most closely describes the problem. */
4699
4700 static void
4701 output_operand_error_report (char *str)
4702 {
4703 int largest_error_pos;
4704 const char *msg = NULL;
4705 enum aarch64_operand_error_kind kind;
4706 operand_error_record *curr;
4707 operand_error_record *head = operand_error_report.head;
4708 operand_error_record *record = NULL;
4709
4710 /* No error to report. */
4711 if (head == NULL)
4712 return;
4713
4714 gas_assert (head != NULL && operand_error_report.tail != NULL);
4715
4716 /* Only one error. */
4717 if (head == operand_error_report.tail)
4718 {
4719 DEBUG_TRACE ("single opcode entry with error kind: %s",
4720 operand_mismatch_kind_names[head->detail.kind]);
4721 output_operand_error_record (head, str);
4722 return;
4723 }
4724
4725 /* Find the error kind of the highest severity. */
4726 DEBUG_TRACE ("multiple opcode entries with error kind");
4727 kind = AARCH64_OPDE_NIL;
4728 for (curr = head; curr != NULL; curr = curr->next)
4729 {
4730 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4731 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4732 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4733 kind = curr->detail.kind;
4734 }
4735 gas_assert (kind != AARCH64_OPDE_NIL);
4736
4737 /* Pick up one of the errors of KIND to report. */
4738 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4739 for (curr = head; curr != NULL; curr = curr->next)
4740 {
4741 if (curr->detail.kind != kind)
4742 continue;
4743 /* If there are multiple errors, pick up the one with the highest
4744 mismatching operand index. In the case of multiple errors with
4745 equally high operand indexes, pick up the first one, or the
4746 first one with a non-NULL error message. */
4747 if (curr->detail.index > largest_error_pos
4748 || (curr->detail.index == largest_error_pos && msg == NULL
4749 && curr->detail.error != NULL))
4750 {
4751 largest_error_pos = curr->detail.index;
4752 record = curr;
4753 msg = record->detail.error;
4754 }
4755 }
4756
4757 gas_assert (largest_error_pos != -2 && record != NULL);
4758 DEBUG_TRACE ("Pick up error kind %s to report",
4759 operand_mismatch_kind_names[record->detail.kind]);
4760
4761 /* Output. */
4762 output_operand_error_record (record, str);
4763 }
4764 \f
4765 /* Write an AArch64 instruction to BUF - always little-endian. */
4766 static void
4767 put_aarch64_insn (char *buf, uint32_t insn)
4768 {
4769 unsigned char *where = (unsigned char *) buf;
4770 where[0] = insn;
4771 where[1] = insn >> 8;
4772 where[2] = insn >> 16;
4773 where[3] = insn >> 24;
4774 }
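/* For example, put_aarch64_insn (buf, 0xd503201f) (a NOP) stores the bytes
   0x1f, 0x20, 0x03, 0xd5 in that order, regardless of host endianness.  */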
4775
4776 static uint32_t
4777 get_aarch64_insn (char *buf)
4778 {
4779 unsigned char *where = (unsigned char *) buf;
4780 uint32_t result;
4781 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4782 return result;
4783 }
4784
4785 static void
4786 output_inst (struct aarch64_inst *new_inst)
4787 {
4788 char *to = NULL;
4789
4790 to = frag_more (INSN_SIZE);
4791
4792 frag_now->tc_frag_data.recorded = 1;
4793
4794 put_aarch64_insn (to, inst.base.value);
4795
4796 if (inst.reloc.type != BFD_RELOC_UNUSED)
4797 {
4798 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4799 INSN_SIZE, &inst.reloc.exp,
4800 inst.reloc.pc_rel,
4801 inst.reloc.type);
4802 DEBUG_TRACE ("Prepared relocation fix up");
4803 /* Don't check the addend value against the instruction size,
4804 that's the job of our code in md_apply_fix(). */
4805 fixp->fx_no_overflow = 1;
4806 if (new_inst != NULL)
4807 fixp->tc_fix_data.inst = new_inst;
4808 if (aarch64_gas_internal_fixup_p ())
4809 {
4810 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4811 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4812 fixp->fx_addnumber = inst.reloc.flags;
4813 }
4814 }
4815
4816 dwarf2_emit_insn (INSN_SIZE);
4817 }
4818
4819 /* Link together opcodes of the same name. */
4820
4821 struct templates
4822 {
4823 aarch64_opcode *opcode;
4824 struct templates *next;
4825 };
4826
4827 typedef struct templates templates;
4828
4829 static templates *
4830 lookup_mnemonic (const char *start, int len)
4831 {
4832 templates *templ = NULL;
4833
4834 templ = hash_find_n (aarch64_ops_hsh, start, len);
4835 return templ;
4836 }
4837
4838 /* Subroutine of md_assemble, responsible for looking up the primary
4839 opcode from the mnemonic the user wrote. STR points to the
4840 beginning of the mnemonic. */
4841
4842 static templates *
4843 opcode_lookup (char **str)
4844 {
4845 char *end, *base, *dot;
4846 const aarch64_cond *cond;
4847 char condname[16];
4848 int len;
4849
4850 /* Scan up to the end of the mnemonic, which must end in white space,
4851 '.', or end of string. */
4852 dot = 0;
4853 for (base = end = *str; is_part_of_name(*end); end++)
4854 if (*end == '.' && !dot)
4855 dot = end;
4856
4857 if (end == base || dot == base)
4858 return 0;
4859
4860 inst.cond = COND_ALWAYS;
4861
4862 /* Handle a possible condition. */
4863 if (dot)
4864 {
4865 cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
4866 if (cond)
4867 {
4868 inst.cond = cond->value;
4869 *str = end;
4870 }
4871 else
4872 {
4873 *str = dot;
4874 return 0;
4875 }
4876 len = dot - base;
4877 }
4878 else
4879 {
4880 *str = end;
4881 len = end - base;
4882 }
4883
4884 if (inst.cond == COND_ALWAYS)
4885 {
4886 /* Look for unaffixed mnemonic. */
4887 return lookup_mnemonic (base, len);
4888 }
4889 else if (len <= 13)
4890 {
4891 /* Append ".c" to the mnemonic if conditional.  */
4892 memcpy (condname, base, len);
4893 memcpy (condname + len, ".c", 2);
4894 base = condname;
4895 len += 2;
4896 return lookup_mnemonic (base, len);
4897 }
4898
4899 return NULL;
4900 }
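/* For example, for "b.eq lbl" the condition "eq" is recorded in inst.cond
   and the lookup is performed on the name "b.c", the conditional-branch
   template in the opcode table.  */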
4901
4902 /* Internal helper routine converting a vector_type_el structure *VECTYPE
4903 to a corresponding operand qualifier. */
4904
4905 static inline aarch64_opnd_qualifier_t
4906 vectype_to_qualifier (const struct vector_type_el *vectype)
4907 {
4908 /* Element size in bytes indexed by vector_el_type. */
4909 const unsigned char ele_size[5]
4910 = {1, 2, 4, 8, 16};
4911 const unsigned int ele_base [5] =
4912 {
4913 AARCH64_OPND_QLF_V_8B,
4914 AARCH64_OPND_QLF_V_2H,
4915 AARCH64_OPND_QLF_V_2S,
4916 AARCH64_OPND_QLF_V_1D,
4917 AARCH64_OPND_QLF_V_1Q
4918 };
4919
4920 if (!vectype->defined || vectype->type == NT_invtype)
4921 goto vectype_conversion_fail;
4922
4923 if (vectype->type == NT_zero)
4924 return AARCH64_OPND_QLF_P_Z;
4925 if (vectype->type == NT_merge)
4926 return AARCH64_OPND_QLF_P_M;
4927
4928 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4929
4930 if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
4931 /* Vector element register. */
4932 return AARCH64_OPND_QLF_S_B + vectype->type;
4933 else
4934 {
4935 /* Vector register. */
4936 int reg_size = ele_size[vectype->type] * vectype->width;
4937 unsigned offset;
4938 unsigned shift;
4939 if (reg_size != 16 && reg_size != 8 && reg_size != 4)
4940 goto vectype_conversion_fail;
4941
4942 /* The conversion is done by calculating the offset from the base operand
4943 qualifier for the vector type. The operand qualifiers are regular
4944 enough that the offset can be established by shifting the vector width by
4945 a vector-type dependent amount. */
4946 shift = 0;
4947 if (vectype->type == NT_b)
4948 shift = 4;
4949 else if (vectype->type == NT_h || vectype->type == NT_s)
4950 shift = 2;
4951 else if (vectype->type >= NT_d)
4952 shift = 1;
4953 else
4954 gas_assert (0);
4955
4956 offset = ele_base [vectype->type] + (vectype->width >> shift);
4957 gas_assert (AARCH64_OPND_QLF_V_8B <= offset
4958 && offset <= AARCH64_OPND_QLF_V_1Q);
4959 return offset;
4960 }
4961
4962 vectype_conversion_fail:
4963 first_error (_("bad vector arrangement type"));
4964 return AARCH64_OPND_QLF_NIL;
4965 }
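/* For example, the ".4s" arrangement (type NT_s, width 4) gives
   reg_size == 16 and shift == 2, so the returned qualifier is
   AARCH64_OPND_QLF_V_2S + 1, i.e. AARCH64_OPND_QLF_V_4S.  */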
4966
4967 /* Process an optional operand that has been omitted from the assembly line.
4968 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4969 instruction's opcode entry while IDX is the index of this omitted operand.
4970 */
4971
4972 static void
4973 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4974 int idx, aarch64_opnd_info *operand)
4975 {
4976 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4977 gas_assert (optional_operand_p (opcode, idx));
4978 gas_assert (!operand->present);
4979
4980 switch (type)
4981 {
4982 case AARCH64_OPND_Rd:
4983 case AARCH64_OPND_Rn:
4984 case AARCH64_OPND_Rm:
4985 case AARCH64_OPND_Rt:
4986 case AARCH64_OPND_Rt2:
4987 case AARCH64_OPND_Rs:
4988 case AARCH64_OPND_Ra:
4989 case AARCH64_OPND_Rt_SYS:
4990 case AARCH64_OPND_Rd_SP:
4991 case AARCH64_OPND_Rn_SP:
4992 case AARCH64_OPND_Rm_SP:
4993 case AARCH64_OPND_Fd:
4994 case AARCH64_OPND_Fn:
4995 case AARCH64_OPND_Fm:
4996 case AARCH64_OPND_Fa:
4997 case AARCH64_OPND_Ft:
4998 case AARCH64_OPND_Ft2:
4999 case AARCH64_OPND_Sd:
5000 case AARCH64_OPND_Sn:
5001 case AARCH64_OPND_Sm:
5002 case AARCH64_OPND_Vd:
5003 case AARCH64_OPND_Vn:
5004 case AARCH64_OPND_Vm:
5005 case AARCH64_OPND_VdD1:
5006 case AARCH64_OPND_VnD1:
5007 operand->reg.regno = default_value;
5008 break;
5009
5010 case AARCH64_OPND_Ed:
5011 case AARCH64_OPND_En:
5012 case AARCH64_OPND_Em:
5013 operand->reglane.regno = default_value;
5014 break;
5015
5016 case AARCH64_OPND_IDX:
5017 case AARCH64_OPND_BIT_NUM:
5018 case AARCH64_OPND_IMMR:
5019 case AARCH64_OPND_IMMS:
5020 case AARCH64_OPND_SHLL_IMM:
5021 case AARCH64_OPND_IMM_VLSL:
5022 case AARCH64_OPND_IMM_VLSR:
5023 case AARCH64_OPND_CCMP_IMM:
5024 case AARCH64_OPND_FBITS:
5025 case AARCH64_OPND_UIMM4:
5026 case AARCH64_OPND_UIMM3_OP1:
5027 case AARCH64_OPND_UIMM3_OP2:
5028 case AARCH64_OPND_IMM:
5029 case AARCH64_OPND_WIDTH:
5030 case AARCH64_OPND_UIMM7:
5031 case AARCH64_OPND_NZCV:
5032 case AARCH64_OPND_SVE_PATTERN:
5033 case AARCH64_OPND_SVE_PRFOP:
5034 operand->imm.value = default_value;
5035 break;
5036
5037 case AARCH64_OPND_SVE_PATTERN_SCALED:
5038 operand->imm.value = default_value;
5039 operand->shifter.kind = AARCH64_MOD_MUL;
5040 operand->shifter.amount = 1;
5041 break;
5042
5043 case AARCH64_OPND_EXCEPTION:
5044 inst.reloc.type = BFD_RELOC_UNUSED;
5045 break;
5046
5047 case AARCH64_OPND_BARRIER_ISB:
5048 operand->barrier = aarch64_barrier_options + default_value;
5049 break;
5050 default:
5051 break;
5052 }
5053 }
5054
5055 /* Process the relocation type for move wide instructions.
5056 Return TRUE on success; otherwise return FALSE. */
5057
5058 static bfd_boolean
5059 process_movw_reloc_info (void)
5060 {
5061 int is32;
5062 unsigned shift;
5063
5064 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
5065
5066 if (inst.base.opcode->op == OP_MOVK)
5067 switch (inst.reloc.type)
5068 {
5069 case BFD_RELOC_AARCH64_MOVW_G0_S:
5070 case BFD_RELOC_AARCH64_MOVW_G1_S:
5071 case BFD_RELOC_AARCH64_MOVW_G2_S:
5072 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5073 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5074 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5075 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5076 set_syntax_error
5077 (_("the specified relocation type is not allowed for MOVK"));
5078 return FALSE;
5079 default:
5080 break;
5081 }
5082
5083 switch (inst.reloc.type)
5084 {
5085 case BFD_RELOC_AARCH64_MOVW_G0:
5086 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5087 case BFD_RELOC_AARCH64_MOVW_G0_S:
5088 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5089 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5090 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5091 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5092 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
5093 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5094 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5095 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5096 shift = 0;
5097 break;
5098 case BFD_RELOC_AARCH64_MOVW_G1:
5099 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5100 case BFD_RELOC_AARCH64_MOVW_G1_S:
5101 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5102 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5103 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5104 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5105 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
5106 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
5107 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5108 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5109 shift = 16;
5110 break;
5111 case BFD_RELOC_AARCH64_MOVW_G2:
5112 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5113 case BFD_RELOC_AARCH64_MOVW_G2_S:
5114 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
5115 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5116 if (is32)
5117 {
5118 set_fatal_syntax_error
5119 (_("the specified relocation type is not allowed for 32-bit "
5120 "register"));
5121 return FALSE;
5122 }
5123 shift = 32;
5124 break;
5125 case BFD_RELOC_AARCH64_MOVW_G3:
5126 if (is32)
5127 {
5128 set_fatal_syntax_error
5129 (_("the specified relocation type is not allowed for 32-bit "
5130 "register"));
5131 return FALSE;
5132 }
5133 shift = 48;
5134 break;
5135 default:
5136 /* More cases should be added when more MOVW-related relocation types
5137 are supported in GAS. */
5138 gas_assert (aarch64_gas_internal_fixup_p ());
5139 /* The shift amount should have already been set by the parser. */
5140 return TRUE;
5141 }
5142 inst.base.operands[1].shifter.amount = shift;
5143 return TRUE;
5144 }
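/* For example, "movz x0, #:abs_g1:sym" carries BFD_RELOC_AARCH64_MOVW_G1,
   so the implicit shifter amount recorded on operand 1 is 16.  */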
5145
5146 /* A primitive log calculator. */
5147
5148 static inline unsigned int
5149 get_logsz (unsigned int size)
5150 {
5151 const unsigned char ls[16] =
5152 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
5153 if (size > 16)
5154 {
5155 gas_assert (0);
5156 return -1;
5157 }
5158 gas_assert (ls[size - 1] != (unsigned char)-1);
5159 return ls[size - 1];
5160 }
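/* For example, get_logsz (1) == 0, get_logsz (2) == 1, get_logsz (4) == 2,
   get_logsz (8) == 3 and get_logsz (16) == 4; any other size triggers the
   assertion.  */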
5161
5162 /* Determine and return the real reloc type code for an instruction
5163 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
5164
5165 static inline bfd_reloc_code_real_type
5166 ldst_lo12_determine_real_reloc_type (void)
5167 {
5168 unsigned logsz;
5169 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
5170 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
5171
5172 const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
5173 {
5174 BFD_RELOC_AARCH64_LDST8_LO12,
5175 BFD_RELOC_AARCH64_LDST16_LO12,
5176 BFD_RELOC_AARCH64_LDST32_LO12,
5177 BFD_RELOC_AARCH64_LDST64_LO12,
5178 BFD_RELOC_AARCH64_LDST128_LO12
5179 },
5180 {
5181 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
5182 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
5183 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
5184 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
5185 BFD_RELOC_AARCH64_NONE
5186 },
5187 {
5188 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
5189 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
5190 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
5191 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
5192 BFD_RELOC_AARCH64_NONE
5193 }
5194 };
5195
5196 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5197 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5198 || (inst.reloc.type
5199 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
5200 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
5201
5202 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
5203 opd1_qlf =
5204 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
5205 1, opd0_qlf, 0);
5206 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
5207
5208 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
5209 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5210 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
5211 gas_assert (logsz <= 3);
5212 else
5213 gas_assert (logsz <= 4);
5214
5215 /* In reloc.c, these pseudo relocation types should be defined in the same
5216 order as in the reloc_ldst_lo12 array above, because the array index
5217 calculation below relies on this. */
5218 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
5219 }
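/* For example, for "ldr x0, [x1, #:lo12:sym]" the 64-bit transfer size
   gives logsz == 3, so BFD_RELOC_AARCH64_LDST_LO12 is mapped to
   BFD_RELOC_AARCH64_LDST64_LO12.  */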
5220
5221 /* Check whether a register list REGINFO is valid. The registers must be
5222 numbered in increasing order (modulo 32), normally in increments of one.
5223 
5224 If ACCEPT_ALTERNATE is non-zero, the register numbers must instead be in
5225 increments of two.
5226
5227 Return FALSE if such a register list is invalid, otherwise return TRUE. */
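/* For example, assuming the packed REGINFO format produced by
   parse_vector_reg_list (bits [1:0] hold the number of registers minus
   one, and each successive 5-bit field from bit 2 upwards holds a
   register number), the list { v4.8b, v5.8b, v6.8b } would be encoded as
   (6 << 12) | (5 << 7) | (4 << 2) | 2.  */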
5228
5229 static bfd_boolean
5230 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5231 {
5232 uint32_t i, nb_regs, prev_regno, incr;
5233
5234 nb_regs = 1 + (reginfo & 0x3);
5235 reginfo >>= 2;
5236 prev_regno = reginfo & 0x1f;
5237 incr = accept_alternate ? 2 : 1;
5238
5239 for (i = 1; i < nb_regs; ++i)
5240 {
5241 uint32_t curr_regno;
5242 reginfo >>= 5;
5243 curr_regno = reginfo & 0x1f;
5244 if (curr_regno != ((prev_regno + incr) & 0x1f))
5245 return FALSE;
5246 prev_regno = curr_regno;
5247 }
5248
5249 return TRUE;
5250 }
5251
5252 /* Generic instruction operand parser. This does no encoding and no
5253 semantic validation; it merely squirrels values away in the inst
5254 structure. Returns TRUE or FALSE depending on whether the
5255 specified grammar matched. */
5256
5257 static bfd_boolean
5258 parse_operands (char *str, const aarch64_opcode *opcode)
5259 {
5260 int i;
5261 char *backtrack_pos = 0;
5262 const enum aarch64_opnd *operands = opcode->operands;
5263 aarch64_reg_type imm_reg_type;
5264
5265 clear_error ();
5266 skip_whitespace (str);
5267
5268 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5269 imm_reg_type = REG_TYPE_R_Z_BHSDQ_VZP;
5270 else
5271 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5272
5273 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5274 {
5275 int64_t val;
5276 const reg_entry *reg;
5277 int comma_skipped_p = 0;
5278 aarch64_reg_type rtype;
5279 struct vector_type_el vectype;
5280 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5281 aarch64_opnd_info *info = &inst.base.operands[i];
5282 aarch64_reg_type reg_type;
5283
5284 DEBUG_TRACE ("parse operand %d", i);
5285
5286 /* Assign the operand code. */
5287 info->type = operands[i];
5288
5289 if (optional_operand_p (opcode, i))
5290 {
5291 /* Remember where we are in case we need to backtrack. */
5292 gas_assert (!backtrack_pos);
5293 backtrack_pos = str;
5294 }
5295
5296 /* Expect comma between operands; the backtrack mechanism will take
5297 care of cases of an omitted optional operand. */
5298 if (i > 0 && ! skip_past_char (&str, ','))
5299 {
5300 set_syntax_error (_("comma expected between operands"));
5301 goto failure;
5302 }
5303 else
5304 comma_skipped_p = 1;
5305
5306 switch (operands[i])
5307 {
5308 case AARCH64_OPND_Rd:
5309 case AARCH64_OPND_Rn:
5310 case AARCH64_OPND_Rm:
5311 case AARCH64_OPND_Rt:
5312 case AARCH64_OPND_Rt2:
5313 case AARCH64_OPND_Rs:
5314 case AARCH64_OPND_Ra:
5315 case AARCH64_OPND_Rt_SYS:
5316 case AARCH64_OPND_PAIRREG:
5317 case AARCH64_OPND_SVE_Rm:
5318 po_int_reg_or_fail (REG_TYPE_R_Z);
5319 break;
5320
5321 case AARCH64_OPND_Rd_SP:
5322 case AARCH64_OPND_Rn_SP:
5323 case AARCH64_OPND_SVE_Rn_SP:
5324 case AARCH64_OPND_Rm_SP:
5325 po_int_reg_or_fail (REG_TYPE_R_SP);
5326 break;
5327
5328 case AARCH64_OPND_Rm_EXT:
5329 case AARCH64_OPND_Rm_SFT:
5330 po_misc_or_fail (parse_shifter_operand
5331 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5332 ? SHIFTED_ARITH_IMM
5333 : SHIFTED_LOGIC_IMM)));
5334 if (!info->shifter.operator_present)
5335 {
5336 /* Default to LSL if not present. Libopcodes prefers shifter
5337 kind to be explicit. */
5338 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5339 info->shifter.kind = AARCH64_MOD_LSL;
5340 /* For Rm_EXT, libopcodes will carry out a further check on whether
5341 or not the stack pointer is used in the instruction (recall that
5342 "the extend operator is not optional unless at least one of
5343 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5344 }
5345 break;
5346
5347 case AARCH64_OPND_Fd:
5348 case AARCH64_OPND_Fn:
5349 case AARCH64_OPND_Fm:
5350 case AARCH64_OPND_Fa:
5351 case AARCH64_OPND_Ft:
5352 case AARCH64_OPND_Ft2:
5353 case AARCH64_OPND_Sd:
5354 case AARCH64_OPND_Sn:
5355 case AARCH64_OPND_Sm:
5356 case AARCH64_OPND_SVE_VZn:
5357 case AARCH64_OPND_SVE_Vd:
5358 case AARCH64_OPND_SVE_Vm:
5359 case AARCH64_OPND_SVE_Vn:
5360 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5361 if (val == PARSE_FAIL)
5362 {
5363 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5364 goto failure;
5365 }
5366 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5367
5368 info->reg.regno = val;
5369 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5370 break;
5371
5372 case AARCH64_OPND_SVE_Pd:
5373 case AARCH64_OPND_SVE_Pg3:
5374 case AARCH64_OPND_SVE_Pg4_5:
5375 case AARCH64_OPND_SVE_Pg4_10:
5376 case AARCH64_OPND_SVE_Pg4_16:
5377 case AARCH64_OPND_SVE_Pm:
5378 case AARCH64_OPND_SVE_Pn:
5379 case AARCH64_OPND_SVE_Pt:
5380 reg_type = REG_TYPE_PN;
5381 goto vector_reg;
5382
5383 case AARCH64_OPND_SVE_Za_5:
5384 case AARCH64_OPND_SVE_Za_16:
5385 case AARCH64_OPND_SVE_Zd:
5386 case AARCH64_OPND_SVE_Zm_5:
5387 case AARCH64_OPND_SVE_Zm_16:
5388 case AARCH64_OPND_SVE_Zn:
5389 case AARCH64_OPND_SVE_Zt:
5390 reg_type = REG_TYPE_ZN;
5391 goto vector_reg;
5392
5393 case AARCH64_OPND_Vd:
5394 case AARCH64_OPND_Vn:
5395 case AARCH64_OPND_Vm:
5396 reg_type = REG_TYPE_VN;
5397 vector_reg:
5398 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5399 if (val == PARSE_FAIL)
5400 {
5401 first_error (_(get_reg_expected_msg (reg_type)));
5402 goto failure;
5403 }
5404 if (vectype.defined & NTA_HASINDEX)
5405 goto failure;
5406
5407 info->reg.regno = val;
5408 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5409 && vectype.type == NT_invtype)
5410 /* Unqualified Pn and Zn registers are allowed in certain
5411 contexts. Rely on F_STRICT qualifier checking to catch
5412 invalid uses. */
5413 info->qualifier = AARCH64_OPND_QLF_NIL;
5414 else
5415 {
5416 info->qualifier = vectype_to_qualifier (&vectype);
5417 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5418 goto failure;
5419 }
5420 break;
5421
5422 case AARCH64_OPND_VdD1:
5423 case AARCH64_OPND_VnD1:
5424 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5425 if (val == PARSE_FAIL)
5426 {
5427 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5428 goto failure;
5429 }
5430 if (vectype.type != NT_d || vectype.index != 1)
5431 {
5432 set_fatal_syntax_error
5433 (_("the top half of a 128-bit FP/SIMD register is expected"));
5434 goto failure;
5435 }
5436 info->reg.regno = val;
5437 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
5438 here; this is correct for the purpose of encoding/decoding since
5439 only the register number is explicitly encoded in the related
5440 instructions, although this appears a bit hacky. */
5441 info->qualifier = AARCH64_OPND_QLF_S_D;
5442 break;
5443
5444 case AARCH64_OPND_SVE_Zm3_INDEX:
5445 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5446 case AARCH64_OPND_SVE_Zm4_INDEX:
5447 case AARCH64_OPND_SVE_Zn_INDEX:
5448 reg_type = REG_TYPE_ZN;
5449 goto vector_reg_index;
5450
5451 case AARCH64_OPND_Ed:
5452 case AARCH64_OPND_En:
5453 case AARCH64_OPND_Em:
5454 reg_type = REG_TYPE_VN;
5455 vector_reg_index:
5456 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5457 if (val == PARSE_FAIL)
5458 {
5459 first_error (_(get_reg_expected_msg (reg_type)));
5460 goto failure;
5461 }
5462 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5463 goto failure;
5464
5465 info->reglane.regno = val;
5466 info->reglane.index = vectype.index;
5467 info->qualifier = vectype_to_qualifier (&vectype);
5468 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5469 goto failure;
5470 break;
5471
5472 case AARCH64_OPND_SVE_ZnxN:
5473 case AARCH64_OPND_SVE_ZtxN:
5474 reg_type = REG_TYPE_ZN;
5475 goto vector_reg_list;
5476
5477 case AARCH64_OPND_LVn:
5478 case AARCH64_OPND_LVt:
5479 case AARCH64_OPND_LVt_AL:
5480 case AARCH64_OPND_LEt:
5481 reg_type = REG_TYPE_VN;
5482 vector_reg_list:
5483 if (reg_type == REG_TYPE_ZN
5484 && get_opcode_dependent_value (opcode) == 1
5485 && *str != '{')
5486 {
5487 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5488 if (val == PARSE_FAIL)
5489 {
5490 first_error (_(get_reg_expected_msg (reg_type)));
5491 goto failure;
5492 }
5493 info->reglist.first_regno = val;
5494 info->reglist.num_regs = 1;
5495 }
5496 else
5497 {
5498 val = parse_vector_reg_list (&str, reg_type, &vectype);
5499 if (val == PARSE_FAIL)
5500 goto failure;
5501 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5502 {
5503 set_fatal_syntax_error (_("invalid register list"));
5504 goto failure;
5505 }
5506 info->reglist.first_regno = (val >> 2) & 0x1f;
5507 info->reglist.num_regs = (val & 0x3) + 1;
5508 }
5509 if (operands[i] == AARCH64_OPND_LEt)
5510 {
5511 if (!(vectype.defined & NTA_HASINDEX))
5512 goto failure;
5513 info->reglist.has_index = 1;
5514 info->reglist.index = vectype.index;
5515 }
5516 else
5517 {
5518 if (vectype.defined & NTA_HASINDEX)
5519 goto failure;
5520 if (!(vectype.defined & NTA_HASTYPE))
5521 {
5522 if (reg_type == REG_TYPE_ZN)
5523 set_fatal_syntax_error (_("missing type suffix"));
5524 goto failure;
5525 }
5526 }
5527 info->qualifier = vectype_to_qualifier (&vectype);
5528 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5529 goto failure;
5530 break;
5531
5532 case AARCH64_OPND_CRn:
5533 case AARCH64_OPND_CRm:
5534 {
5535 char prefix = *(str++);
5536 if (prefix != 'c' && prefix != 'C')
5537 goto failure;
5538
5539 po_imm_nc_or_fail ();
5540 if (val > 15)
5541 {
5542 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5543 goto failure;
5544 }
5545 info->qualifier = AARCH64_OPND_QLF_CR;
5546 info->imm.value = val;
5547 break;
5548 }
5549
5550 case AARCH64_OPND_SHLL_IMM:
5551 case AARCH64_OPND_IMM_VLSR:
5552 po_imm_or_fail (1, 64);
5553 info->imm.value = val;
5554 break;
5555
5556 case AARCH64_OPND_CCMP_IMM:
5557 case AARCH64_OPND_SIMM5:
5558 case AARCH64_OPND_FBITS:
5559 case AARCH64_OPND_UIMM4:
5560 case AARCH64_OPND_UIMM3_OP1:
5561 case AARCH64_OPND_UIMM3_OP2:
5562 case AARCH64_OPND_IMM_VLSL:
5563 case AARCH64_OPND_IMM:
5564 case AARCH64_OPND_WIDTH:
5565 case AARCH64_OPND_SVE_INV_LIMM:
5566 case AARCH64_OPND_SVE_LIMM:
5567 case AARCH64_OPND_SVE_LIMM_MOV:
5568 case AARCH64_OPND_SVE_SHLIMM_PRED:
5569 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5570 case AARCH64_OPND_SVE_SHRIMM_PRED:
5571 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5572 case AARCH64_OPND_SVE_SIMM5:
5573 case AARCH64_OPND_SVE_SIMM5B:
5574 case AARCH64_OPND_SVE_SIMM6:
5575 case AARCH64_OPND_SVE_SIMM8:
5576 case AARCH64_OPND_SVE_UIMM3:
5577 case AARCH64_OPND_SVE_UIMM7:
5578 case AARCH64_OPND_SVE_UIMM8:
5579 case AARCH64_OPND_SVE_UIMM8_53:
5580 case AARCH64_OPND_IMM_ROT1:
5581 case AARCH64_OPND_IMM_ROT2:
5582 case AARCH64_OPND_IMM_ROT3:
5583 case AARCH64_OPND_SVE_IMM_ROT1:
5584 case AARCH64_OPND_SVE_IMM_ROT2:
5585 po_imm_nc_or_fail ();
5586 info->imm.value = val;
5587 break;
5588
5589 case AARCH64_OPND_SVE_AIMM:
5590 case AARCH64_OPND_SVE_ASIMM:
5591 po_imm_nc_or_fail ();
5592 info->imm.value = val;
5593 skip_whitespace (str);
5594 if (skip_past_comma (&str))
5595 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5596 else
5597 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5598 break;
5599
5600 case AARCH64_OPND_SVE_PATTERN:
5601 po_enum_or_fail (aarch64_sve_pattern_array);
5602 info->imm.value = val;
5603 break;
5604
5605 case AARCH64_OPND_SVE_PATTERN_SCALED:
5606 po_enum_or_fail (aarch64_sve_pattern_array);
5607 info->imm.value = val;
5608 if (skip_past_comma (&str)
5609 && !parse_shift (&str, info, SHIFTED_MUL))
5610 goto failure;
5611 if (!info->shifter.operator_present)
5612 {
5613 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5614 info->shifter.kind = AARCH64_MOD_MUL;
5615 info->shifter.amount = 1;
5616 }
5617 break;
5618
5619 case AARCH64_OPND_SVE_PRFOP:
5620 po_enum_or_fail (aarch64_sve_prfop_array);
5621 info->imm.value = val;
5622 break;
5623
5624 case AARCH64_OPND_UIMM7:
5625 po_imm_or_fail (0, 127);
5626 info->imm.value = val;
5627 break;
5628
5629 case AARCH64_OPND_IDX:
5630 case AARCH64_OPND_BIT_NUM:
5631 case AARCH64_OPND_IMMR:
5632 case AARCH64_OPND_IMMS:
5633 po_imm_or_fail (0, 63);
5634 info->imm.value = val;
5635 break;
5636
5637 case AARCH64_OPND_IMM0:
5638 po_imm_nc_or_fail ();
5639 if (val != 0)
5640 {
5641 set_fatal_syntax_error (_("immediate zero expected"));
5642 goto failure;
5643 }
5644 info->imm.value = 0;
5645 break;
5646
5647 case AARCH64_OPND_FPIMM0:
5648 {
5649 int qfloat;
5650 bfd_boolean res1 = FALSE, res2 = FALSE;
5651 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5652 it is probably not worth the effort to support it. */
5653 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5654 imm_reg_type))
5655 && (error_p ()
5656 || !(res2 = parse_constant_immediate (&str, &val,
5657 imm_reg_type))))
5658 goto failure;
5659 if ((res1 && qfloat == 0) || (res2 && val == 0))
5660 {
5661 info->imm.value = 0;
5662 info->imm.is_fp = 1;
5663 break;
5664 }
5665 set_fatal_syntax_error (_("immediate zero expected"));
5666 goto failure;
5667 }
5668
5669 case AARCH64_OPND_IMM_MOV:
5670 {
5671 char *saved = str;
5672 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5673 reg_name_p (str, REG_TYPE_VN))
5674 goto failure;
5675 str = saved;
5676 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5677 GE_OPT_PREFIX, 1));
5678 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5679 later. fix_mov_imm_insn will try to determine a machine
5680 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5681 message if the immediate cannot be moved by a single
5682 instruction. */
5683 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5684 inst.base.operands[i].skip = 1;
5685 }
5686 break;
5687
5688 case AARCH64_OPND_SIMD_IMM:
5689 case AARCH64_OPND_SIMD_IMM_SFT:
5690 if (! parse_big_immediate (&str, &val, imm_reg_type))
5691 goto failure;
5692 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5693 /* addr_off_p */ 0,
5694 /* need_libopcodes_p */ 1,
5695 /* skip_p */ 1);
5696 /* Parse shift.
5697 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5698 shift, we don't check it here; we leave the checking to
5699 the libopcodes (operand_general_constraint_met_p). By
5700 doing this, we achieve better diagnostics. */
5701 if (skip_past_comma (&str)
5702 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5703 goto failure;
5704 if (!info->shifter.operator_present
5705 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5706 {
5707 /* Default to LSL if not present. Libopcodes prefers shifter
5708 kind to be explicit. */
5709 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5710 info->shifter.kind = AARCH64_MOD_LSL;
5711 }
5712 break;
5713
5714 case AARCH64_OPND_FPIMM:
5715 case AARCH64_OPND_SIMD_FPIMM:
5716 case AARCH64_OPND_SVE_FPIMM8:
5717 {
5718 int qfloat;
5719 bfd_boolean dp_p;
5720
5721 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5722 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5723 || !aarch64_imm_float_p (qfloat))
5724 {
5725 if (!error_p ())
5726 set_fatal_syntax_error (_("invalid floating-point"
5727 " constant"));
5728 goto failure;
5729 }
5730 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5731 inst.base.operands[i].imm.is_fp = 1;
5732 }
5733 break;
5734
5735 case AARCH64_OPND_SVE_I1_HALF_ONE:
5736 case AARCH64_OPND_SVE_I1_HALF_TWO:
5737 case AARCH64_OPND_SVE_I1_ZERO_ONE:
5738 {
5739 int qfloat;
5740 bfd_boolean dp_p;
5741
5742 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5743 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5744 {
5745 if (!error_p ())
5746 set_fatal_syntax_error (_("invalid floating-point"
5747 " constant"));
5748 goto failure;
5749 }
5750 inst.base.operands[i].imm.value = qfloat;
5751 inst.base.operands[i].imm.is_fp = 1;
5752 }
5753 break;
5754
5755 case AARCH64_OPND_LIMM:
5756 po_misc_or_fail (parse_shifter_operand (&str, info,
5757 SHIFTED_LOGIC_IMM));
5758 if (info->shifter.operator_present)
5759 {
5760 set_fatal_syntax_error
5761 (_("shift not allowed for bitmask immediate"));
5762 goto failure;
5763 }
5764 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5765 /* addr_off_p */ 0,
5766 /* need_libopcodes_p */ 1,
5767 /* skip_p */ 1);
5768 break;
5769
5770 case AARCH64_OPND_AIMM:
5771 if (opcode->op == OP_ADD)
5772 /* ADD may have relocation types. */
5773 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5774 SHIFTED_ARITH_IMM));
5775 else
5776 po_misc_or_fail (parse_shifter_operand (&str, info,
5777 SHIFTED_ARITH_IMM));
5778 switch (inst.reloc.type)
5779 {
5780 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5781 info->shifter.amount = 12;
5782 break;
5783 case BFD_RELOC_UNUSED:
5784 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5785 if (info->shifter.kind != AARCH64_MOD_NONE)
5786 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5787 inst.reloc.pc_rel = 0;
5788 break;
5789 default:
5790 break;
5791 }
5792 info->imm.value = 0;
5793 if (!info->shifter.operator_present)
5794 {
5795 /* Default to LSL if not present. Libopcodes prefers shifter
5796 kind to be explicit. */
5797 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5798 info->shifter.kind = AARCH64_MOD_LSL;
5799 }
5800 break;
5801
5802 case AARCH64_OPND_HALF:
5803 {
5804 /* #<imm16> or relocation. */
5805 int internal_fixup_p;
5806 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5807 if (internal_fixup_p)
5808 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5809 skip_whitespace (str);
5810 if (skip_past_comma (&str))
5811 {
5812 /* {, LSL #<shift>} */
5813 if (! aarch64_gas_internal_fixup_p ())
5814 {
5815 set_fatal_syntax_error (_("can't mix relocation modifier "
5816 "with explicit shift"));
5817 goto failure;
5818 }
5819 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5820 }
5821 else
5822 inst.base.operands[i].shifter.amount = 0;
5823 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5824 inst.base.operands[i].imm.value = 0;
5825 if (! process_movw_reloc_info ())
5826 goto failure;
5827 }
5828 break;
5829
5830 case AARCH64_OPND_EXCEPTION:
5831 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
5832 imm_reg_type));
5833 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5834 /* addr_off_p */ 0,
5835 /* need_libopcodes_p */ 0,
5836 /* skip_p */ 1);
5837 break;
5838
5839 case AARCH64_OPND_NZCV:
5840 {
5841 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5842 if (nzcv != NULL)
5843 {
5844 str += 4;
5845 info->imm.value = nzcv->value;
5846 break;
5847 }
5848 po_imm_or_fail (0, 15);
5849 info->imm.value = val;
5850 }
5851 break;
5852
5853 case AARCH64_OPND_COND:
5854 case AARCH64_OPND_COND1:
5855 {
5856 char *start = str;
5857 do
5858 str++;
5859 while (ISALPHA (*str));
5860 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
5861 if (info->cond == NULL)
5862 {
5863 set_syntax_error (_("invalid condition"));
5864 goto failure;
5865 }
5866 else if (operands[i] == AARCH64_OPND_COND1
5867 && (info->cond->value & 0xe) == 0xe)
5868 {
5869 /* Do not allow AL or NV. */
5870 set_default_error ();
5871 goto failure;
5872 }
5873 }
5874 break;
5875
5876 case AARCH64_OPND_ADDR_ADRP:
5877 po_misc_or_fail (parse_adrp (&str));
5878 /* Clear the value as operand needs to be relocated. */
5879 info->imm.value = 0;
5880 break;
5881
5882 case AARCH64_OPND_ADDR_PCREL14:
5883 case AARCH64_OPND_ADDR_PCREL19:
5884 case AARCH64_OPND_ADDR_PCREL21:
5885 case AARCH64_OPND_ADDR_PCREL26:
5886 po_misc_or_fail (parse_address (&str, info));
5887 if (!info->addr.pcrel)
5888 {
5889 set_syntax_error (_("invalid pc-relative address"));
5890 goto failure;
5891 }
5892 if (inst.gen_lit_pool
5893 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5894 {
5895 /* Only permit "=value" in the literal load instructions.
5896 The literal will be generated by programmer_friendly_fixup. */
5897 set_syntax_error (_("invalid use of \"=immediate\""));
5898 goto failure;
5899 }
5900 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5901 {
5902 set_syntax_error (_("unrecognized relocation suffix"));
5903 goto failure;
5904 }
5905 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5906 {
5907 info->imm.value = inst.reloc.exp.X_add_number;
5908 inst.reloc.type = BFD_RELOC_UNUSED;
5909 }
5910 else
5911 {
5912 info->imm.value = 0;
5913 if (inst.reloc.type == BFD_RELOC_UNUSED)
5914 switch (opcode->iclass)
5915 {
5916 case compbranch:
5917 case condbranch:
5918 /* e.g. CBZ or B.COND */
5919 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5920 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5921 break;
5922 case testbranch:
5923 /* e.g. TBZ */
5924 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5925 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5926 break;
5927 case branch_imm:
5928 /* e.g. B or BL */
5929 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5930 inst.reloc.type =
5931 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5932 : BFD_RELOC_AARCH64_JUMP26;
5933 break;
5934 case loadlit:
5935 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5936 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5937 break;
5938 case pcreladdr:
5939 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5940 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5941 break;
5942 default:
5943 gas_assert (0);
5944 abort ();
5945 }
5946 inst.reloc.pc_rel = 1;
5947 }
5948 break;
5949
5950 case AARCH64_OPND_ADDR_SIMPLE:
5951 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5952 {
5953 /* [<Xn|SP>{, #<simm>}] */
5954 char *start = str;
5955 /* First use the normal address-parsing routines, to get
5956 the usual syntax errors. */
5957 po_misc_or_fail (parse_address (&str, info));
5958 if (info->addr.pcrel || info->addr.offset.is_reg
5959 || !info->addr.preind || info->addr.postind
5960 || info->addr.writeback)
5961 {
5962 set_syntax_error (_("invalid addressing mode"));
5963 goto failure;
5964 }
5965
5966 /* Then retry, matching the specific syntax of these addresses. */
5967 str = start;
5968 po_char_or_fail ('[');
5969 po_reg_or_fail (REG_TYPE_R64_SP);
5970 /* Accept optional ", #0". */
5971 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5972 && skip_past_char (&str, ','))
5973 {
5974 skip_past_char (&str, '#');
5975 if (! skip_past_char (&str, '0'))
5976 {
5977 set_fatal_syntax_error
5978 (_("the optional immediate offset can only be 0"));
5979 goto failure;
5980 }
5981 }
5982 po_char_or_fail (']');
5983 break;
5984 }
5985
5986 case AARCH64_OPND_ADDR_REGOFF:
5987 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5988 po_misc_or_fail (parse_address (&str, info));
5989 regoff_addr:
5990 if (info->addr.pcrel || !info->addr.offset.is_reg
5991 || !info->addr.preind || info->addr.postind
5992 || info->addr.writeback)
5993 {
5994 set_syntax_error (_("invalid addressing mode"));
5995 goto failure;
5996 }
5997 if (!info->shifter.operator_present)
5998 {
5999 /* Default to LSL if not present. Libopcodes prefers shifter
6000 kind to be explicit. */
6001 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6002 info->shifter.kind = AARCH64_MOD_LSL;
6003 }
6004 /* Qualifier to be deduced by libopcodes. */
6005 break;
6006
6007 case AARCH64_OPND_ADDR_SIMM7:
6008 po_misc_or_fail (parse_address (&str, info));
6009 if (info->addr.pcrel || info->addr.offset.is_reg
6010 || (!info->addr.preind && !info->addr.postind))
6011 {
6012 set_syntax_error (_("invalid addressing mode"));
6013 goto failure;
6014 }
6015 if (inst.reloc.type != BFD_RELOC_UNUSED)
6016 {
6017 set_syntax_error (_("relocation not allowed"));
6018 goto failure;
6019 }
6020 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6021 /* addr_off_p */ 1,
6022 /* need_libopcodes_p */ 1,
6023 /* skip_p */ 0);
6024 break;
6025
6026 case AARCH64_OPND_ADDR_SIMM9:
6027 case AARCH64_OPND_ADDR_SIMM9_2:
6028 po_misc_or_fail (parse_address (&str, info));
6029 if (info->addr.pcrel || info->addr.offset.is_reg
6030 || (!info->addr.preind && !info->addr.postind)
6031 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6032 && info->addr.writeback))
6033 {
6034 set_syntax_error (_("invalid addressing mode"));
6035 goto failure;
6036 }
6037 if (inst.reloc.type != BFD_RELOC_UNUSED)
6038 {
6039 set_syntax_error (_("relocation not allowed"));
6040 goto failure;
6041 }
6042 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6043 /* addr_off_p */ 1,
6044 /* need_libopcodes_p */ 1,
6045 /* skip_p */ 0);
6046 break;
6047
6048 case AARCH64_OPND_ADDR_SIMM10:
6049 po_misc_or_fail (parse_address (&str, info));
6050 if (info->addr.pcrel || info->addr.offset.is_reg
6051 || !info->addr.preind || info->addr.postind)
6052 {
6053 set_syntax_error (_("invalid addressing mode"));
6054 goto failure;
6055 }
6056 if (inst.reloc.type != BFD_RELOC_UNUSED)
6057 {
6058 set_syntax_error (_("relocation not allowed"));
6059 goto failure;
6060 }
6061 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6062 /* addr_off_p */ 1,
6063 /* need_libopcodes_p */ 1,
6064 /* skip_p */ 0);
6065 break;
6066
6067 case AARCH64_OPND_ADDR_UIMM12:
6068 po_misc_or_fail (parse_address (&str, info));
6069 if (info->addr.pcrel || info->addr.offset.is_reg
6070 || !info->addr.preind || info->addr.writeback)
6071 {
6072 set_syntax_error (_("invalid addressing mode"));
6073 goto failure;
6074 }
6075 if (inst.reloc.type == BFD_RELOC_UNUSED)
6076 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6077 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6078 || (inst.reloc.type
6079 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6080 || (inst.reloc.type
6081 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
6082 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6083 /* Leave qualifier to be determined by libopcodes. */
6084 break;
6085
6086 case AARCH64_OPND_SIMD_ADDR_POST:
6087 /* [<Xn|SP>], <Xm|#<amount>> */
6088 po_misc_or_fail (parse_address (&str, info));
6089 if (!info->addr.postind || !info->addr.writeback)
6090 {
6091 set_syntax_error (_("invalid addressing mode"));
6092 goto failure;
6093 }
6094 if (!info->addr.offset.is_reg)
6095 {
6096 if (inst.reloc.exp.X_op == O_constant)
6097 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6098 else
6099 {
6100 set_fatal_syntax_error
6101 (_("writeback value must be an immediate constant"));
6102 goto failure;
6103 }
6104 }
6105 /* No qualifier. */
6106 break;
6107
6108 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6109 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6110 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6111 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6112 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6113 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6114 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6115 case AARCH64_OPND_SVE_ADDR_RI_U6:
6116 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6117 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6118 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6119 /* [X<n>{, #imm, MUL VL}]
6120 [X<n>{, #imm}]
6121 but recognizing SVE registers. */
6122 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6123 &offset_qualifier));
6124 if (base_qualifier != AARCH64_OPND_QLF_X)
6125 {
6126 set_syntax_error (_("invalid addressing mode"));
6127 goto failure;
6128 }
6129 sve_regimm:
6130 if (info->addr.pcrel || info->addr.offset.is_reg
6131 || !info->addr.preind || info->addr.writeback)
6132 {
6133 set_syntax_error (_("invalid addressing mode"));
6134 goto failure;
6135 }
6136 if (inst.reloc.type != BFD_RELOC_UNUSED
6137 || inst.reloc.exp.X_op != O_constant)
6138 {
6139 /* Make sure this has priority over
6140 "invalid addressing mode". */
6141 set_fatal_syntax_error (_("constant offset required"));
6142 goto failure;
6143 }
6144 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6145 break;
6146
6147 case AARCH64_OPND_SVE_ADDR_RR:
6148 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6149 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6150 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6151 case AARCH64_OPND_SVE_ADDR_RX:
6152 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6153 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6154 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6155 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6156 but recognizing SVE registers. */
6157 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6158 &offset_qualifier));
6159 if (base_qualifier != AARCH64_OPND_QLF_X
6160 || offset_qualifier != AARCH64_OPND_QLF_X)
6161 {
6162 set_syntax_error (_("invalid addressing mode"));
6163 goto failure;
6164 }
6165 goto regoff_addr;
6166
6167 case AARCH64_OPND_SVE_ADDR_RZ:
6168 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6169 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6170 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6171 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6172 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6173 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6174 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6175 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6176 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6177 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6178 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6179 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6180 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6181 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6182 &offset_qualifier));
6183 if (base_qualifier != AARCH64_OPND_QLF_X
6184 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6185 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6186 {
6187 set_syntax_error (_("invalid addressing mode"));
6188 goto failure;
6189 }
6190 info->qualifier = offset_qualifier;
6191 goto regoff_addr;
6192
6193 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6194 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6195 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6196 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6197 /* [Z<n>.<T>{, #imm}] */
6198 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6199 &offset_qualifier));
6200 if (base_qualifier != AARCH64_OPND_QLF_S_S
6201 && base_qualifier != AARCH64_OPND_QLF_S_D)
6202 {
6203 set_syntax_error (_("invalid addressing mode"));
6204 goto failure;
6205 }
6206 info->qualifier = base_qualifier;
6207 goto sve_regimm;
6208
6209 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6210 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6211 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6212 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6213 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6214
6215 We don't reject:
6216
6217 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6218
6219 here since we get better error messages by leaving it to
6220 the qualifier checking routines. */
6221 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6222 &offset_qualifier));
6223 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6224 && base_qualifier != AARCH64_OPND_QLF_S_D)
6225 || offset_qualifier != base_qualifier)
6226 {
6227 set_syntax_error (_("invalid addressing mode"));
6228 goto failure;
6229 }
6230 info->qualifier = base_qualifier;
6231 goto regoff_addr;
6232
6233 case AARCH64_OPND_SYSREG:
6234 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
6235 == PARSE_FAIL)
6236 {
6237 set_syntax_error (_("unknown or missing system register name"));
6238 goto failure;
6239 }
6240 inst.base.operands[i].sysreg = val;
6241 break;
6242
6243 case AARCH64_OPND_PSTATEFIELD:
6244 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
6245 == PARSE_FAIL)
6246 {
6247 set_syntax_error (_("unknown or missing PSTATE field name"));
6248 goto failure;
6249 }
6250 inst.base.operands[i].pstatefield = val;
6251 break;
6252
6253 case AARCH64_OPND_SYSREG_IC:
6254 inst.base.operands[i].sysins_op =
6255 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6256 goto sys_reg_ins;
6257 case AARCH64_OPND_SYSREG_DC:
6258 inst.base.operands[i].sysins_op =
6259 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6260 goto sys_reg_ins;
6261 case AARCH64_OPND_SYSREG_AT:
6262 inst.base.operands[i].sysins_op =
6263 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6264 goto sys_reg_ins;
6265 case AARCH64_OPND_SYSREG_TLBI:
6266 inst.base.operands[i].sysins_op =
6267 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6268 sys_reg_ins:
6269 if (inst.base.operands[i].sysins_op == NULL)
6270 {
6271 set_fatal_syntax_error ( _("unknown or missing operation name"));
6272 goto failure;
6273 }
6274 break;
6275
6276 case AARCH64_OPND_BARRIER:
6277 case AARCH64_OPND_BARRIER_ISB:
6278 val = parse_barrier (&str);
6279 if (val != PARSE_FAIL
6280 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6281 {
6282 /* ISB only accepts the option name 'sy'. */
6283 set_syntax_error
6284 (_("the specified option is not accepted in ISB"));
6285 /* Turn off backtrack as this optional operand is present. */
6286 backtrack_pos = 0;
6287 goto failure;
6288 }
6289 /* This is an extension to accept a 0..15 immediate. */
6290 if (val == PARSE_FAIL)
6291 po_imm_or_fail (0, 15);
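/* For example, "dsb ish" and the immediate form "dsb #11" are both
   accepted here, while "isb ld" is rejected above because ISB only
   takes 'sy' (illustrative examples). */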
6292 info->barrier = aarch64_barrier_options + val;
6293 break;
6294
6295 case AARCH64_OPND_PRFOP:
6296 val = parse_pldop (&str);
6297 /* This is an extension to accept a 0..31 immediate. */
6298 if (val == PARSE_FAIL)
6299 po_imm_or_fail (0, 31);
6300 inst.base.operands[i].prfop = aarch64_prfops + val;
6301 break;
6302
6303 case AARCH64_OPND_BARRIER_PSB:
6304 val = parse_barrier_psb (&str, &(info->hint_option));
6305 if (val == PARSE_FAIL)
6306 goto failure;
6307 break;
6308
6309 default:
6310 as_fatal (_("unhandled operand code %d"), operands[i]);
6311 }
6312
6313 /* If we get here, this operand was successfully parsed. */
6314 inst.base.operands[i].present = 1;
6315 continue;
6316
6317 failure:
6318 /* The parse routine should already have set the error, but in case
6319 not, set a default one here. */
6320 if (! error_p ())
6321 set_default_error ();
6322
6323 if (! backtrack_pos)
6324 goto parse_operands_return;
6325
6326 {
6327 /* We reach here because this operand is marked as optional, and
6328 either no operand was supplied or the operand was supplied but it
6329 was syntactically incorrect. In the latter case we report an
6330 error. In the former case we perform a few more checks before
6331 dropping through to the code to insert the default operand. */
6332
6333 char *tmp = backtrack_pos;
6334 char endchar = END_OF_INSN;
6335
6336 if (i != (aarch64_num_of_operands (opcode) - 1))
6337 endchar = ',';
6338 skip_past_char (&tmp, ',');
6339
6340 if (*tmp != endchar)
6341 /* The user has supplied an operand in the wrong format. */
6342 goto parse_operands_return;
6343
6344 /* Make sure there is not a comma before the optional operand.
6345 For example the fifth operand of 'sys' is optional:
6346
6347 sys #0,c0,c0,#0, <--- wrong
6348 sys #0,c0,c0,#0 <--- correct. */
6349 if (comma_skipped_p && i && endchar == END_OF_INSN)
6350 {
6351 set_fatal_syntax_error
6352 (_("unexpected comma before the omitted optional operand"));
6353 goto parse_operands_return;
6354 }
6355 }
6356
6357 /* Reaching here means we are dealing with an optional operand that is
6358 omitted from the assembly line. */
6359 gas_assert (optional_operand_p (opcode, i));
6360 info->present = 0;
6361 process_omitted_operand (operands[i], opcode, i, info);
6362
6363 /* Try again, skipping the optional operand at backtrack_pos. */
6364 str = backtrack_pos;
6365 backtrack_pos = 0;
6366
6367 /* Clear any error record after the omitted optional operand has been
6368 successfully handled. */
6369 clear_error ();
6370 }
6371
6372 /* Check if we have parsed all the operands. */
6373 if (*str != '\0' && ! error_p ())
6374 {
6375 /* Set I to the index of the last present operand; this is
6376 for the purpose of diagnostics. */
6377 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6378 ;
6379 set_fatal_syntax_error
6380 (_("unexpected characters following instruction"));
6381 }
6382
6383 parse_operands_return:
6384
6385 if (error_p ())
6386 {
6387 DEBUG_TRACE ("parsing FAIL: %s - %s",
6388 operand_mismatch_kind_names[get_error_kind ()],
6389 get_error_message ());
6390 /* Record the operand error properly; this is useful when there
6391 are multiple instruction templates for a mnemonic name, so that
6392 later on, we can select the error that most closely describes
6393 the problem. */
6394 record_operand_error (opcode, i, get_error_kind (),
6395 get_error_message ());
6396 return FALSE;
6397 }
6398 else
6399 {
6400 DEBUG_TRACE ("parsing SUCCESS");
6401 return TRUE;
6402 }
6403 }
6404
6405 /* Perform some fix-ups to provide programmer-friendly features while
6406 keeping libopcodes happy, i.e. libopcodes only accepts
6407 the preferred architectural syntax.
6408 Return FALSE if there is any failure; otherwise return TRUE. */
6409
6410 static bfd_boolean
6411 programmer_friendly_fixup (aarch64_instruction *instr)
6412 {
6413 aarch64_inst *base = &instr->base;
6414 const aarch64_opcode *opcode = base->opcode;
6415 enum aarch64_op op = opcode->op;
6416 aarch64_opnd_info *operands = base->operands;
6417
6418 DEBUG_TRACE ("enter");
6419
6420 switch (opcode->iclass)
6421 {
6422 case testbranch:
6423 /* TBNZ Xn|Wn, #uimm6, label
6424 Test and Branch Not Zero: conditionally jumps to label if bit number
6425 uimm6 in register Xn is not zero. The bit number implies the width of
6426 the register, which may be written and should be disassembled as Wn if
6427 uimm is less than 32. */
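/* For example, "tbnz w0, #3, lab" is accepted below and encoded as if
   written "tbnz x0, #3, lab", whereas "tbnz w0, #36, lab" is rejected
   because bit 36 does not exist in a W register (illustrative). */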
6428 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
6429 {
6430 if (operands[1].imm.value >= 32)
6431 {
6432 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
6433 0, 31);
6434 return FALSE;
6435 }
6436 operands[0].qualifier = AARCH64_OPND_QLF_X;
6437 }
6438 break;
6439 case loadlit:
6440 /* LDR Wt, label | =value
6441 As a convenience, assemblers will typically permit the notation
6442 "=value" in conjunction with the pc-relative literal load instructions
6443 to automatically place an immediate value or symbolic address in a
6444 nearby literal pool and generate a hidden label which references it.
6445 ISREG has been set to 0 in the case of =value. */
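/* For example, "ldr x0, =0x1122334455667788" asks for the 64-bit constant
   to be placed in a nearby literal pool, with the LDR assembled as a
   pc-relative load of that pool entry (the constant is illustrative). */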
6446 if (instr->gen_lit_pool
6447 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
6448 {
6449 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
6450 if (op == OP_LDRSW_LIT)
6451 size = 4;
6452 if (instr->reloc.exp.X_op != O_constant
6453 && instr->reloc.exp.X_op != O_big
6454 && instr->reloc.exp.X_op != O_symbol)
6455 {
6456 record_operand_error (opcode, 1,
6457 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
6458 _("constant expression expected"));
6459 return FALSE;
6460 }
6461 if (! add_to_lit_pool (&instr->reloc.exp, size))
6462 {
6463 record_operand_error (opcode, 1,
6464 AARCH64_OPDE_OTHER_ERROR,
6465 _("literal pool insertion failed"));
6466 return FALSE;
6467 }
6468 }
6469 break;
6470 case log_shift:
6471 case bitfield:
6472 /* UXT[BHW] Wd, Wn
6473 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
6474 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
6475 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
6476 A programmer-friendly assembler should accept a destination Xd in
6477 place of Wd; however, that is not the preferred form for disassembly.
6478 */
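/* For example, "uxtb x1, w2" is accepted below and assembled as if the
   programmer had written "uxtb w1, w2" (illustrative). */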
6479 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
6480 && operands[1].qualifier == AARCH64_OPND_QLF_W
6481 && operands[0].qualifier == AARCH64_OPND_QLF_X)
6482 operands[0].qualifier = AARCH64_OPND_QLF_W;
6483 break;
6484
6485 case addsub_ext:
6486 {
6487 /* In the 64-bit form, the final register operand is written as Wm
6488 for all but the (possibly omitted) UXTX/LSL and SXTX
6489 operators.
6490 As a programmer-friendly assembler, we accept e.g.
6491 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
6492 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
6493 int idx = aarch64_operand_index (opcode->operands,
6494 AARCH64_OPND_Rm_EXT);
6495 gas_assert (idx == 1 || idx == 2);
6496 if (operands[0].qualifier == AARCH64_OPND_QLF_X
6497 && operands[idx].qualifier == AARCH64_OPND_QLF_X
6498 && operands[idx].shifter.kind != AARCH64_MOD_LSL
6499 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
6500 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
6501 operands[idx].qualifier = AARCH64_OPND_QLF_W;
6502 }
6503 break;
6504
6505 default:
6506 break;
6507 }
6508
6509 DEBUG_TRACE ("exit with SUCCESS");
6510 return TRUE;
6511 }
6512
6513 /* Check for loads and stores that will cause unpredictable behavior. */
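/* For example, "ldr x0, [x0, #8]!" (write-back into the transfer register)
   and "ldp x0, x0, [x1]" (a pair load into the same register) both draw a
   warning from this routine (illustrative). */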
6514
6515 static void
6516 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6517 {
6518 aarch64_inst *base = &instr->base;
6519 const aarch64_opcode *opcode = base->opcode;
6520 const aarch64_opnd_info *opnds = base->operands;
6521 switch (opcode->iclass)
6522 {
6523 case ldst_pos:
6524 case ldst_imm9:
6525 case ldst_imm10:
6526 case ldst_unscaled:
6527 case ldst_unpriv:
6528 /* Loading/storing the base register is unpredictable if writeback. */
6529 if ((aarch64_get_operand_class (opnds[0].type)
6530 == AARCH64_OPND_CLASS_INT_REG)
6531 && opnds[0].reg.regno == opnds[1].addr.base_regno
6532 && opnds[1].addr.base_regno != REG_SP
6533 && opnds[1].addr.writeback)
6534 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6535 break;
6536 case ldstpair_off:
6537 case ldstnapair_offs:
6538 case ldstpair_indexed:
6539 /* Loading/storing the base register is unpredictable if writeback. */
6540 if ((aarch64_get_operand_class (opnds[0].type)
6541 == AARCH64_OPND_CLASS_INT_REG)
6542 && (opnds[0].reg.regno == opnds[2].addr.base_regno
6543 || opnds[1].reg.regno == opnds[2].addr.base_regno)
6544 && opnds[2].addr.base_regno != REG_SP
6545 && opnds[2].addr.writeback)
6546 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6547 /* Load operations must load different registers. */
6548 if ((opcode->opcode & (1 << 22))
6549 && opnds[0].reg.regno == opnds[1].reg.regno)
6550 as_warn (_("unpredictable load of register pair -- `%s'"), str);
6551 break;
6552 default:
6553 break;
6554 }
6555 }
6556
6557 /* A wrapper function to interface with libopcodes on encoding and
6558 record the error message if there is any.
6559
6560 Return TRUE on success; otherwise return FALSE. */
6561
6562 static bfd_boolean
6563 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6564 aarch64_insn *code)
6565 {
6566 aarch64_operand_error error_info;
6567 error_info.kind = AARCH64_OPDE_NIL;
6568 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
6569 return TRUE;
6570 else
6571 {
6572 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6573 record_operand_error_info (opcode, &error_info);
6574 return FALSE;
6575 }
6576 }
6577
6578 #ifdef DEBUG_AARCH64
6579 static inline void
6580 dump_opcode_operands (const aarch64_opcode *opcode)
6581 {
6582 int i = 0;
6583 while (opcode->operands[i] != AARCH64_OPND_NIL)
6584 {
6585 aarch64_verbose ("\t\t opnd%d: %s", i,
6586 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
6587 ? aarch64_get_operand_name (opcode->operands[i])
6588 : aarch64_get_operand_desc (opcode->operands[i]));
6589 ++i;
6590 }
6591 }
6592 #endif /* DEBUG_AARCH64 */
6593
6594 /* This is the guts of the machine-dependent assembler. STR points to a
6595 machine dependent instruction. This function is supposed to emit
6596 the frags/bytes it assembles to. */
6597
6598 void
6599 md_assemble (char *str)
6600 {
6601 char *p = str;
6602 templates *template;
6603 aarch64_opcode *opcode;
6604 aarch64_inst *inst_base;
6605 unsigned saved_cond;
6606
6607 /* Align the previous label if needed. */
6608 if (last_label_seen != NULL)
6609 {
6610 symbol_set_frag (last_label_seen, frag_now);
6611 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
6612 S_SET_SEGMENT (last_label_seen, now_seg);
6613 }
6614
6615 inst.reloc.type = BFD_RELOC_UNUSED;
6616
6617 DEBUG_TRACE ("\n\n");
6618 DEBUG_TRACE ("==============================");
6619 DEBUG_TRACE ("Enter md_assemble with %s", str);
6620
6621 template = opcode_lookup (&p);
6622 if (!template)
6623 {
6624 /* It wasn't an instruction, but it might be a register alias
6625 created by an "alias .req reg" directive. */
6626 if (!create_register_alias (str, p))
6627 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
6628 str);
6629 return;
6630 }
6631
6632 skip_whitespace (p);
6633 if (*p == ',')
6634 {
6635 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
6636 get_mnemonic_name (str), str);
6637 return;
6638 }
6639
6640 init_operand_error_report ();
6641
6642 /* Sections are assumed to start aligned. In an executable section, there is no
6643 MAP_DATA symbol pending. So we only align the address during
6644 MAP_DATA --> MAP_INSN transition.
6645 For other sections, this is not guaranteed. */
6646 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
6647 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
6648 frag_align_code (2, 0);
6649
6650 saved_cond = inst.cond;
6651 reset_aarch64_instruction (&inst);
6652 inst.cond = saved_cond;
6653
6654 /* Iterate through all opcode entries with the same mnemonic name. */
6655 do
6656 {
6657 opcode = template->opcode;
6658
6659 DEBUG_TRACE ("opcode %s found", opcode->name);
6660 #ifdef DEBUG_AARCH64
6661 if (debug_dump)
6662 dump_opcode_operands (opcode);
6663 #endif /* DEBUG_AARCH64 */
6664
6665 mapping_state (MAP_INSN);
6666
6667 inst_base = &inst.base;
6668 inst_base->opcode = opcode;
6669
6670 /* Truly conditionally executed instructions, e.g. b.cond. */
6671 if (opcode->flags & F_COND)
6672 {
6673 gas_assert (inst.cond != COND_ALWAYS);
6674 inst_base->cond = get_cond_from_value (inst.cond);
6675 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
6676 }
6677 else if (inst.cond != COND_ALWAYS)
6678 {
6679 /* It shouldn't arrive here, where the assembly looks like a
6680 conditional instruction but the found opcode is unconditional. */
6681 gas_assert (0);
6682 continue;
6683 }
6684
6685 if (parse_operands (p, opcode)
6686 && programmer_friendly_fixup (&inst)
6687 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
6688 {
6689 /* Check that this instruction is supported for this CPU. */
6690 if (!opcode->avariant
6691 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
6692 {
6693 as_bad (_("selected processor does not support `%s'"), str);
6694 return;
6695 }
6696
6697 warn_unpredictable_ldst (&inst, str);
6698
6699 if (inst.reloc.type == BFD_RELOC_UNUSED
6700 || !inst.reloc.need_libopcodes_p)
6701 output_inst (NULL);
6702 else
6703 {
6704 /* If there is relocation generated for the instruction,
6705 store the instruction information for the future fix-up. */
6706 struct aarch64_inst *copy;
6707 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
6708 copy = XNEW (struct aarch64_inst);
6709 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
6710 output_inst (copy);
6711 }
6712 return;
6713 }
6714
6715 template = template->next;
6716 if (template != NULL)
6717 {
6718 reset_aarch64_instruction (&inst);
6719 inst.cond = saved_cond;
6720 }
6721 }
6722 while (template != NULL);
6723
6724 /* Issue the error messages if any. */
6725 output_operand_error_report (str);
6726 }
6727
6728 /* Various frobbings of labels and their addresses. */
6729
6730 void
6731 aarch64_start_line_hook (void)
6732 {
6733 last_label_seen = NULL;
6734 }
6735
6736 void
6737 aarch64_frob_label (symbolS * sym)
6738 {
6739 last_label_seen = sym;
6740
6741 dwarf2_emit_label (sym);
6742 }
6743
6744 int
6745 aarch64_data_in_code (void)
6746 {
6747 if (!strncmp (input_line_pointer + 1, "data:", 5))
6748 {
6749 *input_line_pointer = '/';
6750 input_line_pointer += 5;
6751 *input_line_pointer = 0;
6752 return 1;
6753 }
6754
6755 return 0;
6756 }
6757
6758 char *
6759 aarch64_canonicalize_symbol_name (char *name)
6760 {
6761 int len;
6762
6763 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
6764 *(name + len - 5) = 0;
6765
6766 return name;
6767 }
6768 \f
6769 /* Table of all register names defined by default. The user can
6770 define additional names with .req. Note that all register names
6771 should appear in both upper and lowercase variants. Some registers
6772 also have mixed-case names. */
6773
6774 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
6775 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
6776 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
6777 #define REGSET16(p,t) \
6778 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
6779 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
6780 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
6781 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
6782 #define REGSET31(p,t) \
6783 REGSET16(p, t), \
6784 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
6785 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
6786 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
6787 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
6788 #define REGSET(p,t) \
6789 REGSET31(p,t), REGNUM(p,31,t)
6790
6791 /* These go into aarch64_reg_hsh hash-table. */
6792 static const reg_entry reg_names[] = {
6793 /* Integer registers. */
6794 REGSET31 (x, R_64), REGSET31 (X, R_64),
6795 REGSET31 (w, R_32), REGSET31 (W, R_32),
6796
6797 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
6798 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
6799 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
6800 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
6801 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
6802 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
6803
6804 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
6805 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
6806
6807 /* Floating-point single precision registers. */
6808 REGSET (s, FP_S), REGSET (S, FP_S),
6809
6810 /* Floating-point double precision registers. */
6811 REGSET (d, FP_D), REGSET (D, FP_D),
6812
6813 /* Floating-point half precision registers. */
6814 REGSET (h, FP_H), REGSET (H, FP_H),
6815
6816 /* Floating-point byte precision registers. */
6817 REGSET (b, FP_B), REGSET (B, FP_B),
6818
6819 /* Floating-point quad precision registers. */
6820 REGSET (q, FP_Q), REGSET (Q, FP_Q),
6821
6822 /* FP/SIMD registers. */
6823 REGSET (v, VN), REGSET (V, VN),
6824
6825 /* SVE vector registers. */
6826 REGSET (z, ZN), REGSET (Z, ZN),
6827
6828 /* SVE predicate registers. */
6829 REGSET16 (p, PN), REGSET16 (P, PN)
6830 };
6831
6832 #undef REGDEF
6833 #undef REGDEF_ALIAS
6834 #undef REGNUM
6835 #undef REGSET16
6836 #undef REGSET31
6837 #undef REGSET
6838
6839 #define N 1
6840 #define n 0
6841 #define Z 1
6842 #define z 0
6843 #define C 1
6844 #define c 0
6845 #define V 1
6846 #define v 0
6847 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
6848 static const asm_nzcv nzcv_names[] = {
6849 {"nzcv", B (n, z, c, v)},
6850 {"nzcV", B (n, z, c, V)},
6851 {"nzCv", B (n, z, C, v)},
6852 {"nzCV", B (n, z, C, V)},
6853 {"nZcv", B (n, Z, c, v)},
6854 {"nZcV", B (n, Z, c, V)},
6855 {"nZCv", B (n, Z, C, v)},
6856 {"nZCV", B (n, Z, C, V)},
6857 {"Nzcv", B (N, z, c, v)},
6858 {"NzcV", B (N, z, c, V)},
6859 {"NzCv", B (N, z, C, v)},
6860 {"NzCV", B (N, z, C, V)},
6861 {"NZcv", B (N, Z, c, v)},
6862 {"NZcV", B (N, Z, c, V)},
6863 {"NZCv", B (N, Z, C, v)},
6864 {"NZCV", B (N, Z, C, V)}
6865 };
6866
6867 #undef N
6868 #undef n
6869 #undef Z
6870 #undef z
6871 #undef C
6872 #undef c
6873 #undef V
6874 #undef v
6875 #undef B
6876 \f
6877 /* MD interface: bits in the object file. */
6878
6879 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6880 for use in the a.out file, and store them in the array pointed to by buf.
6881 This knows about the endian-ness of the target machine and does
6882 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
6883 2 (short) and 4 (long). Floating-point numbers are put out as a series of
6884 LITTLENUMS (shorts, here at least). */
6885
6886 void
6887 md_number_to_chars (char *buf, valueT val, int n)
6888 {
6889 if (target_big_endian)
6890 number_to_chars_bigendian (buf, val, n);
6891 else
6892 number_to_chars_littleendian (buf, val, n);
6893 }
6894
6895 /* MD interface: Sections. */
6896
6897 /* Estimate the size of a frag before relaxing. Assume everything fits in
6898 4 bytes. */
6899
6900 int
6901 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6902 {
6903 fragp->fr_var = 4;
6904 return 4;
6905 }
6906
6907 /* Round up a section size to the appropriate boundary. */
6908
6909 valueT
6910 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
6911 {
6912 return size;
6913 }
6914
6915 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
6916 of an rs_align_code fragment.
6917
6918 Here we fill the frag with the appropriate info for padding the
6919 output stream. The resulting frag will consist of a fixed (fr_fix)
6920 and of a repeating (fr_var) part.
6921
6922 The fixed content is always emitted before the repeating content and
6923 these two parts are used as follows in constructing the output:
6924 - the fixed part will be used to align to a valid instruction word
6925 boundary, in case that we start at a misaligned address; as no
6926 executable instruction can live at the misaligned location, we
6927 simply fill with zeros;
6928 - the variable part will be used to cover the remaining padding and
6929 we fill using the AArch64 NOP instruction.
6930
6931 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6932 enough storage space for up to 3 bytes for padding back to a valid
6933 instruction alignment and exactly 4 bytes to store the NOP pattern. */
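/* For example, if the alignment request begins 5 bytes before the next
   8-byte boundary, the fixed part emits a single zero byte to reach a
   4-byte boundary and the variable part then covers the remaining 4 bytes
   with one NOP (illustrative). */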
6934
6935 void
6936 aarch64_handle_align (fragS * fragP)
6937 {
6938 /* NOP = d503201f */
6939 /* AArch64 instructions are always little-endian. */
6940 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
6941
6942 int bytes, fix, noop_size;
6943 char *p;
6944
6945 if (fragP->fr_type != rs_align_code)
6946 return;
6947
6948 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
6949 p = fragP->fr_literal + fragP->fr_fix;
6950
6951 #ifdef OBJ_ELF
6952 gas_assert (fragP->tc_frag_data.recorded);
6953 #endif
6954
6955 noop_size = sizeof (aarch64_noop);
6956
6957 fix = bytes & (noop_size - 1);
6958 if (fix)
6959 {
6960 #ifdef OBJ_ELF
6961 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6962 #endif
6963 memset (p, 0, fix);
6964 p += fix;
6965 fragP->fr_fix += fix;
6966 }
6967
6968 if (noop_size)
6969 memcpy (p, aarch64_noop, noop_size);
6970 fragP->fr_var = noop_size;
6971 }
6972
6973 /* Perform target specific initialisation of a frag.
6974 Note - despite the name this initialisation is not done when the frag
6975 is created, but only when its type is assigned. A frag can be created
6976 and used a long time before its type is set, so beware of assuming that
6977 this initialisation is performed first. */
6978
6979 #ifndef OBJ_ELF
6980 void
6981 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6982 int max_chars ATTRIBUTE_UNUSED)
6983 {
6984 }
6985
6986 #else /* OBJ_ELF is defined. */
6987 void
6988 aarch64_init_frag (fragS * fragP, int max_chars)
6989 {
6990 /* Record a mapping symbol for alignment frags. We will delete this
6991 later if the alignment ends up empty. */
6992 if (!fragP->tc_frag_data.recorded)
6993 fragP->tc_frag_data.recorded = 1;
6994
6995 /* PR 21809: Do not set a mapping state for debug sections
6996 - it just confuses other tools. */
6997 if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
6998 return;
6999
7000 switch (fragP->fr_type)
7001 {
7002 case rs_align_test:
7003 case rs_fill:
7004 mapping_state_2 (MAP_DATA, max_chars);
7005 break;
7006 case rs_align:
7007 /* PR 20364: We can get alignment frags in code sections,
7008 so do not just assume that we should use the MAP_DATA state. */
7009 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
7010 break;
7011 case rs_align_code:
7012 mapping_state_2 (MAP_INSN, max_chars);
7013 break;
7014 default:
7015 break;
7016 }
7017 }
7018 \f
7019 /* Initialize the DWARF-2 unwind information for this procedure. */
7020
7021 void
7022 tc_aarch64_frame_initial_instructions (void)
7023 {
7024 cfi_add_CFA_def_cfa (REG_SP, 0);
7025 }
7026 #endif /* OBJ_ELF */
7027
7028 /* Convert REGNAME to a DWARF-2 register number. */
7029
7030 int
7031 tc_aarch64_regname_to_dw2regnum (char *regname)
7032 {
7033 const reg_entry *reg = parse_reg (&regname);
7034 if (reg == NULL)
7035 return -1;
7036
7037 switch (reg->type)
7038 {
7039 case REG_TYPE_SP_32:
7040 case REG_TYPE_SP_64:
7041 case REG_TYPE_R_32:
7042 case REG_TYPE_R_64:
7043 return reg->number;
7044
7045 case REG_TYPE_FP_B:
7046 case REG_TYPE_FP_H:
7047 case REG_TYPE_FP_S:
7048 case REG_TYPE_FP_D:
7049 case REG_TYPE_FP_Q:
7050 return reg->number + 64;
7051
7052 default:
7053 break;
7054 }
7055 return -1;
7056 }
7057
7058 /* Implement DWARF2_ADDR_SIZE. */
7059
7060 int
7061 aarch64_dwarf2_addr_size (void)
7062 {
7063 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7064 if (ilp32_p)
7065 return 4;
7066 #endif
7067 return bfd_arch_bits_per_address (stdoutput) / 8;
7068 }
7069
7070 /* MD interface: Symbol and relocation handling. */
7071
7072 /* Return the address within the segment that a PC-relative fixup is
7073 relative to. For AArch64, PC-relative fixups applied to instructions
7074 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7075
7076 long
7077 md_pcrel_from_section (fixS * fixP, segT seg)
7078 {
7079 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7080
7081 /* If this is pc-relative and we are going to emit a relocation
7082 then we just want to put out any pipeline compensation that the linker
7083 will need. Otherwise we want to use the calculated base. */
7084 if (fixP->fx_pcrel
7085 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7086 || aarch64_force_relocation (fixP)))
7087 base = 0;
7088
7089 /* AArch64 should be consistent for all pc-relative relocations. */
7090 return base + AARCH64_PCREL_OFFSET;
7091 }
7092
7093 /* Under ELF we need to provide a default for _GLOBAL_OFFSET_TABLE_.
7094 Otherwise we have no need to default the values of symbols. */
7095
7096 symbolS *
7097 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
7098 {
7099 #ifdef OBJ_ELF
7100 if (name[0] == '_' && name[1] == 'G'
7101 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
7102 {
7103 if (!GOT_symbol)
7104 {
7105 if (symbol_find (name))
7106 as_bad (_("GOT already in the symbol table"));
7107
7108 GOT_symbol = symbol_new (name, undefined_section,
7109 (valueT) 0, &zero_address_frag);
7110 }
7111
7112 return GOT_symbol;
7113 }
7114 #endif
7115
7116 return 0;
7117 }
7118
7119 /* Return non-zero if the indicated VALUE has overflowed the maximum
7120 range expressible by an unsigned number with the indicated number of
7121 BITS. */
7122
7123 static bfd_boolean
7124 unsigned_overflow (valueT value, unsigned bits)
7125 {
7126 valueT lim;
7127 if (bits >= sizeof (valueT) * 8)
7128 return FALSE;
7129 lim = (valueT) 1 << bits;
7130 return (value >= lim);
7131 }
7132
7133
7134 /* Return non-zero if the indicated VALUE has overflowed the maximum
7135 range expressible by a signed number with the indicated number of
7136 BITS. */
7137
7138 static bfd_boolean
7139 signed_overflow (offsetT value, unsigned bits)
7140 {
7141 offsetT lim;
7142 if (bits >= sizeof (offsetT) * 8)
7143 return FALSE;
7144 lim = (offsetT) 1 << (bits - 1);
7145 return (value < -lim || value >= lim);
7146 }
7147
7148 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7149 unsigned immediate offset load/store instruction, try to encode it as
7150 an unscaled, 9-bit, signed immediate offset load/store instruction.
7151 Return TRUE if it is successful; otherwise return FALSE.
7152
7153 As a programmer-friendly assembler, we generate LDUR/STUR instructions
7154 in response to the standard LDR/STR mnemonics when the immediate offset is
7155 unambiguous, i.e. when it is negative or unaligned. */
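/* For example, an "ldr x0, [x1, #off]" whose offset resolves to -8, or to
   an unaligned value such as 3, cannot use the scaled unsigned form and is
   re-encoded here as the equivalent LDUR (illustrative). */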
7156
7157 static bfd_boolean
7158 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
7159 {
7160 int idx;
7161 enum aarch64_op new_op;
7162 const aarch64_opcode *new_opcode;
7163
7164 gas_assert (instr->opcode->iclass == ldst_pos);
7165
7166 switch (instr->opcode->op)
7167 {
7168 case OP_LDRB_POS:new_op = OP_LDURB; break;
7169 case OP_STRB_POS: new_op = OP_STURB; break;
7170 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
7171 case OP_LDRH_POS: new_op = OP_LDURH; break;
7172 case OP_STRH_POS: new_op = OP_STURH; break;
7173 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
7174 case OP_LDR_POS: new_op = OP_LDUR; break;
7175 case OP_STR_POS: new_op = OP_STUR; break;
7176 case OP_LDRF_POS: new_op = OP_LDURV; break;
7177 case OP_STRF_POS: new_op = OP_STURV; break;
7178 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
7179 case OP_PRFM_POS: new_op = OP_PRFUM; break;
7180 default: new_op = OP_NIL; break;
7181 }
7182
7183 if (new_op == OP_NIL)
7184 return FALSE;
7185
7186 new_opcode = aarch64_get_opcode (new_op);
7187 gas_assert (new_opcode != NULL);
7188
7189 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
7190 instr->opcode->op, new_opcode->op);
7191
7192 aarch64_replace_opcode (instr, new_opcode);
7193
7194 /* Clear the ADDR_SIMM9 operand's qualifier; otherwise the
7195 qualifier matching may fail because the out-of-date qualifier will
7196 prevent the operand from being updated with a new and correct qualifier. */
7197 idx = aarch64_operand_index (instr->opcode->operands,
7198 AARCH64_OPND_ADDR_SIMM9);
7199 gas_assert (idx == 1);
7200 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
7201
7202 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
7203
7204 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
7205 return FALSE;
7206
7207 return TRUE;
7208 }
7209
7210 /* Called by fix_insn to fix a MOV immediate alias instruction.
7211
7212 Operand for a generic move immediate instruction, which is an alias
7213 instruction that generates a single MOVZ, MOVN or ORR instruction to load
7214 a 32-bit/64-bit immediate value into a general register. An assembler error
7215 shall result if the immediate cannot be created by a single one of these
7216 instructions. If there is a choice, then to ensure reversibility an
7217 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
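/* For example, a MOV whose symbolic operand resolves to 0x10000 is fixed
   up as MOVZ, one resolving to -1 becomes MOVN, and one resolving to
   0xff00ff00ff00ff00 becomes ORR with a bitmask immediate (illustrative
   values). */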
7218
7219 static void
7220 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
7221 {
7222 const aarch64_opcode *opcode;
7223
7224 /* Need to check if the destination is SP/ZR. The check has to be done
7225 before any aarch64_replace_opcode. */
7226 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
7227 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
7228
7229 instr->operands[1].imm.value = value;
7230 instr->operands[1].skip = 0;
7231
7232 if (try_mov_wide_p)
7233 {
7234 /* Try the MOVZ alias. */
7235 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
7236 aarch64_replace_opcode (instr, opcode);
7237 if (aarch64_opcode_encode (instr->opcode, instr,
7238 &instr->value, NULL, NULL))
7239 {
7240 put_aarch64_insn (buf, instr->value);
7241 return;
7242 }
7243 /* Try the MOVN alias. */
7244 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
7245 aarch64_replace_opcode (instr, opcode);
7246 if (aarch64_opcode_encode (instr->opcode, instr,
7247 &instr->value, NULL, NULL))
7248 {
7249 put_aarch64_insn (buf, instr->value);
7250 return;
7251 }
7252 }
7253
7254 if (try_mov_bitmask_p)
7255 {
7256 /* Try the ORR alias. */
7257 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
7258 aarch64_replace_opcode (instr, opcode);
7259 if (aarch64_opcode_encode (instr->opcode, instr,
7260 &instr->value, NULL, NULL))
7261 {
7262 put_aarch64_insn (buf, instr->value);
7263 return;
7264 }
7265 }
7266
7267 as_bad_where (fixP->fx_file, fixP->fx_line,
7268 _("immediate cannot be moved by a single instruction"));
7269 }
7270
7271 /* An instruction operand which is immediate related may have a symbol used
7272 in the assembly, e.g.
7273
7274 mov w0, u32
7275 .set u32, 0x00ffff00
7276
7277 At the time when the assembly instruction is parsed, a referenced symbol,
7278 like 'u32' in the above example, may not have been seen; a fixS is created
7279 in such a case and is handled here after symbols have been resolved.
7280 The instruction is fixed up with VALUE using the information in *FIXP plus
7281 extra information in FLAGS.
7282
7283 This function is called by md_apply_fix to fix up instructions that need
7284 a fix-up described above but does not involve any linker-time relocation. */
7285
7286 static void
7287 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
7288 {
7289 int idx;
7290 uint32_t insn;
7291 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7292 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
7293 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
7294
7295 if (new_inst)
7296 {
7297 /* Now the instruction is about to be fixed-up, so the operand that
7298 was previously marked as 'ignored' needs to be unmarked in order
7299 to get the encoding done properly. */
7300 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7301 new_inst->operands[idx].skip = 0;
7302 }
7303
7304 gas_assert (opnd != AARCH64_OPND_NIL);
7305
7306 switch (opnd)
7307 {
7308 case AARCH64_OPND_EXCEPTION:
7309 if (unsigned_overflow (value, 16))
7310 as_bad_where (fixP->fx_file, fixP->fx_line,
7311 _("immediate out of range"));
7312 insn = get_aarch64_insn (buf);
7313 insn |= encode_svc_imm (value);
7314 put_aarch64_insn (buf, insn);
7315 break;
7316
7317 case AARCH64_OPND_AIMM:
7318 /* ADD or SUB with immediate.
7319 NOTE this assumes we come here with an add/sub shifted reg encoding
7320 3 322|2222|2 2 2 21111 111111
7321 1 098|7654|3 2 1 09876 543210 98765 43210
7322 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
7323 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
7324 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
7325 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
7326 ->
7327 3 322|2222|2 2 221111111111
7328 1 098|7654|3 2 109876543210 98765 43210
7329 11000000 sf 001|0001|shift imm12 Rn Rd ADD
7330 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
7331 51000000 sf 101|0001|shift imm12 Rn Rd SUB
7332 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
7333 Fields sf Rn Rd are already set. */
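/* For example, a fixed-up value of -3 flips ADD to SUB (or vice versa)
   with immediate 3, and a value such as 0x5000 is encoded as immediate 5
   with the LSL #12 shift flag set (illustrative). */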
7334 insn = get_aarch64_insn (buf);
7335 if (value < 0)
7336 {
7337 /* Add <-> sub. */
7338 insn = reencode_addsub_switch_add_sub (insn);
7339 value = -value;
7340 }
7341
7342 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
7343 && unsigned_overflow (value, 12))
7344 {
7345 /* Try to shift the value by 12 to make it fit. */
7346 if (((value >> 12) << 12) == value
7347 && ! unsigned_overflow (value, 12 + 12))
7348 {
7349 value >>= 12;
7350 insn |= encode_addsub_imm_shift_amount (1);
7351 }
7352 }
7353
7354 if (unsigned_overflow (value, 12))
7355 as_bad_where (fixP->fx_file, fixP->fx_line,
7356 _("immediate out of range"));
7357
7358 insn |= encode_addsub_imm (value);
7359
7360 put_aarch64_insn (buf, insn);
7361 break;
7362
7363 case AARCH64_OPND_SIMD_IMM:
7364 case AARCH64_OPND_SIMD_IMM_SFT:
7365 case AARCH64_OPND_LIMM:
7366 /* Bit mask immediate. */
7367 gas_assert (new_inst != NULL);
7368 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7369 new_inst->operands[idx].imm.value = value;
7370 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7371 &new_inst->value, NULL, NULL))
7372 put_aarch64_insn (buf, new_inst->value);
7373 else
7374 as_bad_where (fixP->fx_file, fixP->fx_line,
7375 _("invalid immediate"));
7376 break;
7377
7378 case AARCH64_OPND_HALF:
7379 /* 16-bit unsigned immediate. */
7380 if (unsigned_overflow (value, 16))
7381 as_bad_where (fixP->fx_file, fixP->fx_line,
7382 _("immediate out of range"));
7383 insn = get_aarch64_insn (buf);
7384 insn |= encode_movw_imm (value & 0xffff);
7385 put_aarch64_insn (buf, insn);
7386 break;
7387
7388 case AARCH64_OPND_IMM_MOV:
7389 /* Operand for a generic move immediate instruction, which is
7390 an alias instruction that generates a single MOVZ, MOVN or ORR
7391 instruction to load a 32-bit/64-bit immediate value into a general
7392 register. An assembler error shall result if the immediate cannot be
7393 created by a single one of these instructions. If there is a choice,
7394 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
7395 and MOVZ or MOVN to ORR. */
7396 gas_assert (new_inst != NULL);
7397 fix_mov_imm_insn (fixP, buf, new_inst, value);
7398 break;
7399
7400 case AARCH64_OPND_ADDR_SIMM7:
7401 case AARCH64_OPND_ADDR_SIMM9:
7402 case AARCH64_OPND_ADDR_SIMM9_2:
7403 case AARCH64_OPND_ADDR_SIMM10:
7404 case AARCH64_OPND_ADDR_UIMM12:
7405 /* Immediate offset in an address. */
7406 insn = get_aarch64_insn (buf);
7407
7408 gas_assert (new_inst != NULL && new_inst->value == insn);
7409 gas_assert (new_inst->opcode->operands[1] == opnd
7410 || new_inst->opcode->operands[2] == opnd);
7411
7412 /* Get the index of the address operand. */
7413 if (new_inst->opcode->operands[1] == opnd)
7414 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
7415 idx = 1;
7416 else
7417 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
7418 idx = 2;
7419
7420 /* Update the resolved offset value. */
7421 new_inst->operands[idx].addr.offset.imm = value;
7422
7423 /* Encode/fix-up. */
7424 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7425 &new_inst->value, NULL, NULL))
7426 {
7427 put_aarch64_insn (buf, new_inst->value);
7428 break;
7429 }
7430 else if (new_inst->opcode->iclass == ldst_pos
7431 && try_to_encode_as_unscaled_ldst (new_inst))
7432 {
7433 put_aarch64_insn (buf, new_inst->value);
7434 break;
7435 }
7436
7437 as_bad_where (fixP->fx_file, fixP->fx_line,
7438 _("immediate offset out of range"));
7439 break;
7440
7441 default:
7442 gas_assert (0);
7443 as_fatal (_("unhandled operand code %d"), opnd);
7444 }
7445 }
7446
7447 /* Apply a fixup (fixP) to segment data, once it has been determined
7448 by our caller that we have all the info we need to fix it up.
7449
7450 Parameter valP is the pointer to the value of the bits. */
7451
7452 void
7453 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7454 {
7455 offsetT value = *valP;
7456 uint32_t insn;
7457 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7458 int scale;
7459 unsigned flags = fixP->fx_addnumber;
7460
7461 DEBUG_TRACE ("\n\n");
7462 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7463 DEBUG_TRACE ("Enter md_apply_fix");
7464
7465 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7466
7467 /* Note whether this will delete the relocation. */
7468
7469 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7470 fixP->fx_done = 1;
7471
7472 /* Process the relocations. */
7473 switch (fixP->fx_r_type)
7474 {
7475 case BFD_RELOC_NONE:
7476 /* This will need to go in the object file. */
7477 fixP->fx_done = 0;
7478 break;
7479
7480 case BFD_RELOC_8:
7481 case BFD_RELOC_8_PCREL:
7482 if (fixP->fx_done || !seg->use_rela_p)
7483 md_number_to_chars (buf, value, 1);
7484 break;
7485
7486 case BFD_RELOC_16:
7487 case BFD_RELOC_16_PCREL:
7488 if (fixP->fx_done || !seg->use_rela_p)
7489 md_number_to_chars (buf, value, 2);
7490 break;
7491
7492 case BFD_RELOC_32:
7493 case BFD_RELOC_32_PCREL:
7494 if (fixP->fx_done || !seg->use_rela_p)
7495 md_number_to_chars (buf, value, 4);
7496 break;
7497
7498 case BFD_RELOC_64:
7499 case BFD_RELOC_64_PCREL:
7500 if (fixP->fx_done || !seg->use_rela_p)
7501 md_number_to_chars (buf, value, 8);
7502 break;
7503
7504 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7505 /* We claim that these fixups have been processed here, even if
7506 in fact we generate an error because we do not have a reloc
7507 for them, so tc_gen_reloc() will reject them. */
7508 fixP->fx_done = 1;
7509 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7510 {
7511 as_bad_where (fixP->fx_file, fixP->fx_line,
7512 _("undefined symbol %s used as an immediate value"),
7513 S_GET_NAME (fixP->fx_addsy));
7514 goto apply_fix_return;
7515 }
7516 fix_insn (fixP, flags, value);
7517 break;
7518
7519 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7520 if (fixP->fx_done || !seg->use_rela_p)
7521 {
7522 if (value & 3)
7523 as_bad_where (fixP->fx_file, fixP->fx_line,
7524 _("pc-relative load offset not word aligned"));
7525 if (signed_overflow (value, 21))
7526 as_bad_where (fixP->fx_file, fixP->fx_line,
7527 _("pc-relative load offset out of range"));
7528 insn = get_aarch64_insn (buf);
7529 insn |= encode_ld_lit_ofs_19 (value >> 2);
7530 put_aarch64_insn (buf, insn);
7531 }
7532 break;
7533
7534 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7535 if (fixP->fx_done || !seg->use_rela_p)
7536 {
7537 if (signed_overflow (value, 21))
7538 as_bad_where (fixP->fx_file, fixP->fx_line,
7539 _("pc-relative address offset out of range"));
7540 insn = get_aarch64_insn (buf);
7541 insn |= encode_adr_imm (value);
7542 put_aarch64_insn (buf, insn);
7543 }
7544 break;
7545
7546 case BFD_RELOC_AARCH64_BRANCH19:
7547 if (fixP->fx_done || !seg->use_rela_p)
7548 {
7549 if (value & 3)
7550 as_bad_where (fixP->fx_file, fixP->fx_line,
7551 _("conditional branch target not word aligned"));
7552 if (signed_overflow (value, 21))
7553 as_bad_where (fixP->fx_file, fixP->fx_line,
7554 _("conditional branch out of range"));
7555 insn = get_aarch64_insn (buf);
7556 insn |= encode_cond_branch_ofs_19 (value >> 2);
7557 put_aarch64_insn (buf, insn);
7558 }
7559 break;
7560
7561 case BFD_RELOC_AARCH64_TSTBR14:
7562 if (fixP->fx_done || !seg->use_rela_p)
7563 {
7564 if (value & 3)
7565 as_bad_where (fixP->fx_file, fixP->fx_line,
7566 _("conditional branch target not word aligned"));
7567 if (signed_overflow (value, 16))
7568 as_bad_where (fixP->fx_file, fixP->fx_line,
7569 _("conditional branch out of range"));
7570 insn = get_aarch64_insn (buf);
7571 insn |= encode_tst_branch_ofs_14 (value >> 2);
7572 put_aarch64_insn (buf, insn);
7573 }
7574 break;
7575
7576 case BFD_RELOC_AARCH64_CALL26:
7577 case BFD_RELOC_AARCH64_JUMP26:
7578 if (fixP->fx_done || !seg->use_rela_p)
7579 {
7580 if (value & 3)
7581 as_bad_where (fixP->fx_file, fixP->fx_line,
7582 _("branch target not word aligned"));
7583 if (signed_overflow (value, 28))
7584 as_bad_where (fixP->fx_file, fixP->fx_line,
7585 _("branch out of range"));
7586 insn = get_aarch64_insn (buf);
7587 insn |= encode_branch_ofs_26 (value >> 2);
7588 put_aarch64_insn (buf, insn);
7589 }
7590 break;
7591
7592 case BFD_RELOC_AARCH64_MOVW_G0:
7593 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7594 case BFD_RELOC_AARCH64_MOVW_G0_S:
7595 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7596 scale = 0;
7597 goto movw_common;
7598 case BFD_RELOC_AARCH64_MOVW_G1:
7599 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7600 case BFD_RELOC_AARCH64_MOVW_G1_S:
7601 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7602 scale = 16;
7603 goto movw_common;
7604 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7605 scale = 0;
7606 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7607 /* Should always be exported to object file, see
7608 aarch64_force_relocation(). */
7609 gas_assert (!fixP->fx_done);
7610 gas_assert (seg->use_rela_p);
7611 goto movw_common;
7612 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7613 scale = 16;
7614 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7615 /* Should always be exported to object file, see
7616 aarch64_force_relocation(). */
7617 gas_assert (!fixP->fx_done);
7618 gas_assert (seg->use_rela_p);
7619 goto movw_common;
7620 case BFD_RELOC_AARCH64_MOVW_G2:
7621 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7622 case BFD_RELOC_AARCH64_MOVW_G2_S:
7623 scale = 32;
7624 goto movw_common;
7625 case BFD_RELOC_AARCH64_MOVW_G3:
7626 scale = 48;
7627 movw_common:
7628 if (fixP->fx_done || !seg->use_rela_p)
7629 {
7630 insn = get_aarch64_insn (buf);
7631
7632 if (!fixP->fx_done)
7633 {
7634 /* REL signed addend must fit in 16 bits */
7635 if (signed_overflow (value, 16))
7636 as_bad_where (fixP->fx_file, fixP->fx_line,
7637 _("offset out of range"));
7638 }
7639 else
7640 {
7641 /* Check for overflow and scale. */
7642 switch (fixP->fx_r_type)
7643 {
7644 case BFD_RELOC_AARCH64_MOVW_G0:
7645 case BFD_RELOC_AARCH64_MOVW_G1:
7646 case BFD_RELOC_AARCH64_MOVW_G2:
7647 case BFD_RELOC_AARCH64_MOVW_G3:
7648 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7649 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7650 if (unsigned_overflow (value, scale + 16))
7651 as_bad_where (fixP->fx_file, fixP->fx_line,
7652 _("unsigned value out of range"));
7653 break;
7654 case BFD_RELOC_AARCH64_MOVW_G0_S:
7655 case BFD_RELOC_AARCH64_MOVW_G1_S:
7656 case BFD_RELOC_AARCH64_MOVW_G2_S:
7657 /* NOTE: We can only come here with movz or movn. */
7658 if (signed_overflow (value, scale + 16))
7659 as_bad_where (fixP->fx_file, fixP->fx_line,
7660 _("signed value out of range"));
7661 if (value < 0)
7662 {
7663 /* Force use of MOVN. */
7664 value = ~value;
7665 insn = reencode_movzn_to_movn (insn);
7666 }
7667 else
7668 {
7669 /* Force use of MOVZ. */
7670 insn = reencode_movzn_to_movz (insn);
7671 }
7672 break;
7673 default:
7674 /* Unchecked relocations. */
7675 break;
7676 }
7677 value >>= scale;
7678 }
7679
7680 /* Insert value into MOVN/MOVZ/MOVK instruction. */
7681 insn |= encode_movw_imm (value & 0xffff);
7682
7683 put_aarch64_insn (buf, insn);
7684 }
7685 break;
7686
7687 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7688 fixP->fx_r_type = (ilp32_p
7689 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7690 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7691 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7692 /* Should always be exported to object file, see
7693 aarch64_force_relocation(). */
7694 gas_assert (!fixP->fx_done);
7695 gas_assert (seg->use_rela_p);
7696 break;
7697
7698 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7699 fixP->fx_r_type = (ilp32_p
7700 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
7701 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
7702 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7703 /* Should always be exported to object file, see
7704 aarch64_force_relocation(). */
7705 gas_assert (!fixP->fx_done);
7706 gas_assert (seg->use_rela_p);
7707 break;
7708
7709 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
7710 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7711 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7712 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7713 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
7714 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7715 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7716 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7717 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7718 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7719 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7720 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7721 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7722 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7723 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7724 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7725 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7726 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7727 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7728 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7729 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7730 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7731 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7732 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7733 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7734 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7735 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7736 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7737 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7738 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7739 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7740 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7741 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7742 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7743 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7744 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7745 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7746 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7747 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7748 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7749 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7750 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7751 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7752 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7753 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7754 /* Should always be exported to object file, see
7755 aarch64_force_relocation(). */
7756 gas_assert (!fixP->fx_done);
7757 gas_assert (seg->use_rela_p);
7758 break;
7759
7760 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7761 /* Should always be exported to object file, see
7762 aarch64_force_relocation(). */
7763 fixP->fx_r_type = (ilp32_p
7764 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
7765 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
7766 gas_assert (!fixP->fx_done);
7767 gas_assert (seg->use_rela_p);
7768 break;
7769
7770 case BFD_RELOC_AARCH64_ADD_LO12:
7771 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7772 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7773 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7774 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7775 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7776 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7777 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7778 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7779 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7780 case BFD_RELOC_AARCH64_LDST128_LO12:
7781 case BFD_RELOC_AARCH64_LDST16_LO12:
7782 case BFD_RELOC_AARCH64_LDST32_LO12:
7783 case BFD_RELOC_AARCH64_LDST64_LO12:
7784 case BFD_RELOC_AARCH64_LDST8_LO12:
7785 /* Should always be exported to object file, see
7786 aarch64_force_relocation(). */
7787 gas_assert (!fixP->fx_done);
7788 gas_assert (seg->use_rela_p);
7789 break;
7790
7791 case BFD_RELOC_AARCH64_TLSDESC_ADD:
7792 case BFD_RELOC_AARCH64_TLSDESC_CALL:
7793 case BFD_RELOC_AARCH64_TLSDESC_LDR:
7794 break;
7795
7796 case BFD_RELOC_UNUSED:
7797 /* An error will already have been reported. */
7798 break;
7799
7800 default:
7801 as_bad_where (fixP->fx_file, fixP->fx_line,
7802 _("unexpected %s fixup"),
7803 bfd_get_reloc_code_name (fixP->fx_r_type));
7804 break;
7805 }
7806
7807 apply_fix_return:
7808 /* Free the allocated struct aarch64_inst.
7809 N.B. currently only a very limited number of fix-up types actually use
7810 this field, so the impact on performance should be minimal. */
7811 if (fixP->tc_fix_data.inst != NULL)
7812 free (fixP->tc_fix_data.inst);
7813
7814 return;
7815 }
7816
7817 /* Translate internal representation of relocation info to BFD target
7818 format. */
7819
7820 arelent *
7821 tc_gen_reloc (asection * section, fixS * fixp)
7822 {
7823 arelent *reloc;
7824 bfd_reloc_code_real_type code;
7825
7826 reloc = XNEW (arelent);
7827
7828 reloc->sym_ptr_ptr = XNEW (asymbol *);
7829 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7830 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7831
7832 if (fixp->fx_pcrel)
7833 {
7834 if (section->use_rela_p)
7835 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7836 else
7837 fixp->fx_offset = reloc->address;
7838 }
7839 reloc->addend = fixp->fx_offset;
7840
7841 code = fixp->fx_r_type;
7842 switch (code)
7843 {
7844 case BFD_RELOC_16:
7845 if (fixp->fx_pcrel)
7846 code = BFD_RELOC_16_PCREL;
7847 break;
7848
7849 case BFD_RELOC_32:
7850 if (fixp->fx_pcrel)
7851 code = BFD_RELOC_32_PCREL;
7852 break;
7853
7854 case BFD_RELOC_64:
7855 if (fixp->fx_pcrel)
7856 code = BFD_RELOC_64_PCREL;
7857 break;
7858
7859 default:
7860 break;
7861 }
7862
7863 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7864 if (reloc->howto == NULL)
7865 {
7866 as_bad_where (fixp->fx_file, fixp->fx_line,
7867 _
7868 ("cannot represent %s relocation in this object file format"),
7869 bfd_get_reloc_code_name (code));
7870 return NULL;
7871 }
7872
7873 return reloc;
7874 }
7875
7876 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
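/* For example, a ".4byte sym" directive reaches here with SIZE == 4 and is
   given a BFD_RELOC_32 fixup, while 8-byte data gets BFD_RELOC_64.  */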
7877
7878 void
7879 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7880 {
7881 bfd_reloc_code_real_type type;
7882 int pcrel = 0;
7883
7884 /* Pick a reloc.
7885 FIXME: @@ Should look at CPU word size. */
7886 switch (size)
7887 {
7888 case 1:
7889 type = BFD_RELOC_8;
7890 break;
7891 case 2:
7892 type = BFD_RELOC_16;
7893 break;
7894 case 4:
7895 type = BFD_RELOC_32;
7896 break;
7897 case 8:
7898 type = BFD_RELOC_64;
7899 break;
7900 default:
7901 as_bad (_("cannot do %u-byte relocation"), size);
7902 type = BFD_RELOC_UNUSED;
7903 break;
7904 }
7905
7906 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7907 }
7908
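/* Implement TC_FORCE_RELOCATION.  Return non-zero if a relocation must be
   emitted for the fixup FIXP even when its symbol could be resolved locally;
   returning zero lets the assembler try to resolve the fixup itself.  */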
7909 int
7910 aarch64_force_relocation (struct fix *fixp)
7911 {
7912 switch (fixp->fx_r_type)
7913 {
7914 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7915 /* Perform these "immediate" internal relocations
7916 even if the symbol is extern or weak. */
7917 return 0;
7918
7919 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7920 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7921 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7922 /* Pseudo relocs that need to be fixed up according to
7923 ilp32_p. */
7924 return 0;
7925
7926 case BFD_RELOC_AARCH64_ADD_LO12:
7927 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7928 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7929 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7930 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7931 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7932 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7933 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7934 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7935 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7936 case BFD_RELOC_AARCH64_LDST128_LO12:
7937 case BFD_RELOC_AARCH64_LDST16_LO12:
7938 case BFD_RELOC_AARCH64_LDST32_LO12:
7939 case BFD_RELOC_AARCH64_LDST64_LO12:
7940 case BFD_RELOC_AARCH64_LDST8_LO12:
7941 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
7942 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7943 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7944 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7945 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
7946 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7947 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7948 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7949 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7950 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7951 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7952 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7953 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7954 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7955 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7956 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7957 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7958 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7959 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7960 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7961 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7962 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7963 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7964 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7965 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7966 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7967 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7968 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7969 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7970 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7971 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7972 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7973 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7974 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7975 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7976 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7977 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7978 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7979 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7980 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7981 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7982 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7983 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7984 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7985 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7986 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7987 /* Always leave these relocations for the linker. */
7988 return 1;
7989
7990 default:
7991 break;
7992 }
7993
7994 return generic_force_reloc (fixp);
7995 }
7996
7997 #ifdef OBJ_ELF
7998
7999 /* Implement md_after_parse_args. This is the earliest point at which we need to
8000 decide the ABI. If no -mabi option is given, the ABI is decided by the target triplet. */
8001
8002 void
8003 aarch64_after_parse_args (void)
8004 {
8005 if (aarch64_abi != AARCH64_ABI_NONE)
8006 return;
8007
8008 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
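/* E.g. a DEFAULT_ARCH of "aarch64:32" selects ILP32 here, while the plain
   "aarch64" default falls through to LP64.  */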
8009 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8010 aarch64_abi = AARCH64_ABI_ILP32;
8011 else
8012 aarch64_abi = AARCH64_ABI_LP64;
8013 }
8014
8015 const char *
8016 elf64_aarch64_target_format (void)
8017 {
8018 if (strcmp (TARGET_OS, "cloudabi") == 0)
8019 {
8020 /* FIXME: What to do for ilp32_p ? */
8021 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
8022 }
8023 if (target_big_endian)
8024 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8025 else
8026 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8027 }
8028
8029 void
8030 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
8031 {
8032 elf_frob_symbol (symp, puntp);
8033 }
8034 #endif
8035
8036 /* MD interface: Finalization. */
8037
8038 /* A good place to do this, although this was probably not intended
8039 for this kind of use. We need to dump the literal pool before
8040 references are made to a null symbol pointer. */
8041
8042 void
8043 aarch64_cleanup (void)
8044 {
8045 literal_pool *pool;
8046
8047 for (pool = list_of_pools; pool; pool = pool->next)
8048 {
8049 /* Put it at the end of the relevant section. */
8050 subseg_set (pool->section, pool->sub_section);
8051 s_ltorg (0);
8052 }
8053 }
8054
8055 #ifdef OBJ_ELF
8056 /* Remove any excess mapping symbols generated for alignment frags in
8057 SEC. We may have created a mapping symbol before a zero byte
8058 alignment; remove it if there's a mapping symbol after the
8059 alignment. */
8060 static void
8061 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
8062 void *dummy ATTRIBUTE_UNUSED)
8063 {
8064 segment_info_type *seginfo = seg_info (sec);
8065 fragS *fragp;
8066
8067 if (seginfo == NULL || seginfo->frchainP == NULL)
8068 return;
8069
8070 for (fragp = seginfo->frchainP->frch_root;
8071 fragp != NULL; fragp = fragp->fr_next)
8072 {
8073 symbolS *sym = fragp->tc_frag_data.last_map;
8074 fragS *next = fragp->fr_next;
8075
8076 /* Variable-sized frags have been converted to fixed size by
8077 this point. But if this was variable-sized to start with,
8078 there will be a fixed-size frag after it. So don't handle
8079 next == NULL. */
8080 if (sym == NULL || next == NULL)
8081 continue;
8082
8083 if (S_GET_VALUE (sym) < next->fr_address)
8084 /* Not at the end of this frag. */
8085 continue;
8086 know (S_GET_VALUE (sym) == next->fr_address);
8087
8088 do
8089 {
8090 if (next->tc_frag_data.first_map != NULL)
8091 {
8092 /* Next frag starts with a mapping symbol. Discard this
8093 one. */
8094 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
8095 break;
8096 }
8097
8098 if (next->fr_next == NULL)
8099 {
8100 /* This mapping symbol is at the end of the section. Discard
8101 it. */
8102 know (next->fr_fix == 0 && next->fr_var == 0);
8103 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
8104 break;
8105 }
8106
8107 /* As long as we have empty frags without any mapping symbols,
8108 keep looking. */
8109 /* If the next frag is non-empty and does not start with a
8110 mapping symbol, then this mapping symbol is required. */
8111 if (next->fr_address != next->fr_next->fr_address)
8112 break;
8113
8114 next = next->fr_next;
8115 }
8116 while (next != NULL);
8117 }
8118 }
8119 #endif
8120
8121 /* Adjust the symbol table. */
8122
8123 void
8124 aarch64_adjust_symtab (void)
8125 {
8126 #ifdef OBJ_ELF
8127 /* Remove any overlapping mapping symbols generated by alignment frags. */
8128 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
8129 /* Now do generic ELF adjustments. */
8130 elf_adjust_symtab ();
8131 #endif
8132 }
8133
8134 static void
8135 checked_hash_insert (struct hash_control *table, const char *key, void *value)
8136 {
8137 const char *hash_err;
8138
8139 hash_err = hash_insert (table, key, value);
8140 if (hash_err)
8141 printf ("Internal Error: Can't hash %s\n", key);
8142 }
8143
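/* Populate aarch64_ops_hsh from the libopcodes opcode table.  Opcodes that
   share a mnemonic (e.g. the various encodings of "add") are chained into a
   templates list behind a single hash entry, so one lookup returns every
   candidate encoding for that mnemonic.  */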
8144 static void
8145 fill_instruction_hash_table (void)
8146 {
8147 aarch64_opcode *opcode = aarch64_opcode_table;
8148
8149 while (opcode->name != NULL)
8150 {
8151 templates *templ, *new_templ;
8152 templ = hash_find (aarch64_ops_hsh, opcode->name);
8153
8154 new_templ = XNEW (templates);
8155 new_templ->opcode = opcode;
8156 new_templ->next = NULL;
8157
8158 if (!templ)
8159 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8160 else
8161 {
8162 new_templ->next = templ->next;
8163 templ->next = new_templ;
8164 }
8165 ++opcode;
8166 }
8167 }
8168
8169 static inline void
8170 convert_to_upper (char *dst, const char *src, size_t num)
8171 {
8172 unsigned int i;
8173 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8174 *dst = TOUPPER (*src);
8175 *dst = '\0';
8176 }
8177
8178 /* Assume STR points to a lower-case string; allocate, convert and return
8179 the corresponding upper-case string. */
8180 static inline const char*
8181 get_upper_str (const char *str)
8182 {
8183 char *ret;
8184 size_t len = strlen (str);
8185 ret = XNEWVEC (char, len + 1);
8186 convert_to_upper (ret, str, len);
8187 return ret;
8188 }
8189
8190 /* MD interface: Initialization. */
8191
8192 void
8193 md_begin (void)
8194 {
8195 unsigned mach;
8196 unsigned int i;
8197
8198 if ((aarch64_ops_hsh = hash_new ()) == NULL
8199 || (aarch64_cond_hsh = hash_new ()) == NULL
8200 || (aarch64_shift_hsh = hash_new ()) == NULL
8201 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8202 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8203 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8204 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8205 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8206 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8207 || (aarch64_reg_hsh = hash_new ()) == NULL
8208 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8209 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8210 || (aarch64_pldop_hsh = hash_new ()) == NULL
8211 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8212 as_fatal (_("virtual memory exhausted"));
8213
8214 fill_instruction_hash_table ();
8215
8216 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8217 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8218 (void *) (aarch64_sys_regs + i));
8219
8220 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8221 checked_hash_insert (aarch64_pstatefield_hsh,
8222 aarch64_pstatefields[i].name,
8223 (void *) (aarch64_pstatefields + i));
8224
8225 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8226 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8227 aarch64_sys_regs_ic[i].name,
8228 (void *) (aarch64_sys_regs_ic + i));
8229
8230 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8231 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8232 aarch64_sys_regs_dc[i].name,
8233 (void *) (aarch64_sys_regs_dc + i));
8234
8235 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8236 checked_hash_insert (aarch64_sys_regs_at_hsh,
8237 aarch64_sys_regs_at[i].name,
8238 (void *) (aarch64_sys_regs_at + i));
8239
8240 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8241 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8242 aarch64_sys_regs_tlbi[i].name,
8243 (void *) (aarch64_sys_regs_tlbi + i));
8244
8245 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8246 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8247 (void *) (reg_names + i));
8248
8249 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8250 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8251 (void *) (nzcv_names + i));
8252
8253 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8254 {
8255 const char *name = aarch64_operand_modifiers[i].name;
8256 checked_hash_insert (aarch64_shift_hsh, name,
8257 (void *) (aarch64_operand_modifiers + i));
8258 /* Also hash the name in the upper case. */
8259 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8260 (void *) (aarch64_operand_modifiers + i));
8261 }
8262
8263 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8264 {
8265 unsigned int j;
8266 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8267 the same condition code. */
8268 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8269 {
8270 const char *name = aarch64_conds[i].names[j];
8271 if (name == NULL)
8272 break;
8273 checked_hash_insert (aarch64_cond_hsh, name,
8274 (void *) (aarch64_conds + i));
8275 /* Also hash the name in the upper case. */
8276 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8277 (void *) (aarch64_conds + i));
8278 }
8279 }
8280
8281 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8282 {
8283 const char *name = aarch64_barrier_options[i].name;
8284 /* Skip xx00 - the unallocated barrier option encodings. */
8285 if ((i & 0x3) == 0)
8286 continue;
8287 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8288 (void *) (aarch64_barrier_options + i));
8289 /* Also hash the name in the upper case. */
8290 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8291 (void *) (aarch64_barrier_options + i));
8292 }
8293
8294 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8295 {
8296 const char* name = aarch64_prfops[i].name;
8297 /* Skip the unallocated prefetch operation encodings. */
8298 if (name == NULL)
8299 continue;
8300 checked_hash_insert (aarch64_pldop_hsh, name,
8301 (void *) (aarch64_prfops + i));
8302 /* Also hash the name in the upper case. */
8303 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8304 (void *) (aarch64_prfops + i));
8305 }
8306
8307 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8308 {
8309 const char* name = aarch64_hint_options[i].name;
8310
8311 checked_hash_insert (aarch64_hint_opt_hsh, name,
8312 (void *) (aarch64_hint_options + i));
8313 /* Also hash the name in the upper case. */
8314 checked_hash_insert (aarch64_hint_opt_hsh, get_upper_str (name),
8315 (void *) (aarch64_hint_options + i));
8316 }
8317
8318 /* Set the cpu variant based on the command-line options. */
8319 if (!mcpu_cpu_opt)
8320 mcpu_cpu_opt = march_cpu_opt;
8321
8322 if (!mcpu_cpu_opt)
8323 mcpu_cpu_opt = &cpu_default;
8324
8325 cpu_variant = *mcpu_cpu_opt;
8326
8327 /* Record the CPU type. */
8328 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8329
8330 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8331 }
8332
8333 /* Command line processing. */
8334
8335 const char *md_shortopts = "m:";
8336
8337 #ifdef AARCH64_BI_ENDIAN
8338 #define OPTION_EB (OPTION_MD_BASE + 0)
8339 #define OPTION_EL (OPTION_MD_BASE + 1)
8340 #else
8341 #if TARGET_BYTES_BIG_ENDIAN
8342 #define OPTION_EB (OPTION_MD_BASE + 0)
8343 #else
8344 #define OPTION_EL (OPTION_MD_BASE + 1)
8345 #endif
8346 #endif
8347
8348 struct option md_longopts[] = {
8349 #ifdef OPTION_EB
8350 {"EB", no_argument, NULL, OPTION_EB},
8351 #endif
8352 #ifdef OPTION_EL
8353 {"EL", no_argument, NULL, OPTION_EL},
8354 #endif
8355 {NULL, no_argument, NULL, 0}
8356 };
8357
8358 size_t md_longopts_size = sizeof (md_longopts);
8359
8360 struct aarch64_option_table
8361 {
8362 const char *option; /* Option name to match. */
8363 const char *help; /* Help information. */
8364 int *var; /* Variable to change. */
8365 int value; /* What to change it to. */
8366 char *deprecated; /* If non-null, print this message. */
8367 };
8368
8369 static struct aarch64_option_table aarch64_opts[] = {
8370 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
8371 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
8372 NULL},
8373 #ifdef DEBUG_AARCH64
8374 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
8375 #endif /* DEBUG_AARCH64 */
8376 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
8377 NULL},
8378 {"mno-verbose-error", N_("do not output verbose error messages"),
8379 &verbose_error_p, 0, NULL},
8380 {NULL, NULL, NULL, 0, NULL}
8381 };
8382
8383 struct aarch64_cpu_option_table
8384 {
8385 const char *name;
8386 const aarch64_feature_set value;
8387 /* The canonical name of the CPU, or NULL to use NAME converted to upper
8388 case. */
8389 const char *canonical_name;
8390 };
8391
8392 /* This list should, at a minimum, contain all the cpu names
8393 recognized by GCC. */
8394 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
8395 {"all", AARCH64_ANY, NULL},
8396 {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
8397 AARCH64_FEATURE_CRC), "Cortex-A35"},
8398 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
8399 AARCH64_FEATURE_CRC), "Cortex-A53"},
8400 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
8401 AARCH64_FEATURE_CRC), "Cortex-A57"},
8402 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
8403 AARCH64_FEATURE_CRC), "Cortex-A72"},
8404 {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
8405 AARCH64_FEATURE_CRC), "Cortex-A73"},
8406 {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8407 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16),
8408 "Cortex-A55"},
8409 {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8410 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16),
8411 "Cortex-A75"},
8412 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
8413 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8414 "Samsung Exynos M1"},
8415 {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
8416 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
8417 | AARCH64_FEATURE_RDMA),
8418 "Qualcomm Falkor"},
8419 {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
8420 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
8421 | AARCH64_FEATURE_RDMA),
8422 "Qualcomm QDF24XX"},
8423 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
8424 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8425 "Cavium ThunderX"},
8426 {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
8427 AARCH64_FEATURE_CRYPTO),
8428 "Broadcom Vulcan"},
8429 /* The 'xgene-1' name is an older spelling of 'xgene1'; it was used
8430 in earlier releases and has been superseded by 'xgene1' in all
8431 current tools. */
8432 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
8433 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
8434 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
8435 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
8436 {"generic", AARCH64_ARCH_V8, NULL},
8437
8438 {NULL, AARCH64_ARCH_NONE, NULL}
8439 };
8440
8441 struct aarch64_arch_option_table
8442 {
8443 const char *name;
8444 const aarch64_feature_set value;
8445 };
8446
8447 /* This list should, at a minimum, contain all the architecture names
8448 recognized by GCC. */
8449 static const struct aarch64_arch_option_table aarch64_archs[] = {
8450 {"all", AARCH64_ANY},
8451 {"armv8-a", AARCH64_ARCH_V8},
8452 {"armv8.1-a", AARCH64_ARCH_V8_1},
8453 {"armv8.2-a", AARCH64_ARCH_V8_2},
8454 {"armv8.3-a", AARCH64_ARCH_V8_3},
8455 {NULL, AARCH64_ARCH_NONE}
8456 };
8457
8458 /* ISA extensions. */
8459 struct aarch64_option_cpu_value_table
8460 {
8461 const char *name;
8462 const aarch64_feature_set value;
8463 const aarch64_feature_set require; /* Feature dependencies. */
8464 };
8465
8466 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
8467 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
8468 AARCH64_ARCH_NONE},
8469 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
8470 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
8471 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
8472 AARCH64_ARCH_NONE},
8473 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
8474 AARCH64_ARCH_NONE},
8475 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
8476 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
8477 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
8478 AARCH64_ARCH_NONE},
8479 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
8480 AARCH64_ARCH_NONE},
8481 {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
8482 AARCH64_ARCH_NONE},
8483 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
8484 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
8485 {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
8486 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
8487 {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
8488 AARCH64_ARCH_NONE},
8489 {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
8490 AARCH64_FEATURE (AARCH64_FEATURE_F16
8491 | AARCH64_FEATURE_SIMD
8492 | AARCH64_FEATURE_COMPNUM, 0)},
8493 {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
8494 AARCH64_FEATURE (AARCH64_FEATURE_F16
8495 | AARCH64_FEATURE_SIMD, 0)},
8496 {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
8497 AARCH64_ARCH_NONE},
8498 {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
8499 AARCH64_ARCH_NONE},
8500 {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
8501 };
8502
8503 struct aarch64_long_option_table
8504 {
8505 const char *option; /* Substring to match. */
8506 const char *help; /* Help information. */
8507 int (*func) (const char *subopt); /* Function to decode sub-option. */
8508 char *deprecated; /* If non-null, print this message. */
8509 };
8510
8511 /* Transitive closure of features depending on set. */
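/* For example, disabling "fp" also disables "simd", "fp16", "compnum",
   "sve", "rdma" and "crypto", since each of those depends, directly or
   transitively, on "fp" (see aarch64_features above).  */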
8512 static aarch64_feature_set
8513 aarch64_feature_disable_set (aarch64_feature_set set)
8514 {
8515 const struct aarch64_option_cpu_value_table *opt;
8516 aarch64_feature_set prev = 0;
8517
8518 while (prev != set) {
8519 prev = set;
8520 for (opt = aarch64_features; opt->name != NULL; opt++)
8521 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8522 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8523 }
8524 return set;
8525 }
8526
8527 /* Transitive closure of dependencies of set. */
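/* For example, enabling "sve" also enables "fp16", "simd" and "compnum",
   and through them "fp", since those are its transitive dependencies.  */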
8528 static aarch64_feature_set
8529 aarch64_feature_enable_set (aarch64_feature_set set)
8530 {
8531 const struct aarch64_option_cpu_value_table *opt;
8532 aarch64_feature_set prev = 0;
8533
8534 while (prev != set) {
8535 prev = set;
8536 for (opt = aarch64_features; opt->name != NULL; opt++)
8537 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8538 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8539 }
8540 return set;
8541 }
8542
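/* Parse the architecture extension part of a -mcpu/-march value or a
   .arch_extension operand, e.g. "+crc+crypto+nolse", or a single name such
   as "nofp" when EXT_ONLY is set.  *OPT_P is redirected to a freshly
   allocated copy of the feature set, updated with the requested extensions
   added or removed; returns 1 on success, 0 on error.  */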
8543 static int
8544 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8545 bfd_boolean ext_only)
8546 {
8547 /* We insist on extensions being added before being removed. We achieve
8548 this by using the ADDING_VALUE variable to indicate whether we are
8549 adding an extension (1) or removing it (0) and only allowing it to
8550 change in the order -1 -> 1 -> 0. */
8551 int adding_value = -1;
8552 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8553
8554 /* Copy the feature set, so that we can modify it. */
8555 *ext_set = **opt_p;
8556 *opt_p = ext_set;
8557
8558 while (str != NULL && *str != 0)
8559 {
8560 const struct aarch64_option_cpu_value_table *opt;
8561 const char *ext = NULL;
8562 int optlen;
8563
8564 if (!ext_only)
8565 {
8566 if (*str != '+')
8567 {
8568 as_bad (_("invalid architectural extension"));
8569 return 0;
8570 }
8571
8572 ext = strchr (++str, '+');
8573 }
8574
8575 if (ext != NULL)
8576 optlen = ext - str;
8577 else
8578 optlen = strlen (str);
8579
8580 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
8581 {
8582 if (adding_value != 0)
8583 adding_value = 0;
8584 optlen -= 2;
8585 str += 2;
8586 }
8587 else if (optlen > 0)
8588 {
8589 if (adding_value == -1)
8590 adding_value = 1;
8591 else if (adding_value != 1)
8592 {
8593 as_bad (_("must specify extensions to add before specifying "
8594 "those to remove"));
8595 return 0;
8596 }
8597 }
8598
8599 if (optlen == 0)
8600 {
8601 as_bad (_("missing architectural extension"));
8602 return 0;
8603 }
8604
8605 gas_assert (adding_value != -1);
8606
8607 for (opt = aarch64_features; opt->name != NULL; opt++)
8608 if (strncmp (opt->name, str, optlen) == 0)
8609 {
8610 aarch64_feature_set set;
8611
8612 /* Add or remove the extension. */
8613 if (adding_value)
8614 {
8615 set = aarch64_feature_enable_set (opt->value);
8616 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
8617 }
8618 else
8619 {
8620 set = aarch64_feature_disable_set (opt->value);
8621 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
8622 }
8623 break;
8624 }
8625
8626 if (opt->name == NULL)
8627 {
8628 as_bad (_("unknown architectural extension `%s'"), str);
8629 return 0;
8630 }
8631
8632 str = ext;
8633 }
8634
8635 return 1;
8636 }
8637
8638 static int
8639 aarch64_parse_cpu (const char *str)
8640 {
8641 const struct aarch64_cpu_option_table *opt;
8642 const char *ext = strchr (str, '+');
8643 size_t optlen;
8644
8645 if (ext != NULL)
8646 optlen = ext - str;
8647 else
8648 optlen = strlen (str);
8649
8650 if (optlen == 0)
8651 {
8652 as_bad (_("missing cpu name `%s'"), str);
8653 return 0;
8654 }
8655
8656 for (opt = aarch64_cpus; opt->name != NULL; opt++)
8657 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8658 {
8659 mcpu_cpu_opt = &opt->value;
8660 if (ext != NULL)
8661 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
8662
8663 return 1;
8664 }
8665
8666 as_bad (_("unknown cpu `%s'"), str);
8667 return 0;
8668 }
8669
8670 static int
8671 aarch64_parse_arch (const char *str)
8672 {
8673 const struct aarch64_arch_option_table *opt;
8674 const char *ext = strchr (str, '+');
8675 size_t optlen;
8676
8677 if (ext != NULL)
8678 optlen = ext - str;
8679 else
8680 optlen = strlen (str);
8681
8682 if (optlen == 0)
8683 {
8684 as_bad (_("missing architecture name `%s'"), str);
8685 return 0;
8686 }
8687
8688 for (opt = aarch64_archs; opt->name != NULL; opt++)
8689 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8690 {
8691 march_cpu_opt = &opt->value;
8692 if (ext != NULL)
8693 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
8694
8695 return 1;
8696 }
8697
8698 as_bad (_("unknown architecture `%s'\n"), str);
8699 return 0;
8700 }
8701
8702 /* ABIs. */
8703 struct aarch64_option_abi_value_table
8704 {
8705 const char *name;
8706 enum aarch64_abi_type value;
8707 };
8708
8709 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
8710 {"ilp32", AARCH64_ABI_ILP32},
8711 {"lp64", AARCH64_ABI_LP64},
8712 };
8713
8714 static int
8715 aarch64_parse_abi (const char *str)
8716 {
8717 unsigned int i;
8718
8719 if (str[0] == '\0')
8720 {
8721 as_bad (_("missing abi name `%s'"), str);
8722 return 0;
8723 }
8724
8725 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
8726 if (strcmp (str, aarch64_abis[i].name) == 0)
8727 {
8728 aarch64_abi = aarch64_abis[i].value;
8729 return 1;
8730 }
8731
8732 as_bad (_("unknown abi `%s'\n"), str);
8733 return 0;
8734 }
8735
8736 static struct aarch64_long_option_table aarch64_long_opts[] = {
8737 #ifdef OBJ_ELF
8738 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
8739 aarch64_parse_abi, NULL},
8740 #endif /* OBJ_ELF */
8741 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
8742 aarch64_parse_cpu, NULL},
8743 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
8744 aarch64_parse_arch, NULL},
8745 {NULL, NULL, 0, NULL}
8746 };
8747
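/* Handle a command-line option.  Single-character options are handled
   directly; anything else is matched against aarch64_opts and then, by
   leading substring, against aarch64_long_opts.  For example "-mabi=ilp32"
   arrives as c == 'm' and arg == "abi=ilp32"; it matches the "mabi=" entry
   and aarch64_parse_abi is called with "ilp32".  */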
8748 int
8749 md_parse_option (int c, const char *arg)
8750 {
8751 struct aarch64_option_table *opt;
8752 struct aarch64_long_option_table *lopt;
8753
8754 switch (c)
8755 {
8756 #ifdef OPTION_EB
8757 case OPTION_EB:
8758 target_big_endian = 1;
8759 break;
8760 #endif
8761
8762 #ifdef OPTION_EL
8763 case OPTION_EL:
8764 target_big_endian = 0;
8765 break;
8766 #endif
8767
8768 case 'a':
8769 /* Listing option. Just ignore these, we don't support additional
8770 ones. */
8771 return 0;
8772
8773 default:
8774 for (opt = aarch64_opts; opt->option != NULL; opt++)
8775 {
8776 if (c == opt->option[0]
8777 && ((arg == NULL && opt->option[1] == 0)
8778 || streq (arg, opt->option + 1)))
8779 {
8780 /* If the option is deprecated, tell the user. */
8781 if (opt->deprecated != NULL)
8782 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
8783 arg ? arg : "", _(opt->deprecated));
8784
8785 if (opt->var != NULL)
8786 *opt->var = opt->value;
8787
8788 return 1;
8789 }
8790 }
8791
8792 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8793 {
8794 /* These options are expected to have an argument. */
8795 if (c == lopt->option[0]
8796 && arg != NULL
8797 && strncmp (arg, lopt->option + 1,
8798 strlen (lopt->option + 1)) == 0)
8799 {
8800 /* If the option is deprecated, tell the user. */
8801 if (lopt->deprecated != NULL)
8802 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
8803 _(lopt->deprecated));
8804
8805 /* Call the sub-option parser. */
8806 return lopt->func (arg + strlen (lopt->option) - 1);
8807 }
8808 }
8809
8810 return 0;
8811 }
8812
8813 return 1;
8814 }
8815
8816 void
8817 md_show_usage (FILE * fp)
8818 {
8819 struct aarch64_option_table *opt;
8820 struct aarch64_long_option_table *lopt;
8821
8822 fprintf (fp, _(" AArch64-specific assembler options:\n"));
8823
8824 for (opt = aarch64_opts; opt->option != NULL; opt++)
8825 if (opt->help != NULL)
8826 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
8827
8828 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8829 if (lopt->help != NULL)
8830 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
8831
8832 #ifdef OPTION_EB
8833 fprintf (fp, _("\
8834 -EB assemble code for a big-endian cpu\n"));
8835 #endif
8836
8837 #ifdef OPTION_EL
8838 fprintf (fp, _("\
8839 -EL assemble code for a little-endian cpu\n"));
8840 #endif
8841 }
8842
8843 /* Parse a .cpu directive. */
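/* E.g. ".cpu cortex-a57+crypto" selects the Cortex-A57 feature set and
   additionally enables the crypto extension (and its dependencies).  */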
8844
8845 static void
8846 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
8847 {
8848 const struct aarch64_cpu_option_table *opt;
8849 char saved_char;
8850 char *name;
8851 char *ext;
8852 size_t optlen;
8853
8854 name = input_line_pointer;
8855 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8856 input_line_pointer++;
8857 saved_char = *input_line_pointer;
8858 *input_line_pointer = 0;
8859
8860 ext = strchr (name, '+');
8861
8862 if (ext != NULL)
8863 optlen = ext - name;
8864 else
8865 optlen = strlen (name);
8866
8867 /* Skip the first "all" entry. */
8868 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
8869 if (strlen (opt->name) == optlen
8870 && strncmp (name, opt->name, optlen) == 0)
8871 {
8872 mcpu_cpu_opt = &opt->value;
8873 if (ext != NULL)
8874 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8875 return;
8876
8877 cpu_variant = *mcpu_cpu_opt;
8878
8879 *input_line_pointer = saved_char;
8880 demand_empty_rest_of_line ();
8881 return;
8882 }
8883 as_bad (_("unknown cpu `%s'"), name);
8884 *input_line_pointer = saved_char;
8885 ignore_rest_of_line ();
8886 }
8887
8888
8889 /* Parse a .arch directive. */
8890
8891 static void
8892 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
8893 {
8894 const struct aarch64_arch_option_table *opt;
8895 char saved_char;
8896 char *name;
8897 char *ext;
8898 size_t optlen;
8899
8900 name = input_line_pointer;
8901 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8902 input_line_pointer++;
8903 saved_char = *input_line_pointer;
8904 *input_line_pointer = 0;
8905
8906 ext = strchr (name, '+');
8907
8908 if (ext != NULL)
8909 optlen = ext - name;
8910 else
8911 optlen = strlen (name);
8912
8913 /* Skip the first "all" entry. */
8914 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8915 if (strlen (opt->name) == optlen
8916 && strncmp (name, opt->name, optlen) == 0)
8917 {
8918 mcpu_cpu_opt = &opt->value;
8919 if (ext != NULL)
8920 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8921 return;
8922
8923 cpu_variant = *mcpu_cpu_opt;
8924
8925 *input_line_pointer = saved_char;
8926 demand_empty_rest_of_line ();
8927 return;
8928 }
8929
8930 as_bad (_("unknown architecture `%s'\n"), name);
8931 *input_line_pointer = saved_char;
8932 ignore_rest_of_line ();
8933 }
8934
8935 /* Parse a .arch_extension directive. */
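/* E.g. ".arch_extension crc" enables the CRC instructions for subsequent
   code, and ".arch_extension nocrc" disables them again.  */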
8936
8937 static void
8938 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8939 {
8940 char saved_char;
8941 char *ext = input_line_pointer;
8942
8943 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8944 input_line_pointer++;
8945 saved_char = *input_line_pointer;
8946 *input_line_pointer = 0;
8947
8948 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8949 return;
8950
8951 cpu_variant = *mcpu_cpu_opt;
8952
8953 *input_line_pointer = saved_char;
8954 demand_empty_rest_of_line ();
8955 }
8956
8957 /* Copy symbol information. */
8958
8959 void
8960 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
8961 {
8962 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
8963 }